repo_name | path | copies | size | content | license
---|---|---|---|---|---|
jefffohl/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backend_bases.py | 69 | 69740 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend.
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
"""
from __future__ import division
import os, warnings, time
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
from matplotlib import rcParams
class RendererBase:
"""An abstract base class to handle drawing/rendering operations.
The following methods *must* be implemented in the backend:
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_text`
* :meth:`get_text_width_height_descent`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
def __init__(self):
self._texmanager = None
def open_group(self, s):
"""
Open a grouping element with label *s*. This is currently only used by
:mod:`~matplotlib.backends.backend_svg`
"""
pass
def close_group(self, s):
"""
Close a grouping element with label *s*.
This is currently only used by :mod:`~matplotlib.backends.backend_svg`
"""
pass
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
*gc*
the :class:`GraphicsContextBase` instance
*marker_trans*
is an affine transform applied to the marker.
*trans*
is an affine transform applied to the path.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
"""
tpath = trans.transform_path(path)
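# Each segment's final two values are the (x, y) end point; the marker
# path is stamped there by composing its transform with a translation.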
for vertices, codes in tpath.iter_segments():
if len(vertices):
x,y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans + transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, master_transform, cliprect, clippath,
clippath_trans, paths, all_transforms, offsets,
offsetTrans, facecolors, edgecolors, linewidths,
linestyles, antialiaseds, urls):
"""
Draws a collection of paths, selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before
being applied.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
draw_path. Some backends may want to override this in order
to render each set of path data only once, and then reference
that path multiple times with the different offsets, colors,
styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transform))
for xo, yo, path_id, gc, rgbFace in self._iter_collection(
path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
path, transform = path_id
transform = transforms.Affine2D(transform.get_matrix()).translate(xo, yo)
self.draw_path(gc, path, transform, rgbFace)
def draw_quad_mesh(self, master_transform, cliprect, clippath,
clippath_trans, meshWidth, meshHeight, coordinates,
offsets, offsetTrans, facecolors, antialiased,
showedges):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if showedges:
edgecolors = np.array([[0.0, 0.0, 0.0, 1.0]], np.float_)
linewidths = np.array([1.0], np.float_)
else:
edgecolors = facecolors
linewidths = np.array([0.0], np.float_)
return self.draw_path_collection(
master_transform, cliprect, clippath, clippath_trans,
paths, [], offsets, offsetTrans, facecolors, edgecolors,
linewidths, [], [antialiased], [None])
def _iter_collection_raw_paths(self, master_transform, paths, all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
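# paths and transforms are recycled modulo their lengths, so a short
# list (e.g. a single transform) is broadcast across a longer one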
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = all_transforms[i % Ntransforms]
yield path, transform + master_transform
def _iter_collection(self, path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
a space-efficient :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as those passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
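# Property lists are broadcast across the N draws via modulo indexing
# below; an empty facecolor/edgecolor list means "no fill"/"no stroke".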
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc = self.new_gc()
gc.set_clip_rectangle(cliprect)
if clippath is not None:
clippath = transforms.TransformedPath(clippath, clippath_trans)
gc.set_clip_path(clippath)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc.set_linewidth(0.0)
xo, yo = 0, 0
for i in xrange(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
gc.set_foreground(edgecolors[i % Nedgecolors])
if Nlinewidths:
gc.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc.set_dashes(*linestyles[i % Nlinestyles])
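# An RGBA facecolor carries its own alpha: move it onto the gc and
# pass only the RGB triple on for filling.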
if rgbFace is not None and len(rgbFace)==4:
gc.set_alpha(rgbFace[-1])
rgbFace = rgbFace[:3]
gc.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc, rgbFace
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
"""
Draw the image instance into the current axes.
*x*
is the distance in pixels from the left hand side of the canvas.
*y*
the distance from the origin. That is, if *origin* is
upper, *y* is the distance from the top; if *origin* is lower, *y*
is the distance from the bottom.
*im*
the :class:`matplotlib._image.Image` instance
*bbox*
a :class:`matplotlib.transforms.Bbox` instance for clipping, or
None
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
Override this method for renderers that do not necessarily
want to rescale and composite raster images (like SVG).
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
raise NotImplementedError
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
"""
Draw the text instance
*gc*
the :class:`GraphicsContextBase` instance
*x*
the x location of the text in display coords
*y*
the y location of the text in display coords
*s*
the text string to draw
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*angle*
the rotation angle in degrees
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to ``if 1``, and then the actual bounding box will be drawn along with
your text.
"""
raise NotImplementedError
def flipy(self):
"""
Return true if small y values correspond to the top of the canvas.
Used only for drawing text (:mod:`matplotlib.text`) and images
(:mod:`matplotlib.image`)
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
raise NotImplementedError
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
*points*
a float or a numpy array of float
return points converted to pixels
You need to override this function (unless your backend
doesn't have a dpi, eg, postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
"""
return points
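# For example, a dpi-based raster backend would typically scale by its
# resolution (a sketch under that assumption, not this class's behavior):
#
#     def points_to_pixels(self, points):
#         return points * self.dpi / 72.0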
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
pass
def stop_rasterizing(self):
pass
class GraphicsContextBase:
"""
An abstract base class that provides color, line styles, etc...
"""
# a mapping from dash styles to suggested offset, dash pairs
dashd = {
'solid' : (None, None),
'dashed' : (0, (6.0, 6.0)),
'dashdot' : (0, (3.0, 5.0, 1.0, 5.0)),
'dotted' : (0, (1.0, 3.0)),
}
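# Each value is (offset, on/off ink pattern in points): e.g. 'dashed'
# draws 6 points of ink followed by a 6 point gap, repeating.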
def __init__(self):
self._alpha = 1.0
self._antialiased = 1 # use 0,1 not True, False for extension code
self._capstyle = 'butt'
self._cliprect = None
self._clippath = None
self._dashes = None, None
self._joinstyle = 'miter'
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0)
self._hatch = None
self._url = None
self._snap = None
def copy_properties(self, gc):
'Copy properties from gc to self'
self._alpha = gc._alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._hatch = gc._hatch
self._url = gc._url
self._snap = gc._snap
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on
all backends
"""
return self._alpha
def get_antialiased(self):
"Return true if the object should try to do antialiased rendering"
return self._antialiased
def get_capstyle(self):
"""
Return the capstyle as a string in ('butt', 'round', 'projecting')
"""
return self._capstyle
def get_clip_rectangle(self):
"""
Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox` instance
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a :class:`~matplotlib.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def get_dashes(self):
"""
Return the dash information as an offset dashlist tuple.
The dash list is an even-sized list that gives the ink on, ink
off in pixels.
See p. 107 of the PostScript `BLUEBOOK
<http://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
for more info.
Default value is None
"""
return self._dashes
def get_joinstyle(self):
"""
Return the line join style as one of ('miter', 'round', 'bevel')
"""
return self._joinstyle
def get_linestyle(self, style):
"""
Return the linestyle: one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
return self._linestyle
def get_linewidth(self):
"""
Return the line width in points as a scalar
"""
return self._linewidth
def get_rgb(self):
"""
Returns a tuple of three floats from 0-1. The color can be a
MATLAB format string, an HTML hex color string, or an RGB tuple.
"""
return self._rgb
def get_url(self):
"""
Returns a URL if one is set, None otherwise.
"""
return self._url
def get_snap(self):
"""
Returns the snap setting, which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
"""
self._alpha = alpha
def set_antialiased(self, b):
"""
Set whether the object should be drawn with antialiased rendering.
"""
# use 0, 1 to make life easier on extension code trying to read the gc
if b: self._antialiased = 1
else: self._antialiased = 0
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
"""
Set the clip rectangle with sequence (left, bottom, width, height)
"""
self._cliprect = rectangle
def set_clip_path(self, path):
"""
Set the clip path and transformation. Path should be a
:class:`~matplotlib.transforms.TransformedPath` instance.
"""
assert path is None or isinstance(path, transforms.TransformedPath)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
*dash_offset*
is the offset (usually 0).
*dash_list*
specifies the on-off sequence as points. ``(None, None)`` specifies a solid line
"""
self._dashes = dash_offset, dash_list
def set_foreground(self, fg, isRGB=False):
"""
Set the foreground color. fg can be a MATLAB format string, an
HTML hex color string, an RGB unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
The :class:`GraphicsContextBase` converts colors to RGB
internally. If you know the color is RGB already, you can set
``isRGB=True`` to avoid the performance hit of the conversion.
"""
if isRGB:
self._rgb = fg
else:
self._rgb = colors.colorConverter.to_rgba(fg)
def set_graylevel(self, frac):
"""
Set the foreground color to be a gray level with *frac*
"""
self._rgb = (frac, frac, frac)
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
"""
Set the linewidth in points
"""
self._linewidth = w
def set_linestyle(self, style):
"""
Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
try:
offset, dashes = self.dashd[style]
except KeyError:
raise ValueError('Unrecognized linestyle: %s' % style)
self._linestyle = style
self.set_dashes(offset, dashes)
def set_url(self, url):
"""
Set the URL for links in backends that support it.
"""
self._url = url
def set_snap(self, snap):
"""
Set the snap setting, which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""
Sets the hatch style for filling
"""
self._hatch = hatch
def get_hatch(self):
"""
Gets the current hatch style
"""
return self._hatch
class Event:
"""
A matplotlib event. Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`. The following attributes
are defined and shown with their default values
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
"""
def __init__(self, name, canvas, guiEvent=None):
self.name = name
self.canvas = canvas
self.guiEvent = guiEvent
class IdleEvent(Event):
"""
An event triggered by the GUI backend when it is idle -- useful
for passive animation
"""
pass
class DrawEvent(Event):
"""
An event triggered by a draw operation on the canvas
In addition to the :class:`Event` attributes, the following event attributes are defined:
*renderer*
the :class:`RendererBase` instance for the draw event
"""
def __init__(self, name, canvas, renderer):
Event.__init__(self, name, canvas)
self.renderer = renderer
class ResizeEvent(Event):
"""
An event triggered by a canvas resize
In addition to the :class:`Event` attributes, the following event attributes are defined:
*width*
width of the canvas in pixels
*height*
height of the canvas in pixels
"""
def __init__(self, name, canvas):
Event.__init__(self, name, canvas)
self.width, self.height = canvas.get_width_height()
class LocationEvent(Event):
"""
An event that has a screen location.
In addition to the :class:`Event` attributes, the following
event attributes are defined:
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
"""
x = None # x position - pixels from left of canvas
y = None # y position - pixels from bottom of canvas
inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
# the last event that was triggered before this one
lastevent = None
def __init__(self, name, canvas, x, y, guiEvent=None):
"""
*x*, *y* in figure coords, 0,0 = bottom, left
"""
Event.__init__(self, name, canvas, guiEvent=guiEvent)
self.x = x
self.y = y
if x is None or y is None:
# cannot check if event was in axes if no x,y info
self.inaxes = None
self._update_enter_leave()
return
# Find all axes containing the mouse
axes_list = [a for a in self.canvas.figure.get_axes() if a.in_axes(self)]
if len(axes_list) == 0: # None found
self.inaxes = None
self._update_enter_leave()
return
elif (len(axes_list) > 1): # Overlap, get the highest zorder
axes_list.sort(key=lambda ax: ax.zorder)
self.inaxes = axes_list[-1] # Use the highest zorder
else: # Just found one hit
self.inaxes = axes_list[0]
try:
xdata, ydata = self.inaxes.transData.inverted().transform_point((x, y))
except ValueError:
self.xdata = None
self.ydata = None
else:
self.xdata = xdata
self.ydata = ydata
self._update_enter_leave()
def _update_enter_leave(self):
'process the figure/axes enter leave events'
if LocationEvent.lastevent is not None:
last = LocationEvent.lastevent
if last.inaxes!=self.inaxes:
# process axes enter/leave events
if last.inaxes is not None:
last.canvas.callbacks.process('axes_leave_event', last)
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
else:
# process a figure enter event
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
"""
A mouse event ('button_press_event', 'button_release_event', 'scroll_event',
'motion_notify_event').
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used for scroll events)
*key*
the key pressed: None, one of chr(range(255)), 'shift', 'win', or 'control'
*step*
number of scroll steps (positive for 'up', negative for 'down')
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
x = None # x position - pixels from left of canvas
y = None # y position - pixels from bottom of canvas
button = None # button pressed None, 1, 2, 3
inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
step = None # scroll steps for scroll events
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, guiEvent=None):
"""
x, y in figure coords, 0,0 = bottom, left
button pressed None, 1, 2, 3, 'up', 'down'
"""
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.button = button
self.key = key
self.step = step
class PickEvent(Event):
"""
A pick event, fired when the user picks a location on the canvas
sufficiently close to an artist.
Attrs: all the :class:`Event` attributes plus
*mouseevent*
the :class:`MouseEvent` that generated the pick
*artist*
the :class:`~matplotlib.artist.Artist` picked
other
extra class dependent attrs -- eg a
:class:`~matplotlib.lines.Line2D` pick may define different
extra attributes than a
:class:`~matplotlib.collections.PatchCollection` pick event
Example usage::
line, = ax.plot(rand(100), 'o', picker=5) # 5 points tolerance
def on_pick(event):
thisline = event.artist
xdata, ydata = thisline.get_data()
ind = event.ind
print 'on pick line:', zip(xdata[ind], ydata[ind])
cid = fig.canvas.mpl_connect('pick_event', on_pick)
"""
def __init__(self, name, canvas, mouseevent, artist, guiEvent=None, **kwargs):
Event.__init__(self, name, canvas, guiEvent)
self.mouseevent = mouseevent
self.artist = artist
self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
"""
A key event (key press, key release).
Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`.
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*key*
the key pressed: None, one of chr(range(255)), 'shift', 'win', or 'control'
This interface may change slightly when better support for
modifier keys is included.
Example usage::
def on_key(event):
print 'you pressed', event.key, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('key_press_event', on_key)
"""
def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.key = key
class FigureCanvasBase:
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event'
]
def __init__(self, figure):
figure.set_canvas(self)
self.figure = figure
# a dictionary from event name to a dictionary that maps cid->func
self.callbacks = cbook.CallbackRegistry(self.events)
self.widgetlock = widgets.LockDraw()
self._button = None # the button pressed
self._key = None # the key pressed
self._lastx, self._lasty = None, None
self.button_pick_id = self.mpl_connect('button_press_event',self.pick)
self.scroll_pick_id = self.mpl_connect('scroll_event',self.pick)
if False:
## highlight the artists that are hit
self.mpl_connect('motion_notify_event',self.onHilite)
## delete the artists that are clicked on
#self.mpl_disconnect(self.button_pick_id)
#self.mpl_connect('button_press_event',self.onRemove)
def onRemove(self, ev):
"""
Mouse event processor which removes the top artist
under the cursor. Connect this to the 'button_press_event'
using::
canvas.mpl_connect('button_press_event', canvas.onRemove)
"""
def sort_artists(artists):
# This depends on stable sort and artists returned
# from get_children in z order.
L = [ (h.zorder, h) for h in artists ]
L.sort()
return [ h for zorder, h in L ]
# Find the top artist under the cursor
under = sort_artists(self.figure.hitlist(ev))
h = None
if under: h = under[-1]
# Try deleting that artist, or its parent if you
# can't delete the artist
while h:
print "Removing",h
if h.remove():
self.draw_idle()
break
parent = None
for p in under:
if h in p.get_children():
parent = p
break
h = parent
def onHilite(self, ev):
"""
Mouse event processor which highlights the artists
under the cursor. Connect this to the 'motion_notify_event'
using::
canvas.mpl_connect('motion_notify_event',canvas.onHilite)
"""
if not hasattr(self,'_active'): self._active = dict()
under = self.figure.hitlist(ev)
enter = [a for a in under if a not in self._active]
leave = [a for a in self._active if a not in under]
print "within:"," ".join([str(x) for x in under])
#print "entering:",[str(a) for a in enter]
#print "leaving:",[str(a) for a in leave]
# On leave restore the captured colour
for a in leave:
if hasattr(a,'get_color'):
a.set_color(self._active[a])
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor(self._active[a][0])
a.set_facecolor(self._active[a][1])
del self._active[a]
# On enter, capture the color and repaint the artist
# with the highlight colour. Capturing colour has to
# be done first in case the parent recolouring affects
# the child.
for a in enter:
if hasattr(a,'get_color'):
self._active[a] = a.get_color()
elif hasattr(a,'get_edgecolor'):
self._active[a] = (a.get_edgecolor(),a.get_facecolor())
else: self._active[a] = None
for a in enter:
if hasattr(a,'get_color'):
a.set_color('red')
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor('red')
a.set_facecolor('lightblue')
else: self._active[a] = None
self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
def blit(self, bbox=None):
"""
blit the canvas in bbox (default entire canvas)
"""
pass
def resize(self, w, h):
"""
set the canvas size in pixels
"""
pass
def draw_event(self, renderer):
"""
This method will call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
This method will call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
def key_press_event(self, key, guiEvent=None):
"""
This method will call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
This method will call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
This method will be called by artists who are picked and will
fire off :class:`PickEvent` callbacks to registered listeners.
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist, **kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
This method will call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
This method will call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
This method will call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
This method will call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when the mouse
leaves the canvas.
*guiEvent*
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
def enter_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when the mouse
enters the canvas.
*guiEvent*
the native UI event that generated the mpl event
"""
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def idle_event(self, guiEvent=None):
'call when GUI is idle'
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def draw(self, *args, **kwargs):
"""
Render the :class:`~matplotlib.figure.Figure`
"""
pass
def draw_idle(self, *args, **kwargs):
"""
:meth:`draw` only if idle; defaults to draw but backends can override
"""
self.draw(*args, **kwargs)
def draw_cursor(self, event):
"""
Draw a cursor in the event.axes if inaxes is not None. Use
native GUI drawing for efficiency if possible
"""
pass
def get_width_height(self):
"""
return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
filetypes = {
'emf': 'Enhanced Metafile',
'eps': 'Encapsulated PostScript',
'pdf': 'Portable Document Format',
'png': 'Portable Network Graphics',
'ps' : 'PostScript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'
}
# All of these print_* functions do a lazy import because
# a) otherwise we'd have cyclical imports, since all of these
# classes inherit from FigureCanvasBase
# b) so we don't import a bunch of stuff the user may never use
def print_emf(self, *args, **kwargs):
from backends.backend_emf import FigureCanvasEMF # lazy import
emf = self.switch_backends(FigureCanvasEMF)
return emf.print_emf(*args, **kwargs)
def print_eps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_eps(*args, **kwargs)
def print_pdf(self, *args, **kwargs):
from backends.backend_pdf import FigureCanvasPdf # lazy import
pdf = self.switch_backends(FigureCanvasPdf)
return pdf.print_pdf(*args, **kwargs)
def print_png(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(*args, **kwargs)
def print_ps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_ps(*args, **kwargs)
def print_raw(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_raw(*args, **kwargs)
print_bmp = print_rgb = print_raw
def print_svg(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svg(*args, **kwargs)
def print_svgz(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svgz(*args, **kwargs)
def get_supported_filetypes(self):
return self.filetypes
def get_supported_filetypes_grouped(self):
groupings = {}
for ext, name in self.filetypes.items():
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', format=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and edge
colors. This is useful because some of the GUIs have a gray figure
face color background and you'll probably want to override this on
hardcopy.
Arguments are:
*filename*
can also be a file object on image backends
*dpi*
the dots per inch to save the figure in; if None, use savefig.dpi
*facecolor*
the facecolor of the figure
*edgecolor*
the edgecolor of the figure
*orientation*
'landscape' | 'portrait'; only currently applies to PostScript printing (not supported on all backends)
*format*
when set, forcibly set the file format to save to
"""
if format is None:
if cbook.is_string_like(filename):
format = os.path.splitext(filename)[1][1:]
if format is None or format == '':
format = self.get_default_filetype()
if cbook.is_string_like(filename):
filename = filename.rstrip('.') + '.' + format
format = format.lower()
method_name = 'print_%s' % format
if (format not in self.filetypes or
not hasattr(self, method_name)):
formats = self.filetypes.keys()
formats.sort()
raise ValueError(
'Format "%s" is not supported.\n'
'Supported formats: '
'%s.' % (format, ', '.join(formats)))
if dpi is None:
dpi = rcParams['savefig.dpi']
origDPI = self.figure.dpi
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
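# Temporarily swap in the hardcopy dpi and patch colors; the finally
# block below restores the on-screen state even if rendering fails.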
self.figure.dpi = dpi
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
try:
result = getattr(self, method_name)(
filename,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
**kwargs)
finally:
self.figure.dpi = origDPI
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
self.figure.set_canvas(self)
#self.figure.canvas.draw() ## seems superfluous
return result
def get_default_filetype(self):
raise NotImplementedError
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def switch_backends(self, FigureCanvasClass):
"""
instantiate an instance of FigureCanvasClass
This is used for backend switching, eg, to instantiate a
FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is
not done, so any changes to one of the instances (eg, setting
figure size or line props), will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
return newCanvas
def mpl_connect(self, s, func):
"""
Connect event with string *s* to *func*. The signature of *func* is::
def func(event)
where event is a :class:`matplotlib.backend_bases.Event`. The
following events are recognized
- 'button_press_event'
- 'button_release_event'
- 'draw_event'
- 'key_press_event'
- 'key_release_event'
- 'motion_notify_event'
- 'pick_event'
- 'resize_event'
- 'scroll_event'
For the location events (button and key press/release), if the
mouse is over the axes, the variable ``event.inaxes`` will be
set to the :class:`~matplotlib.axes.Axes` the event occurs is
over, and additionally, the variables ``event.xdata`` and
``event.ydata`` will be defined. This is the mouse location
in data coords. See
:class:`~matplotlib.backend_bases.KeyEvent` and
:class:`~matplotlib.backend_bases.MouseEvent` for more info.
Return value is a connection id that can be used with
:meth:`~matplotlib.backend_bases.FigureCanvasBase.mpl_disconnect`.
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = canvas.mpl_connect('button_press_event', on_press)
"""
return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
"""
disconnect callback id cid
Example usage::
cid = canvas.mpl_connect('button_press_event', on_press)
#...later
canvas.mpl_disconnect(cid)
"""
return self.callbacks.disconnect(cid)
def flush_events(self):
"""
Flush the GUI events for the figure. Implemented only for
backends with GUIs.
"""
raise NotImplementedError
def start_event_loop(self,timeout):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def stop_event_loop(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def start_event_loop_default(self,timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
functions for each of the GUI backends can be written. As
such, it throws a deprecated warning.
Call signature::
start_event_loop_default(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
str = "Using default event loop until function specific"
str += " to this GUI is implemented"
warnings.warn(str,DeprecationWarning)
if timeout <= 0: timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
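# Busy-wait in small sleep increments, flushing GUI events on each
# pass, until stop_event_loop_default() clears the flag or the
# timeout elapses.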
while self._looping and counter*timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
def stop_event_loop_default(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
Call signature::
stop_event_loop_default(self)
"""
self._looping = False
class FigureManagerBase:
"""
Helper class for MATLAB mode; wraps everything up into a neat bundle.
Public attributes:
*canvas*
A :class:`FigureCanvasBase` instance
*num*
The figure number
"""
def __init__(self, canvas, num):
self.canvas = canvas
canvas.manager = self # store a pointer to parent
self.num = num
self.canvas.mpl_connect('key_press_event', self.key_press)
def destroy(self):
pass
def full_screen_toggle(self):
pass
def resize(self, w, h):
'For gui backends: resize window in pixels'
pass
def key_press(self, event):
# these bindings happen whether you are over an axes or not
#if event.key == 'q':
# self.destroy() # how cruel to have to destroy oneself!
# return
if event.key == 'f':
self.full_screen_toggle()
# *h*ome or *r*eset mnemonic
elif event.key == 'h' or event.key == 'r' or event.key == "home":
self.canvas.toolbar.home()
# c and v to enable left handed quick navigation
elif event.key == 'left' or event.key == 'c' or event.key == 'backspace':
self.canvas.toolbar.back()
elif event.key == 'right' or event.key == 'v':
self.canvas.toolbar.forward()
# *p*an mnemonic
elif event.key == 'p':
self.canvas.toolbar.pan()
# z*o*om mnemonic
elif event.key == 'o':
self.canvas.toolbar.zoom()
elif event.key == 's':
self.canvas.toolbar.save_figure(self.canvas.toolbar)
if event.inaxes is None:
return
# the mouse has to be over an axes to trigger these
if event.key == 'g':
event.inaxes.grid()
self.canvas.draw()
elif event.key == 'l':
ax = event.inaxes
scale = ax.get_yscale()
if scale=='log':
ax.set_yscale('linear')
ax.figure.canvas.draw()
elif scale=='linear':
ax.set_yscale('log')
ax.figure.canvas.draw()
elif event.key is not None and ((event.key.isdigit() and event.key != '0') or event.key == 'a'):
# 'a' enables all axes
if event.key!='a':
n=int(event.key)-1
for i, a in enumerate(self.canvas.figure.get_axes()):
if event.x is not None and event.y is not None and a.in_axes(event):
if event.key=='a':
a.set_navigate(True)
else:
a.set_navigate(i==n)
def show_popup(self, msg):
"""
Display message in a popup -- GUI only
"""
pass
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
pass
# cursors
class Cursors: #namespace
HAND, POINTER, SELECT_REGION, MOVE = range(4)
cursors = Cursors()
class NavigationToolbar2:
"""
Base class for the navigation cursor, version 2
backends must implement a canvas that handles connections for
'button_press_event' and 'button_release_event'. See
:meth:`FigureCanvasBase.mpl_connect` for more information
They must also define
:meth:`save_figure`
save the current figure
:meth:`set_cursor`
if you want the pointer icon to change
:meth:`_init_toolbar`
create your toolbar widget
:meth:`draw_rubberband` (optional)
draw the zoom to rect "rubberband" rectangle
:meth:`press` (optional)
whenever a mouse button is pressed, you'll be notified with
the event
:meth:`release` (optional)
whenever a mouse button is released, you'll be notified with
the event
:meth:`dynamic_update` (optional)
dynamically update the window while navigating
:meth:`set_message` (optional)
display message
:meth:`set_history_buttons` (optional)
you can change the history back / forward buttons to
indicate disabled / enabled state.
That's it, we'll do the rest!
"""
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# a dict from axes index to a list of view limits
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time of the press
self._idPress = None
self._idRelease = None
self._active = None
self._lastCursor = None
self._init_toolbar()
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
self._button_pressed = None # determined by the button pressed at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
def set_message(self, s):
'display a message on toolbar or in status bar'
pass
def back(self, *args):
'move back up the view lim stack'
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
def dynamic_update(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
'draw a rectangle rubberband to indicate zoom limits'
pass
def forward(self, *args):
'move forward in the view lim stack'
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
'restore the original view'
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
def mouse_move(self, event):
#print 'mouse_move', event.button
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if self._active=='ZOOM':
if self._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = self._xypress[0]
self.draw_rubberband(event, x, y, lastx, lasty)
elif (self._active=='PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
if event.inaxes and event.inaxes.get_navigate():
try: s = event.inaxes.format_coord(event.xdata, event.ydata)
except ValueError: pass
except OverflowError: pass
else:
if len(self.mode):
self.set_message('%s : %s' % (self.mode, s))
else:
self.set_message(s)
else: self.set_message(self.mode)
def pan(self,*args):
'Activate the pan/zoom tool. Pan with left button, zoom with right'
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom mode'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def press(self, event):
'this will be called whenever a mouse button is pressed'
pass
def press_pan(self, event):
'the mouse button press callback in pan/zoom mode'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if x is not None and y is not None and a.in_axes(event) and a.get_navigate():
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.drag_pan)
self.press(event)
def press_zoom(self, event):
'the mouse button press callback in zoom-to-rect mode'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if x is not None and y is not None and a.in_axes(event) \
and a.get_navigate() and a.can_zoom():
self._xypress.append(( x, y, a, i, a.viewLim.frozen(), a.transData.frozen()))
self.press(event)
def push_current(self):
'push the current view limits and position onto the stack'
lims = []; pos = []
for a in self.canvas.figure.get_axes():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lims.append( (xmin, xmax, ymin, ymax) )
# Store both the original and modified positions
pos.append( (
a.get_position(True).frozen(),
a.get_position().frozen() ) )
self._views.push(lims)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
'this will be called whenever a mouse button is released'
pass
def release_pan(self, event):
'the mouse button release callback in pan/zoom mode'
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress: return
self._xypress = []
self._button_pressed=None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
'the drag callback in pan/zoom mode'
for a, ind in self._xypress:
#safer to use the recorded button at the press than current button:
#multiple button can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.dynamic_update()
def release_zoom(self, event):
'the mouse button release callback in zoom-to-rect mode'
if not self._xypress: return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x-lastx)<5 or abs(y-lasty)<5:
self._xypress = None
self.release(event)
self.draw()
return
x0, y0, x1, y1 = lim.extents
# zoom to rect
inverse = a.transData.inverted()
lastx, lasty = inverse.transform_point( (lastx, lasty) )
x, y = inverse.transform_point( (x, y) )
Xmin,Xmax=a.get_xlim()
Ymin,Ymax=a.get_ylim()
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a,la): twinx=True
if a.get_shared_y_axes().joined(a,la): twiny=True
last_a.append(a)
if twinx:
x0, x1 = Xmin, Xmax
else:
if Xmin < Xmax:
if x<lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 < Xmin: x0=Xmin
if x1 > Xmax: x1=Xmax
else:
if x>lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 > Xmin: x0=Xmin
if x1 < Xmax: x1=Xmax
if twiny:
y0, y1 = Ymin, Ymax
else:
if Ymin < Ymax:
if y<lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 < Ymin: y0=Ymin
if y1 > Ymax: y1=Ymax
else:
if y>lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 > Ymin: y0=Ymin
if y1 < Ymax: y1=Ymax
if self._button_pressed == 1:
a.set_xlim((x0, x1))
a.set_ylim((y0, y1))
elif self._button_pressed == 3:
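# Right-button drag zooms *out*: the current view is remapped as if it
# had to fit inside the selected rectangle, using a log-space scale
# factor on log axes and a linear one otherwise.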
if a.get_xscale()=='log':
alpha=np.log(Xmax/Xmin)/np.log(x1/x0)
rx1=pow(Xmin/x0,alpha)*Xmin
rx2=pow(Xmax/x0,alpha)*Xmin
else:
alpha=(Xmax-Xmin)/(x1-x0)
rx1=alpha*(Xmin-x0)+Xmin
rx2=alpha*(Xmax-x0)+Xmin
if a.get_yscale()=='log':
alpha=np.log(Ymax/Ymin)/np.log(y1/y0)
ry1=pow(Ymin/y0,alpha)*Ymin
ry2=pow(Ymax/y0,alpha)*Ymin
else:
alpha=(Ymax-Ymin)/(y1-y0)
ry1=alpha*(Ymin-y0)+Ymin
ry2=alpha*(Ymax-y0)+Ymin
a.set_xlim((rx1, rx2))
a.set_ylim((ry1, ry2))
self.draw()
self._xypress = None
self._button_pressed = None
self.push_current()
self.release(event)
def draw(self):
'redraw the canvases, update the locators'
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw()
def _update_view(self):
'''update the viewlim and position from the view and
position stack for each axes
'''
lims = self._views()
if lims is None: return
pos = self._positions()
if pos is None: return
for i, a in enumerate(self.canvas.figure.get_axes()):
xmin, xmax, ymin, ymax = lims[i]
a.set_xlim((xmin, xmax))
a.set_ylim((ymin, ymax))
# Restore both the original and modified positions
a.set_position( pos[i][0], 'original' )
a.set_position( pos[i][1], 'active' )
self.draw()
def save_figure(self, *args):
'save the current figure'
raise NotImplementedError
def set_cursor(self, cursor):
"""
Set the current cursor to one of the :class:`Cursors`
enums values
"""
pass
def update(self):
'reset the axes stack'
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
'activate zoom to rect mode'
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress=self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease=self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event', self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event', self.release_zoom)
self.mode = 'Zoom to rect mode'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
'enable or disable back/forward button'
pass
| gpl-3.0 |
southpaw94/MachineLearning | Perceptron/Iris.py | 1 | 1993 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from Perceptron import Perceptron
def plotRawData():
plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')
plt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='x', label='versicolor')
plt.xlabel('petal length')
plt.ylabel('sepal length')
plt.legend(loc='upper left')
plt.show()
plt.cla()
def plotErrors():
plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Number of misclassifications')
plt.show()
plt.cla()
def plot_decision_regions(X, y, classifier, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))
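# Classify every grid point: ravel() flattens each mesh, and the
# transpose pairs coordinates into (n_points, 2) samples.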
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class samples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc='upper left')
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
# print(df.tail())
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
X = df.iloc[0:100, [0, 2]].values
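# Columns 0 and 2 are sepal length and petal length; the labels were
# encoded above as -1 (setosa) / +1 (versicolor) to match the
# perceptron's sign-based update rule.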
ppn = Perceptron(eta=0.1, n_iter=10)
ppn.fit(X, y)
plot_decision_regions(X, y, ppn)
plt.cla()
| gpl-2.0 |
srus/django-kickstartvenv | boot/ipython/ipython_config.py | 3 | 19804 | # Configuration file for ipython.
c = get_config()
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# lines of code to run at IPython startup.
c.InteractiveShellApp.exec_lines = [
'from __future__ import unicode_literals, print_function, absolute_import, division'
]
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.TerminalIPythonApp.extra_config_file = u''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.TerminalIPythonApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
# The IPython profile to use.
# c.TerminalIPythonApp.profile = u'default'
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.TerminalIPythonApp.matplotlib = None
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.TerminalIPythonApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = u''
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.TerminalIPythonApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.TerminalInteractiveShell.ast_transformers = []
#
# c.TerminalInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'Linux'
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
#
# c.TerminalInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.TerminalInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
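# Illustrative example (not part of the original config): with autocall
# set to 1, entering `len "hello"` at the prompt is rewritten to
# `len("hello")`; with autocall set to 2, even a bare `len` on a line of
# its own would be called.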
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vi'
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 2.7.3 (default, Feb 27 2014, 19:58:35) \nType "copyright", "credits" or "license" for more information.\n\nIPython 2.2.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
#
# c.TerminalInteractiveShell.separate_out2 = ''
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
#
# c.TerminalInteractiveShell.debug = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#
# c.TerminalInteractiveShell.ipython_dir = ''
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.TerminalInteractiveShell.logstart = False
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.readline_use = True
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
# Start logging to the given file in append mode.
# c.TerminalInteractiveShell.logappend = ''
#
# c.TerminalInteractiveShell.xmode = 'Context'
#
# c.TerminalInteractiveShell.quiet = False
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit exists
# because otherwise you'll spend more time re-flushing a cache that is too
# small than actually working.
# c.TerminalInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
#
# c.PromptManager.color_scheme = 'Linux'
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = u''
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryManager.connection_options = {}
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryManager.enabled = True
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.type_printers = {}
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.singleton_printers = {}
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
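# Illustrative example (not part of the original config): with the default
# of 2, neither `obj._private` nor `obj.__len__` is offered when completing
# `obj.<TAB>`; with 1, only the double-underscore names are hidden.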
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
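# Illustrative example (not part of the original config): with greedy
# enabled, completing on `mylist[0].<TAB>` evaluates `mylist[0]` in order
# to offer its attributes -- hence the warning that it can be unsafe.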
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
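# Illustrative example (not part of the original config): setting
# c.ScriptMagics.script_magics = ['ruby'] registers a `%%ruby` cell magic
# that pipes the cell through the `ruby` interpreter found on the path.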
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
| mit |
mjudsp/Tsallis | examples/classification/plot_digits_classification.py | 34 | 2409 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 4 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# matplotlib.pyplot.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
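# Illustrative note (not part of the original example): the digits dataset
# contains 1797 images of 8x8 pixels, so `data` ends up with shape (1797, 64).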
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
rexshihaoren/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
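# Quick sanity check, added for illustration (not in the original script):
# both unit-ball contours start at height 1 on the w_1 axis.
assert np.allclose(l1(np.array([0.0])), 1.0)
assert np.allclose(l2(np.array([0.0])), 1.0)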
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # avoid exactly 0.5: it would zero the (2 - 4 * z) denominator in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
gtrensch/nest-simulator | pynest/examples/clopath_synapse_small_network.py | 8 | 7493 | # -*- coding: utf-8 -*-
#
# clopath_synapse_small_network.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Clopath Rule: Bidirectional connections
---------------------------------------
This script simulates a small network of ten excitatory and three
inhibitory ``aeif_psc_delta_clopath`` neurons. The neurons are randomly connected
and driven by 500 Poisson generators. The synapses from the Poisson generators
to the excitatory population and those among the neurons of the network
are Clopath synapses. The rate of the Poisson generators is modulated with
a Gaussian profile whose center shifts randomly each 100 ms between ten
equally spaced positions.
This setup demonstrates that the Clopath synapse is able to establish
bidirectional connections. The example is adapted from [1]_ (cf. fig. 5).
References
~~~~~~~~~~
.. [1] Clopath C, Büsing L, Vasilaki E, Gerstner W (2010). Connectivity reflects coding:
a model of voltage-based STDP with homeostasis.
Nature Neuroscience 13:3, 344--352
"""
import nest
import numpy as np
import matplotlib.pyplot as plt
import random
##############################################################################
# Set the parameters
simulation_time = 1.0e4
resolution = 0.1
delay = resolution
# Poisson_generator parameters
pg_A = 30. # amplitude of Gaussian
pg_sigma = 10. # std deviation
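# Each Poisson generator j is driven with a Gaussian rate profile,
# re-centred every 100 ms in the simulation loop below (illustration):
#   rate_j = pg_A * exp(-(j - pg_mu)**2 / (2 * pg_sigma**2))
# (the loop additionally scales the rates by a factor of 1.75).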
nest.ResetKernel()
nest.SetKernelStatus({'resolution': resolution})
# Create neurons and devices
nrn_model = 'aeif_psc_delta_clopath'
nrn_params = {'V_m': -30.6,
'g_L': 30.0,
'w': 0.0,
'tau_plus': 7.0,
'tau_minus': 10.0,
'tau_w': 144.0,
'a': 4.0,
'C_m': 281.0,
'Delta_T': 2.0,
'V_peak': 20.0,
't_clamp': 2.0,
'A_LTP': 8.0e-6,
'A_LTD': 14.0e-6,
'A_LTD_const': False,
'b': 0.0805,
'u_ref_squared': 60.0**2}
pop_exc = nest.Create(nrn_model, 10, nrn_params)
pop_inh = nest.Create(nrn_model, 3, nrn_params)
##############################################################################
# We need parrot neurons since Poisson generators can only be connected
# with static connections
pop_input = nest.Create('parrot_neuron', 500) # helper neurons
pg = nest.Create('poisson_generator', 500)
wr = nest.Create('weight_recorder')
##############################################################################
# First connect Poisson generators to helper neurons
nest.Connect(pg, pop_input, 'one_to_one', {'synapse_model': 'static_synapse',
'weight': 1.0, 'delay': delay})
##############################################################################
# Create all the connections
nest.CopyModel('clopath_synapse', 'clopath_input_to_exc',
{'Wmax': 3.0})
conn_dict_input_to_exc = {'rule': 'all_to_all'}
syn_dict_input_to_exc = {'synapse_model': 'clopath_input_to_exc',
'weight': nest.random.uniform(0.5, 2.0),
'delay': delay}
nest.Connect(pop_input, pop_exc, conn_dict_input_to_exc,
syn_dict_input_to_exc)
# Create input->inh connections
conn_dict_input_to_inh = {'rule': 'all_to_all'}
syn_dict_input_to_inh = {'synapse_model': 'static_synapse',
'weight': nest.random.uniform(0.0, 0.5),
'delay': delay}
nest.Connect(pop_input, pop_inh, conn_dict_input_to_inh, syn_dict_input_to_inh)
# Create exc->exc connections
nest.CopyModel('clopath_synapse', 'clopath_exc_to_exc',
{'Wmax': 0.75, 'weight_recorder': wr})
syn_dict_exc_to_exc = {'synapse_model': 'clopath_exc_to_exc', 'weight': 0.25,
'delay': delay}
conn_dict_exc_to_exc = {'rule': 'all_to_all', 'allow_autapses': False}
nest.Connect(pop_exc, pop_exc, conn_dict_exc_to_exc, syn_dict_exc_to_exc)
# Create exc->inh connections
syn_dict_exc_to_inh = {'synapse_model': 'static_synapse',
'weight': 1.0, 'delay': delay}
conn_dict_exc_to_inh = {'rule': 'fixed_indegree', 'indegree': 8}
nest.Connect(pop_exc, pop_inh, conn_dict_exc_to_inh, syn_dict_exc_to_inh)
# Create inh->exc connections
syn_dict_inh_to_exc = {'synapse_model': 'static_synapse',
'weight': 1.0, 'delay': delay}
conn_dict_inh_to_exc = {'rule': 'fixed_outdegree', 'outdegree': 6}
nest.Connect(pop_inh, pop_exc, conn_dict_inh_to_exc, syn_dict_inh_to_exc)
##############################################################################
# Randomize the initial membrane potential
pop_exc.V_m = nest.random.normal(-60., 25.)
pop_inh.V_m = nest.random.normal(-60., 25.)
##############################################################################
# Simulation divided into intervals of 100ms for shifting the Gaussian
sim_interval = 100.
for i in range(int(simulation_time/sim_interval)):
# set rates of poisson generators
rates = np.empty(500)
# pg_mu will be randomly chosen out of 25,75,125,...,425,475
pg_mu = 25 + random.randint(0, 9) * 50
for j in range(500):
rates[j] = pg_A * np.exp((-1 * (j - pg_mu)**2) / (2 * pg_sigma**2))
pg[j].rate = rates[j]*1.75
nest.Simulate(sim_interval)
##############################################################################
# Plot results
fig, ax = plt.subplots(1, sharex=False)
# Plot synapse weights of the synapses within the excitatory population
# Sort weights according to sender and reshape
exc_conns = nest.GetConnections(pop_exc, pop_exc)
exc_conns_senders = np.array(exc_conns.source)
exc_conns_targets = np.array(exc_conns.target)
exc_conns_weights = np.array(exc_conns.weight)
idx_array = np.argsort(exc_conns_senders)
targets = np.reshape(exc_conns_targets[idx_array], (10, 10 - 1))
weights = np.reshape(exc_conns_weights[idx_array], (10, 10 - 1))
# Sort according to target
for i, (trgs, ws) in enumerate(zip(targets, weights)):
idx_array = np.argsort(trgs)
weights[i] = ws[idx_array]
weight_matrix = np.zeros((10, 10))
tu9 = np.triu_indices_from(weights)
tl9 = np.tril_indices_from(weights, -1)
tu10 = np.triu_indices_from(weight_matrix, 1)
tl10 = np.tril_indices_from(weight_matrix, -1)
weight_matrix[tu10[0], tu10[1]] = weights[tu9[0], tu9[1]]
weight_matrix[tl10[0], tl10[1]] = weights[tl9[0], tl9[1]]
# Difference between initial and final value
init_w_matrix = np.ones((10, 10))*0.25
init_w_matrix -= np.identity(10)*0.25
cax = ax.imshow(weight_matrix - init_w_matrix)
cbarB = fig.colorbar(cax, ax=ax)
ax.set_xticks([0, 2, 4, 6, 8])
ax.set_xticklabels(['1', '3', '5', '7', '9'])
ax.set_yticks([0, 2, 4, 6, 8])
ax.set_yticklabels(['1', '3', '5', '7', '9'])
ax.set_xlabel("to neuron")
ax.set_ylabel("from neuron")
ax.set_title("Change of syn weights before and after simulation")
plt.show()
| gpl-2.0 |
appapantula/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
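# Illustrative check (not in the original example): the nudged set is the
# original images plus four one-pixel shifts, i.e. five times the samples:
#   X5, Y5 = nudge_dataset(X0, Y0); X5.shape[0] == 5 * X0.shape[0]
# (X0 and Y0 are hypothetical placeholder arrays.)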
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
selective-inference/selective-inference | doc/learning_examples/HIV/stability_CV_6000.py | 3 | 3166 | import functools
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
# load in the X matrix
from selection.tests.instance import HIV_NRTI
X_full = HIV_NRTI(datafile="NRTI_DATA.txt", standardize=False)[0]
from selection.learning.utils import full_model_inference, liu_inference, pivot_plot
from selection.learning.core import split_sampler, keras_fit
from selection.learning.Rutils import lasso_glmnet, cv_glmnet_lam
boot_design = False
def simulate(s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=6000, seed=0):
# description of statistical problem
n, p = X_full.shape
if boot_design:
idx = np.random.choice(np.arange(n), n, replace=True)
X = X_full[idx] # bootstrap X to make it really an IID sample, i.e. don't condition on X throughout
X += 0.1 * np.std(X) * np.random.standard_normal(X.shape) # to make non-degenerate
else:
X = X_full.copy()
X = X - np.mean(X, 0)[None, :]
X = X / np.std(X, 0)[None, :]
n, p = X.shape
truth = np.zeros(p)
truth[:s] = np.linspace(signal[0], signal[1], s)
np.random.shuffle(truth)
truth /= np.sqrt(n)
truth *= sigma
y = X.dot(truth) + sigma * np.random.standard_normal(n)
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = np.linalg.norm(resid)**2 / (n-p)
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
print(dispersion, sigma**2)
splitting_sampler = split_sampler(X * y[:, None], covS)
def meta_algorithm(X, XTXi, resid, sampler):
S = sampler(scale=0.5) # deterministic with scale=0
ynew = X.dot(XTXi).dot(S) + resid # will be ok for n>p and non-degen X
G = lasso_glmnet(X, ynew, *[None]*4)
select = G.select(seed=seed)
return set(list(select[0]))
selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)
# run selection algorithm
df = full_model_inference(X,
y,
truth,
selection_algorithm,
splitting_sampler,
success_params=(6, 10),
B=B,
fit_probability=keras_fit,
fit_args={'epochs':10, 'sizes':[100]*5, 'dropout':0., 'activation':'relu'})
return df
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
U = np.linspace(0, 1, 101)
plt.clf()
init_seed = np.fabs(np.random.standard_normal() * 500)
for i in range(500):
df = simulate(seed=init_seed+i)
csvfile = 'HIV_stability_CV_6000.csv'
outbase = csvfile[:-4]
if df is not None or i > 0:
try:
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
if df is not None:
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
pivot_ax, lengths_ax = pivot_plot(df, outbase)
| bsd-3-clause |
evanthebouncy/nnhmm | radar_lstm/draw.py | 6 | 2538 | import numpy as np
import matplotlib.pylab as plt
import multiprocessing as mp
from matplotlib import figure
# m = [[0.0, 1.47, 2.43, 3.44, 1.08, 2.83, 1.08, 2.13, 2.11, 3.7], [1.47, 0.0, 1.5, 2.39, 2.11, 2.4, 2.11, 1.1, 1.1, 3.21], [2.43, 1.5, 0.0, 1.22, 2.69, 1.33, 3.39, 2.15, 2.12, 1.87], [3.44, 2.39, 1.22, 0.0, 3.45, 2.22, 4.34, 2.54, 3.04, 2.28], [1.08, 2.11, 2.69, 3.45, 0.0, 3.13, 1.76, 2.46, 3.02, 3.85], [2.83, 2.4, 1.33, 2.22, 3.13, 0.0, 3.83, 3.32, 2.73, 0.95], [1.08, 2.11, 3.39, 4.34, 1.76, 3.83, 0.0, 2.47, 2.44, 4.74], [2.13, 1.1, 2.15, 2.54, 2.46, 3.32, 2.47, 0.0, 1.78, 4.01], [2.11, 1.1, 2.12, 3.04, 3.02, 2.73, 2.44, 1.78, 0.0, 3.57], [3.7, 3.21, 1.87, 2.28, 3.85, 0.95, 4.74, 4.01, 3.57, 0.0]]
FIG = plt.figure()
def draw_coord(coord, name, lab=[1.0, 0.0]):
color = 1.0 if lab[0] > lab[1] else -1.0
ret = np.zeros(shape=[20,20,1])
coord_x, coord_y = coord
coord_x_idx = np.argmax(coord_x)
coord_y_idx = np.argmax(coord_y)
ret[coord_x_idx][coord_y_idx][0] = color
draw(ret, name)
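# Illustrative usage (not in the original file): draw_coord expects a pair
# of one-hot vectors of length 20, so a point at grid cell (3, 7) would be
#   x = np.zeros(20); x[3] = 1.0
#   y = np.zeros(20); y[7] = 1.0
#   draw_coord((x, y), 'coord.png')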
def draw(m, name):
FIG.clf()
matrix = m
orig_shape = np.shape(matrix)
# lose the channel shape in the end of orig_shape
new_shape = orig_shape[:-1]
matrix = np.reshape(matrix, new_shape)
ax = FIG.add_subplot(1,1,1)
ax.set_aspect('equal')
plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.gray)
# plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.ocean)
plt.colorbar()
plt.savefig(name)
def draw_obs(obs, name):
ret_shape = [20, 20, 1]
ret = np.zeros(shape=ret_shape)
for ii, ob in enumerate(obs):
if ob.max() > 0.0:
idxx = np.unravel_index(ob.argmax(), ob.shape)
if idxx[-1] == 0:
ret[idxx[0]][idxx[1]] = 1.0 * ii
else:
ret[idxx[0]][idxx[1]] = -1.0 * ii
draw(ret, name)
def draw_annotate(x_cords, y_cords, anns, name):
FIG.clf()
y = x_cords
z = y_cords
n = anns
fig = FIG
ax = fig.add_subplot(1,1,1)
ax.set_xlim([0,20])
ax.set_ylim([0,20])
ax.scatter(z, y)
for i, txt in enumerate(n):
ax.annotate(txt, (z[i],y[i]))
fig.savefig(name)
def draw_trace(trace, name):
x_coords = []
y_coords = []
anno = []
for i, stuff in enumerate(trace):
ob, inv = stuff
# x_coords.append(inv[0])
# y_coords.append(inv[1])
# anno.append("X"+str(i))
    if ob is not None:
ob_coord, ob_outcome = ob
x_coords.append(ob_coord[0])
y_coords.append(ob_coord[1])
anno.append("O"+str(i)+str(int(ob_outcome[0])))
draw_annotate(x_coords, y_coords, anno, name)
| mit |
kmike/scikit-learn | examples/plot_lda_qda.py | 12 | 4758 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import pylab as pl
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
pl.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = pl.subplot(2, 2, fig_index)
if fig_index == 1:
pl.title('Linear Discriminant Analysis')
pl.ylabel('Data with fixed covariance')
elif fig_index == 2:
pl.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
pl.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
pl.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
pl.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
pl.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
pl.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = pl.xlim()
y_min, y_max = pl.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
pl.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
pl.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
pl.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
pl.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
    # filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA()
y_pred = lda.fit(X, y, store_covariance=True).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
pl.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
pl.axis('tight')
pl.suptitle('LDA vs QDA')
pl.show()
| bsd-3-clause |
BoldingBruggeman/gotm | gui.py/xmlplot/data/gotmtext.py | 1 | 35659 | import os, StringIO
import numpy
import xmlstore.xmlstore
import xmlplot.common
class LinkedFileVariableStore(xmlplot.common.VariableStore,xmlstore.datatypes.DataFileEx):
# XML store-derived class for storing (cached) metadata of a data file,
# such as coordinate ranges.
# This is implemented as XML store (rather than Python object) because it
# needs to be saved in a descriptive form along with the data files themselves.
class DataFileCache(xmlstore.xmlstore.TypedStore):
@classmethod
def getSchemaInfo(cls):
return xmlstore.xmlstore.schemainfocache[os.path.join(xmlplot.common.getDataRoot(),'schemas/datafilecache')]
def __init__(self,valueroot=None,adddefault = True,schema=None):
if schema is None: schema = os.path.join(xmlplot.common.getDataRoot(),'schemas/datafilecache/0001.schema')
xmlstore.xmlstore.TypedStore.__init__(self,schema,valueroot,adddefault=adddefault)
class LinkedFileVariable(xmlplot.common.Variable):
def __init__(self,store,data,index):
xmlplot.common.Variable.__init__(self,store)
self.store = store
self.data = data
self.index = index
def getName_raw(self):
return self.data[0]
def getLongName(self):
return self.data[1]
def getUnit(self):
return self.data[2]
def getDimensions_raw(self):
return self.store.dimensionorder[:]
def getSlice(self,bounds):
assert False, 'This function must be implemented by inheriting class.'
@classmethod
def createTypedStore(ownclass):
return LinkedFileVariableStore.DataFileCache()
linkedfilename = 'linkedfile_metadata.xml'
rootnodename = 'DataFile'
@classmethod
def createObject(ownclass,datafile,context,infonode,nodename):
finfo = xmlstore.util.findDescendantNode(infonode,['fileinfo'])
        assert finfo is not None, 'Node "%s" lacks "fileinfo" attribute.' % nodename
store = None
type = finfo.getAttribute('type')
if type=='pointsintime':
store = LinkedMatrix(datafile,context,infonode,nodename,type=0,dimensions={'time':{'label':'time','datatype':'datetime','preferredaxis':'x'}},dimensionorder=('time',))
elif type=='profilesintime':
store = LinkedProfilesInTime(datafile,context,infonode,nodename,dimensions={'time':{'label':'time','datatype':'datetime','preferredaxis':'x'},'z':{'label':'depth','unit':'m','preferredaxis':'y'}},dimensionorder=('time','z'))
elif type=='singleprofile':
store = LinkedMatrix(datafile,context,infonode,nodename,type=1)
else:
            assert False, 'Linked file has unknown type "%s".' % type
return store
# Dictionary linking our data type names to MatPlotLib data types.
# Note that times are stored as numeric values (via matplotlib.dates.date2num)
mpldatatypes = {'datetime':numpy.float64,
'float': numpy.float32,
'float32': numpy.float32,
'float64': numpy.float64}
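    # Illustrative note (not part of the original module): a 'datetime'
    # value is stored as a floating-point day number (see the date2num /
    # num2date calls further down), which is why it maps to float64 above.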
def __init__(self,datafile,context,infonode,nodename,dimensions={},dimensionorder=(),variables=[],datatype='float',defaultfilename='data'):
xmlplot.common.VariableStore.__init__(self)
xmlstore.datatypes.DataFileEx.__init__(self,datafile,context,infonode,nodename)
# Copy data from supplied dimensions and variables
self.dimensions = {}
for dimname,dimdata in dimensions.iteritems():
self.dimensions[dimname] = xmlplot.common.VariableStore.getDimensionInfo_raw(self,None)
self.dimensions[dimname].update(dimdata)
self.vardata = list(variables)
self.dimensionorder = list(dimensionorder)
# Supplement dimensions and variables with information in
# supplied XML node (if any)
self.filename = defaultfilename
if infonode is not None:
finfo = xmlstore.util.findDescendantNode(infonode,['fileinfo'])
self.filename = infonode.getAttribute('name')
if finfo.hasAttribute('datatype'): datatype = finfo.getAttribute('datatype')
# Get variables
fvars = xmlstore.util.findDescendantNode(finfo,['filevariables'])
if fvars is not None:
for ch in fvars.childNodes:
if ch.nodeType==ch.ELEMENT_NODE and ch.localName=='filevariable':
                        assert ch.hasAttribute('name'), '"name" attribute of filevariable is missing, label = "%s".' % ch.getAttribute('label')
name = ch.getAttribute('name')
unit = ch.getAttribute('unit')
if ch.hasAttribute('label'):
longname = ch.getAttribute('label')
else:
longname = name
self.vardata.append((name,longname,unit))
# Get dimensions
fdims = xmlstore.util.findDescendantNode(finfo,['filedimensions'])
if fdims is not None:
for ch in fdims.childNodes:
if ch.nodeType==ch.ELEMENT_NODE and ch.localName=='filedimension':
dimdata = xmlplot.common.VariableStore.getDimensionInfo_raw(self,None)
assert ch.hasAttribute('name'), '"name" attribute of filedimension is missing, label = "%s".' % ch.getAttribute('label')
id = ch.getAttribute('name')
if ch.hasAttribute('label'):
dimdata['label'] = ch.getAttribute('label')
else:
dimdata['label'] = id
if ch.hasAttribute('unit'): dimdata['unit'] = ch.getAttribute('unit')
if ch.hasAttribute('datatype'): dimdata['datatype'] = ch.getAttribute('datatype')
if ch.hasAttribute('preferredaxis'): dimdata['preferredaxis'] = ch.getAttribute('preferredaxis')
self.dimensions[id] = dimdata
self.dimensionorder.append(id)
self.data = None
self.datatype = datatype
def copy(self):
"""Returns a copy of the LinkedFileVariableStore object.
Currently this copies descriptive metadata, but no actual values.
"""
return LinkedFileVariableStore(None,None,None,None,self.dimensions,self.dimensionorder,self.vardata,self.datatype,defaultfilename=self.filename)
def clear(self,clearfile=True):
"""Clears all data, and by default also clears the original datafile
(if any). The metadata set on the object will be updated accordingly.
"""
self.dataChanged(clearfile=clearfile)
def setDataFile(self,datafile=None,cleardata=True):
"""Attaches a new data file as source of data. This will clear all
metadata set on the object, and by default it will also clear any
parsed data.
"""
xmlstore.datatypes.DataFileEx.setDataFile(self,datafile)
if cleardata: self.data = None
def setData(self,data,clearfile=True):
"""Sets a new data block, automatically updating the metadata set on
the object. By default it will clear the original datafile (if any).
"""
self.data = data
self.dataChanged(clearfile=clearfile)
def dataChanged(self,clearfile=True):
"""Event handler, to be called just after the data has changed.
"""
if clearfile: self.setDataFile(None,cleardata=False)
if self.data is None: return
#print '%s - caching validation result and dimension boundaries.' % self.filename
metadata = self.getMetaData()
for dimname in self.getDimensionNames():
dimnode = metadata['Dimensions'].getChildById('Dimension',id=dimname,create=True)
assert dimnode is not None, 'Failed to create Dimension node for %s.' % dimname
dimrange = self.calculateDimensionRange(dimname)
if dimrange is None: continue
minval,maxval = dimrange
if self.getDimensionInfo_raw(dimname)['datatype']=='datetime':
dimnode['IsTimeDimension'].setValue(True)
dimnode['MinimumTime'].setValue(xmlplot.common.num2date(minval))
dimnode['MaximumTime'].setValue(xmlplot.common.num2date(maxval))
else:
dimnode['IsTimeDimension'].setValue(False)
dimnode['Minimum'].setValue(minval)
dimnode['Maximum'].setValue(maxval)
metadata['Valid'].setValue(True)
def getDimensionNames(self):
"""Returns the names of data dimensions.
"""
return self.dimensionorder[:]
def getDimensionInfo_raw(self,dimname):
"""Returns information on the specified data dimension.
see VariableStore.getDimensionInfo for the type of
information returned.
"""
return self.dimensions[dimname]
def getDimensionRange(self,dimname):
"""Returns the range, i.e., the tuple (minimum, maximum) of the
specified dimension.
"""
if self.data is None and (self.datafile is None or not self.datafile.isValid()): return None
metadata = self.getMetaData()
dimnode = metadata['Dimensions'].getChildById('Dimension',dimname)
if dimnode is None:
try:
self.getData()
except Exception,e:
pass
dimnode = metadata['Dimensions'].getChildById('Dimension',dimname)
assert dimnode is not None, 'Cannot locate node for dimension %s in data file cache.' % dimname
if metadata['Valid'].getValue()==False: return None
#print '%s - using cached bounds for %s.' % (self.filename,dimname)
if dimnode['IsTimeDimension'].getValue():
minval = dimnode['MinimumTime'].getValue()
maxval = dimnode['MaximumTime'].getValue()
else:
minval = dimnode['Minimum'].getValue()
maxval = dimnode['Maximum'].getValue()
if minval is None and maxval is None: return None
return (minval,maxval)
def hasExpensiveValidate(self):
return True
def validate(self,templatenode,callback=None):
if self.data is None and (self.datafile is None or not self.datafile.isValid()): return False
metadata = self.getMetaData()
valid = metadata['Valid'].getValue()
if valid is None:
try:
self.getData(callback=callback)
except Exception,e:
pass
valid = metadata['Valid'].getValue()
assert valid is not None, 'Information on validity of data file %s not in data file cache.' % self.filename
#print '%s - using cached validation result.' % self.filename
return valid
def getVariableNames_raw(self):
"""Returns the names of all variables in the store.
"""
return [data[0] for data in self.vardata]
def getVariableLongNames_raw(self):
"""Returns the long name of the specified variable.
"""
return dict([(data[0],data[1]) for data in self.vardata])
def getVariable_raw(self,varname):
"""Returns the specified variable as LinkedFileVariable object.
"""
for (index,data) in enumerate(self.vardata):
if data[0]==varname:
return self.variableclass(self,data,index)
return None
def loadFromFile(self,path):
datafile = xmlstore.datatypes.DataContainerDirectory.DataFileFile(path)
self.setDataFile(datafile)
datafile.release()
def saveToFile(self,path,callback=None):
"""Saves the current data to file."""
if self.datafile is not None:
self.datafile.saveToFile(path)
else:
f = open(path,'w')
self.writeData(f,callback=callback)
f.close()
def getDataFile(self,callback=None):
if self.datafile is None:
assert self.data is not None, 'getDataFile called with both the data file and the data in memory are not set.'
# Data not present as data file object. Create one in memory on the spot.
target = StringIO.StringIO()
self.writeData(target,callback=callback)
self.datafile = xmlstore.datatypes.DataFileMemory(target.getvalue(),self.filename+'.dat')
target.close()
return self.datafile.addref()
def writeData(self,target,callback=None):
"""Writes the current data to a file-like object."""
assert False, 'writeData must be implemented by derived class.'
def getData(self,callback=None):
if self.data is None and self.datafile is not None:
try:
data = self.parseDataFile(callback)
except Exception,e:
self.getMetaData()['Valid'].setValue(False)
raise
self.setData(data,clearfile=False)
return self.data
def parseDataFile(self,callback=None):
assert False, 'parseDataFile must be implemented by derived class.'
class LinkedMatrix(LinkedFileVariableStore):
class LinkedMatrixVariable(LinkedFileVariableStore.LinkedFileVariable):
def getSlice(self,bounds):
slice = self.Slice(self.getDimensions())
# Get a reference to all data, and stop if the coordinate dimension is empty.
data = self.store.getData()
if data[0].shape[0]==0: return slice
if slice.ndim==1:
slice.coords[0] = data[0][:]
slice.data = data[-1][:,self.index]
slice.generateStaggered()
return slice
def getShape(self):
data = self.store.getData()
if data[0].shape[0]==0: return tuple()
return data[-1][:,self.index].shape
def __init__(self,datafile=None,context=None,infonode=None,nodename=None,type=0,dimensions={},dimensionorder=(),variables=[],defaultfilename='data'):
LinkedFileVariableStore.__init__(self,datafile,context,infonode,nodename,dimensions,dimensionorder,variables,defaultfilename=defaultfilename)
self.variableclass = self.LinkedMatrixVariable
assert len(self.dimensions)<=1, 'Linkedmatrix objects can only be used with 0 or 1 coordinate dimensions, but %i are present.' % len(self.dimensions)
self.type = type
def copy(self):
"""Returns a copy of the LinkedMatrix object.
Currently this copies descriptive metadata, but no actual values.
"""
return LinkedMatrix(dimensions=self.dimensions,dimensionorder=self.dimensionorder,variables=self.vardata,type=self.type,defaultfilename=self.filename)
def clear(self,clearfile=True):
"""Clears all contained data."""
self.data = []
if len(self.dimensions)==1:
dimdatatype = self.dimensions[self.dimensionorder[0]]['datatype']
self.data.append(numpy.empty((0,),self.mpldatatypes[dimdatatype]))
self.data.append(numpy.empty((0,len(self.vardata)),self.mpldatatypes[self.datatype]))
LinkedFileVariableStore.clear(self,clearfile=clearfile)
def calculateDimensionRange(self,dimname):
ind = self.dimensionorder.index(dimname)
dimdata = self.getData()[ind]
if 0 in dimdata.shape: return None
return (dimdata.min(),dimdata.max())
def parseDataFile(self,callback=None):
if self.datafile is None or not self.datafile.isValid(): return None
if self.type==0:
# Unknown number of rows
res = self.loadDataFile_UnknownCount(callback)
elif self.type==1:
# Known number of rows
res = self.loadDataFile_KnownCount(callback)
else:
assert False, 'unknown LinkedMatrix type %i.' % self.type
return res
def loadDataFile_KnownCount(self,callback):
"""Load a data from a DataFile object with the number of lines listed on the first line.
"""
# Get number of dimensions and variables.
dimcount = len(self.dimensions)
varcount = len(self.vardata)
# Get the size of the file (in bytes, may be None if the size is not known)
# This will be used in combination with the position of the file pointer to report progress.
        filesize = self.datafile.getSize()
        if filesize is not None: filesize = float(filesize)
# Access the data through some read-only file-like object.
f = self.datafile.getAsReadOnlyFile()
# First line contains number of observations to follow.
line = f.readline()
if line=='':
raise Exception('File is empty. Expected number of observations on first line.')
obscount = int(line)
# Allocate arrays for storage of coordinates and variable values
values = numpy.empty((obscount,varcount),self.mpldatatypes[self.datatype])
if dimcount==1:
# One coordinate dimension present; allocate an array for its values.
dimtype = self.dimensions[self.dimensionorder[0]]['datatype']
dimisdate = (dimtype=='datetime')
if dimisdate:
prevdate = None
dimvalues = numpy.empty((obscount,),self.mpldatatypes[dimtype])
for irow in range(values.shape[0]):
# Read a line (stop if end-of-file was reached)
line = f.readline()
if line=='':
raise Exception('End-of-file reached after line %i, but %i more rows of observations were expected.' % (irow+1,values.shape[0]-irow))
iline = irow+2 # One-based line index
if dimcount==1:
if dimisdate:
# Read the date + time
try:
refvals = map(int,(line[:4],line[5:7],line[8:10],line[11:13],line[14:16],line[17:19]))
except ValueError:
raise Exception('Line %i does not start with date and time (yyyy-mm-dd hh:mm:ss). Line contents: %s' % (iline,line))
dimvalue = xmlstore.util.dateTimeFromTuple(refvals)
if prevdate is not None and dimvalue<prevdate:
raise Exception('Line %i: observation time %s lies before previous observation time %s. Times should be increasing.' % (iline,xmlstore.util.formatDateTime(dimvalue),xmlstore.util.formatDateTime(prevdate)))
prevdate = dimvalue
dimvalue = xmlplot.common.date2num(dimvalue)
# Read variable values.
data = line[19:].split()
else:
# Split line, convert values to floats and store first as coordinate.
data = map(float,line.split())
dimvalue = data.pop(0)
else:
data = map(float,line.split())
if len(data)<varcount:
raise Exception('Line %i contains only %i observations, whereas %i are expected (%s).' % (iline,len(data),varcount,', '.join([d[1] for d in self.vardata])))
# Store time and values.
if dimcount==1: dimvalues[irow] = dimvalue
values[irow,:] = data[:varcount]
# Inform caller about progress
if callback is not None and iline%1000==0:
progress = None
if filesize is not None:
try:
progress = float(f.tell())/filesize
except AttributeError:
progress = None
callback(progress,'read %i lines.' % iline)
# Close data file
f.close()
# Succeeded in reading the data: store them internally.
if dimcount==1:
return [dimvalues,values]
else:
return [values]
def loadDataFile_UnknownCount(self,callback):
"""Load a data file with the number of lines not known in advance.
"""
varcount = len(self.vardata)
# Get the size of the file (in bytes, may be None if the size is not known)
# This will be used in combination with the position of the file pointer to report progress.
filesize = float(self.datafile.getSize())
# Access the data through some read-only file-like object.
f = self.datafile.getAsReadOnlyFile()
# Get the data type to use for the dimension
dimdatatype = self.dimensions[self.dimensionorder[0]]['datatype']
# Rows per memory slab: each slab holds about 125000 values, i.e. roughly 1 MB at 8 bytes per value.
buffersize = 125000/(varcount+1)
times = []
values = []
iline = 0
while True:
# Read a line (stop if end-of-file was reached)
line = f.readline()
if line=='': break
# Calculate position in current memory slab, create new slab if needed.
ipos = iline%buffersize
if ipos==0:
times.append(numpy.empty((buffersize,), self.mpldatatypes[dimdatatype]))
values.append(numpy.empty((buffersize,varcount),self.mpldatatypes[self.datatype]))
# Increment current line number
iline += 1
# Read the date + time
try:
refvals = map(int,(line[:4],line[5:7],line[8:10],line[11:13],line[14:16],line[17:19]))
except ValueError:
raise Exception('Line %i does not start with date and time (yyyy-mm-dd hh:mm:ss). Line contents: %s' % (iline,line))
curdate = xmlstore.util.dateTimeFromTuple(refvals)
times[-1][ipos] = xmlplot.common.date2num(curdate)
# Read values.
data = line[19:].split()
if len(data)<varcount:
raise Exception('Line %i contains only %i observations, whereas %i are expected (%s).' % (iline,len(data),varcount,', '.join([d[1] for d in self.vardata])))
values[-1][ipos,:] = map(float,data[:varcount])
# Inform caller about progress
if callback is not None and iline%1000==0:
progress = None
if filesize is not None:
try:
progress = float(f.tell())/filesize
except AttributeError:
progress = None
callback(progress,'read %i lines.' % iline)
if len(times)>0:
# Delete unused rows in last memory slab.
times [-1] = times [-1][0:iline%buffersize]
values[-1] = values[-1][0:iline%buffersize,:]
# Concatenate memory slab.
times = numpy.concatenate(times,axis=0)
values = numpy.concatenate(values,axis=0)
else:
# No data read: create empty time and value arrays
times = numpy.zeros((0,),self.mpldatatypes[dimdatatype])
values = numpy.zeros((0,varcount),self.mpldatatypes[self.datatype])
# Close data file
f.close()
# Succeeded in reading the data: store them internally.
return [times,values]
def writeData(self,target,callback=None,missing=''):
"""Writes the current data to a file-like object."""
# Get number of dimensions and variables, and get shortcuts to the data.
dimcount = len(self.dimensions)
data = self.getData()
if dimcount==1:
# One coordinate dimension present; get the data type of that dimension.
dimdata = data[0]
dimtype = self.dimensions.values()[0]['datatype']
dimisdate = (dimtype=='datetime')
if dimisdate: dimdata = xmlplot.common.num2date(dimdata)
varcount = len(self.vardata)
vardata = data[-1]
# Get the mask of the data (numpy.ma.nomask if not set)
mask = numpy.ma.getmask(vardata)
if self.type==1:
# Write first line with number of observations.
target.write('%i\n' % vardata.shape[0])
# Write lines with observations.
for iline in range(vardata.shape[0]):
if dimcount==1:
if dimisdate:
target.write(xmlstore.util.formatDateTime(dimdata[iline],iso=True))
else:
target.write('%.12g' % dimdata[iline])
for ivar in range(varcount):
if mask is not numpy.ma.nomask and mask[iline,ivar]:
target.write('\t%s' % missing)
else:
target.write('\t%.12g' % vardata[iline,ivar])
target.write('\n')
if callback is not None and iline%1000==0:
callback(float(iline)/vardata.shape[0],'wrote %i lines.' % iline)
class LinkedProfilesInTime(LinkedFileVariableStore):
class LinkedProfilesInTimeVariable(LinkedFileVariableStore.LinkedFileVariable):
def getSlice(self,bounds):
varslice = self.Slice(self.getDimensions())
data = self.store.getGriddedData()
if data[0].shape[0]==0: return varslice
timebounds = xmlplot.common.findIndices((bounds[0].start,bounds[0].stop),data[0])
varslice.coords[0] = data[0][timebounds[0]:timebounds[1]+1]
varslice.coords[1] = data[1]
varslice.data = data[2][timebounds[0]:timebounds[1]+1,:,self.index]
varslice.generateStaggered()
return varslice
def getShape(self):
data = self.store.getGriddedData()
if data[0].shape[0]==0: return tuple()
return data[-1][:,:,self.index].shape
def __init__(self,datafile,context,infonode,nodename,dimensions=[],dimensionorder=(),variables=[],defaultfilename='data'):
LinkedFileVariableStore.__init__(self,datafile,context,infonode,nodename,dimensions,dimensionorder,variables,defaultfilename=defaultfilename)
self.variableclass = self.LinkedProfilesInTimeVariable
def copy(self):
"""Returns a copy of the LinkedProfilesInTime object.
Currently this copies descriptive metadata, but no actual values.
"""
return LinkedProfilesInTime(None,None,None,None,dimensions=self.dimensions,dimensionorder=self.dimensionorder,variables=self.vardata,defaultfilename=self.filename)
def setDataFile(self,datafile=None,cleardata=True):
LinkedFileVariableStore.setDataFile(self,datafile,cleardata=cleardata)
if cleardata: self.griddeddata = None
def clear(self,clearfile=True):
self.data = [numpy.empty((0,)),[],[]]
LinkedFileVariableStore.clear(self,clearfile=clearfile)
def dataChanged(self,clearfile=True):
"""Event handler, must be called by external actors when they change the data."""
self.griddeddata = None
LinkedFileVariableStore.dataChanged(self,clearfile=clearfile)
def calculateDimensionRange(self,dimname):
ind = self.dimensionorder.index(dimname)
dimdata = self.getData()[ind]
if len(dimdata)==0: return None
if ind==0:
return (dimdata.min(),dimdata.max())
else:
dimmin,dimmax = None,None
for curdata in dimdata:
if 0 in curdata.shape: continue
curmin,curmax = curdata.min(),curdata.max()
if dimmin is None or curmin<dimmin: dimmin = curmin
if dimmax is None or curmax>dimmax: dimmax = curmax
return (dimmin,dimmax)
def writeData(self,target,callback=None):
"""Writes the current data to a file-like object."""
varcount = len(self.vardata)
data = self.getData()
assert data is not None, 'Cannot write data to file, because data is set to None.'
times,depths,values = data
for itime in range(times.shape[0]):
target.write(xmlstore.util.formatDateTime(xmlplot.common.num2date(times[itime]),iso=True))
curdepths = depths[itime]
curdata = values[itime]
depthcount = len(curdepths)
target.write('\t%i\t1\n' % depthcount)
for idepth in range(depthcount):
target.write('%.9g' % curdepths[idepth])
for ivar in range(varcount):
target.write('\t%.9g' % curdata[idepth,ivar])
target.write('\n')
def getGriddedData(self,callback=None):
data = self.getData()
if self.griddeddata is None:
# Select only non-empty profiles
times,depths,values = [],[],[]
for t,d,v in zip(*data):
if 0 not in d.shape:
times.append(t)
depths.append(d)
values.append(v)
times = numpy.array(times,dtype=data[0].dtype)
varcount = len(self.vardata)
# Find unique depth levels.
uniquedepths = set()
for ds in depths:
for d in ds: uniquedepths.add(d)
# Create a depth grid to interpolate onto. Use the observed depths if there are fewer than 200,
# otherwise create an equidistant 200-point grid between the minimum and maximum depth.
uniquedepths = list(uniquedepths)
uniquedepths.sort()
if len(uniquedepths)<200:
depthdatatype = self.dimensions[self.dimensionorder[1]]['datatype']
depthgrid = numpy.array(uniquedepths,self.mpldatatypes[depthdatatype])
else:
depthgrid = numpy.linspace(uniquedepths[0],uniquedepths[-1],200)
# Grid observed profiles to depth grid.
griddedvalues = numpy.empty((times.shape[0],depthgrid.shape[0],varcount),self.mpldatatypes[self.datatype])
for it in range(len(times)):
griddedvalues[it,:,:] = xmlplot.common.interp1(depths[it],values[it],depthgrid)
if callback is not None and (it+1)%20==0:
callback(float(it+1)/len(times),'gridded %i profiles.' % (it+1))
# Store time grid, depth grid and observations.
self.griddeddata = (times,depthgrid,griddedvalues)
return self.griddeddata
def parseDataFile(self,callback=None):
if self.datafile is None or not self.datafile.isValid(): return None
varcount = len(self.vardata)
# Get the size of the file (in bytes, may be None if the size is not known)
# This will be used in combination with the position of the file pointer to report progress.
filesize = float(self.datafile.getSize())
# Access the data through some read-only file-like object.
f = self.datafile.getAsReadOnlyFile()
times = []
depths = []
values = []
iline = 0
while True:
# Read a line (stop if end-of-file was reached)
line = f.readline()
if line=='': break
iline += 1
# Read date & time
try:
refvals = map(int,(line[:4],line[5:7],line[8:10],line[11:13],line[14:16],line[17:19]))
except ValueError:
raise Exception('Line %i does not start with date and time (yyyy-mm-dd hh:mm:ss). Line contents: %s' % (iline,line))
curdate = xmlstore.util.dateTimeFromTuple(refvals)
curdate = xmlplot.common.date2num(curdate)
# Get the number of observations and the depth direction.
(depthcount,updown) = map(int, line[19:].split())
# Create arrays that will contains depths and observed values.
depthdatatype = self.dimensions[self.dimensionorder[1]]['datatype']
curdepths = numpy.empty((depthcount,),self.mpldatatypes[depthdatatype])
curvalues = numpy.empty((depthcount,varcount),self.mpldatatypes[self.datatype])
# Depths can be increasing (updown==1) or decreasing (updown!=1)
if updown==1:
depthindices = range(0,depthcount,1)
else:
depthindices = range(depthcount-1,-1,-1)
# Now parse the specified number of observations to create the profiles.
prevdepth = None
for idepthline in depthindices:
if callback is not None and iline%1000==0:
pos = f.tell()
callback(pos/filesize,'processed %i lines.' % iline)
# Read line
line = f.readline()
if line=='':
raise Exception('Premature end-of-file after line %i; expected %i more observations.' % (iline,depthcount-depthindices.index(idepthline)))
iline += 1
# Read values (depth followed by data) and check.
try:
linedata = map(float,line.split())
except ValueError,e:
raise Exception('Line %i: %s' % (iline,e))
if len(linedata)<varcount+1:
raise Exception('Line %i contains only %i value(s), whereas %i (1 depth and %i observations) are expected.' % (iline,len(linedata),varcount+1,varcount))
if prevdepth is not None:
if linedata[0]==prevdepth:
raise Exception('Found duplicate observation for depth %.4f at line %i.' % (linedata[0],iline))
if updown==1:
if linedata[0]<prevdepth:
raise Exception('Observation depth decreases from %.4f to %.4f at line %i, but the profile depth was set to increase from first to last observation.' % (prevdepth,linedata[0],iline))
elif linedata[0]>prevdepth:
raise Exception('Observation depth increases from %.4f to %.4f at line %i, but the profile depth was set to decrease from first to last observation.' % (prevdepth,linedata[0],iline))
prevdepth = linedata[0]
# Store current observation
curdepths[idepthline] = linedata[0]
curvalues[idepthline,:] = linedata[1:varcount+1]
# Append the profiles for the current time to the list.
times.append(curdate)
depths.append(curdepths)
values.append(curvalues)
# Inform caller about progress.
if callback is not None and iline%1000==0:
pos = f.tell()
callback(pos/filesize,'processed %i lines.' % iline)
# Convert sequence with times to numpy array.
timedatatype = self.dimensions[self.dimensionorder[0]]['datatype']
times = numpy.array(times,self.mpldatatypes[timedatatype])
# Close data file
f.close()
# Succeeded in reading the data: store them internally.
return [times,depths,values]
| gpl-2.0 |
RayMick/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause |
liuwenf/moose | modules/porous_flow/doc/tests/dispersion.py | 14 | 1881 | #!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import erfc
import pandas as pd
#
# Diffusion-only test
#
# Read MOOSE simulation data
data = pd.read_csv("../../tests/dispersion/diff01_out_xmass_0021.csv")
# The analytical solution is erfc(u) where u is a similarity variable
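# For one-dimensional diffusion dC/dt = D*d2C/dx2 with C(0,t) = 1 and
# C(x,0) = 0, the solution is C(x,t) = erfc(x/(2*sqrt(D*t))), which is
# what is evaluated below with u = x/(2*sqrt(D*t)).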
x = np.linspace(0,10,100)
t = 20
d = 1
tau = 0.1
D = d*tau
u = x/(2*np.sqrt(D*t))
plt.figure(1)
plt.plot(x, erfc(u), label = 'Analytical')
plt.plot(data.x, data.massfrac0, 'o', label = 'MOOSE')
plt.xlabel('x (m)')
plt.ylabel('Mass fraction (-)')
plt.legend()
plt.title('Mass fraction (t = %g s)' % t)
plt.ylim([-0.05,1])
plt.savefig("diffusion_fig.pdf")
#
# Dispersion tests
#
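# expected() below evaluates the analytical solution of the 1D
# advection-dispersion equation dC/dt = D*d2C/dx2 - v*dC/dx; this closed
# form appears to correspond to a flux-type (Cauchy) inlet boundary
# condition.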
def expected(x,t):
porosity = 0.3
alphal = 0.2
v = 1.05e-3 / porosity
D = alphal * v
return 0.5 * erfc((x - v * t)/(2 * np.sqrt(D * t))) + np.sqrt(v * v * t/(np.pi * D)) * \
np.exp(- (x - v * t)**2/(4 * D * t)) - 0.5 * (1 + v * x / D + v * v * t / D) * np.exp(v * x / D) * \
erfc((x + v * t)/(2 * np.sqrt(D * t)))
# Read MOOSE simulation data
data = pd.read_csv("../../tests/dispersion/disp01_out_xmass_0029.csv")
plt.figure(2)
plt.plot(x, expected(x, 1e3), label = 'Analytical')
plt.plot(data.x, data.massfrac0, 'o', label = 'MOOSE')
plt.xlabel('x (m)')
plt.ylabel('Mass fraction (-)')
plt.legend()
plt.title('Mass fraction (t = 1000 s)')
plt.ylim([-0.05,1])
plt.savefig("dispersion_fig.pdf")
#
# Heavy dispersion test
#
# Read MOOSE simulation data
data = pd.read_csv("../../tests/dispersion/disp01_heavy_out_xmass_0105.csv")
plt.figure(3)
plt.plot(x, expected(x, 1e3), label = 'Analytical')
plt.plot(data.x, data.massfrac0, 'o', label = 'MOOSE', markevery=4)
plt.xlabel('x (m)')
plt.ylabel('Mass fraction (-)')
plt.legend()
plt.title('Mass fraction (t = 1000 s)')
plt.ylim([-0.05,1])
plt.savefig("dispersion_heavy_fig.pdf")
sys.exit(0)
| lgpl-2.1 |
zifeo/nest-simulator | topology/pynest/tests/test_plotting.py | 13 | 4111 | # -*- coding: utf-8 -*-
#
# test_plotting.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for basic topology hl_api functions.
NOTE: These tests only test whether the code runs, it does not check
whether the results produced are correct.
"""
import unittest
import nest
import nest.topology as topo
try:
import matplotlib.pyplot as plt
plt.figure() # make sure we can open a window; on Jenkins, DISPLAY is not set
PLOTTING_POSSIBLE = True
except:
PLOTTING_POSSIBLE = False
@unittest.skipIf(not PLOTTING_POSSIBLE, 'Plotting impossible because matplotlib or the display is missing')
class PlottingTestCase(unittest.TestCase):
def test_PlotLayer(self):
"""Test plotting layer."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.PlotLayer(l)
self.assertTrue(True)
def test_PlotTargets(self):
"""Test plotting targets."""
ldict = {'elements': ['iaf_neuron', 'iaf_psc_alpha'], 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
'mask': {'grid': {'rows':2, 'columns':2}}}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
ian = [gid for gid in nest.GetLeaves(l)[0]
if nest.GetStatus([gid], 'model')[0] == 'iaf_neuron']
ipa = [gid for gid in nest.GetLeaves(l)[0]
if nest.GetStatus([gid], 'model')[0] == 'iaf_psc_alpha']
# connect ian -> all using static_synapse
cdict.update({'sources': {'model': 'iaf_neuron'},
'synapse_model': 'static_synapse'})
topo.ConnectLayers(l, l, cdict)
for k in ['sources', 'synapse_model']: cdict.pop(k)
# connect ipa -> ipa using stdp_synapse
cdict.update({'sources': {'model': 'iaf_psc_alpha'},
'targets': {'model': 'iaf_psc_alpha'},
'synapse_model': 'stdp_synapse'})
topo.ConnectLayers(l, l, cdict)
for k in ['sources', 'targets', 'synapse_model']: cdict.pop(k)
ctr = topo.FindCenterElement(l)
fig = topo.PlotTargets(ctr, l)
fig.gca().set_title('Plain call')
self.assertTrue(True)
def test_PlotKernel(self):
"""Test plotting kernels."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
f = plt.figure()
a1 = f.add_subplot(221)
ctr = topo.FindCenterElement(l)
topo.PlotKernel(a1, ctr, {'circular': {'radius': 1.}}, {'gaussian': {'sigma':0.2}})
a2 = f.add_subplot(222)
topo.PlotKernel(a2, ctr, {'doughnut': {'inner_radius': 0.5, 'outer_radius':0.75}})
a3 = f.add_subplot(223)
topo.PlotKernel(a3, ctr, {'rectangular': {'lower_left': [-.5,-.5],
'upper_right':[0.5,0.5]}})
self.assertTrue(True)
def suite():
suite = unittest.makeSuite(PlottingTestCase,'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
import matplotlib.pyplot as plt
plt.show()
| gpl-2.0 |
kcavagnolo/astroML | book_figures/chapter6/fig_great_wall_MST.py | 3 | 5216 | """
Euclidean Minimum Spanning Tree
-------------------------------
Figure 6.15
An approximate Euclidean minimum spanning tree over the two-dimensional
projection of the SDSS Great Wall. The upper panel shows the input points, and
the middle panel shows the dendrogram connecting them. The lower panel shows
clustering based on this dendrogram, created by removing the largest 10% of the
graph edges, and keeping the remaining connected clusters with 30 or more
members.
Additional information
~~~~~~~~~~~~~~~~~~~~~~
This figure is based on the data presented in Figure 1 of Cowan & Ivezic
(2008). A similar figure appears in the book
"Statistics, Data Mining, and Machine Learning in Astronomy", by
Ivezic, Connolly, Vanderplas, and Gray (2013).
The three panels of this figure show a hierarchical clustering of a subset
of galaxies from the Sloan Digital Sky Survey (SDSS). This region is known
as the "SDSS Great Wall", and contains an extended cluster of several thousand
galaxies approximately 300Mpc (about 1 billion light years) from earth. The
top panel shows the positions of over 8,000 galaxies projected to a 2D plane
with Earth at the point (0, 0). The middle panel shows a dendrogram
representation of a Euclidean Minimum Spanning Tree (MST) over the galaxy
locations. By eliminating edges of a MST which are greater than a given
length, we can measure the amount of clustering at that scale: this is one
version of a class of models known as Hierarchical Clustering. The bottom
panel shows the results of this clustering approach for an edge cutoff of
3.5Mpc, along with a Gaussian Mixture Model fit to the distribution within
each cluster.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function, division
import numpy as np
from matplotlib import pyplot as plt
from scipy import sparse
from sklearn.mixture import GMM
from astroML.clustering import HierarchicalClustering, get_graph_segments
from astroML.datasets import fetch_great_wall
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# get data
X = fetch_great_wall()
xmin, xmax = (-375, -175)
ymin, ymax = (-300, 200)
#------------------------------------------------------------
# Compute the MST clustering model
n_neighbors = 10
edge_cutoff = 0.9
cluster_cutoff = 10
model = HierarchicalClustering(n_neighbors=n_neighbors,
edge_cutoff=edge_cutoff,
min_cluster_size=cluster_cutoff)
model.fit(X)
print(" scale: %2g Mpc" % np.percentile(model.full_tree_.data,
100 * edge_cutoff))
n_components = model.n_components_
labels = model.labels_
#------------------------------------------------------------
# Get the x, y coordinates of the beginning and end of each line segment
T_x, T_y = get_graph_segments(model.X_train_,
model.full_tree_)
T_trunc_x, T_trunc_y = get_graph_segments(model.X_train_,
model.cluster_graph_)
#------------------------------------------------------------
# Fit a GMM to each individual cluster
Nx = 100
Ny = 250
Xgrid = np.vstack(map(np.ravel, np.meshgrid(np.linspace(xmin, xmax, Nx),
np.linspace(ymin, ymax, Ny)))).T
density = np.zeros(Xgrid.shape[0])
for i in range(n_components):
ind = (labels == i)
Npts = ind.sum()
Nclusters = min(12, Npts // 5)
gmm = GMM(Nclusters, random_state=0).fit(X[ind])
dens = np.exp(gmm.score(Xgrid))
density += dens / dens.max()
density = density.reshape((Ny, Nx))
#----------------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 6))
fig.subplots_adjust(hspace=0, left=0.1, right=0.95, bottom=0.1, top=0.9)
ax = fig.add_subplot(311, aspect='equal')
ax.scatter(X[:, 1], X[:, 0], s=1, lw=0, c='k')
ax.set_xlim(ymin, ymax)
ax.set_ylim(xmin, xmax)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_ylabel('(Mpc)')
ax = fig.add_subplot(312, aspect='equal')
ax.plot(T_y, T_x, c='k', lw=0.5)
ax.set_xlim(ymin, ymax)
ax.set_ylim(xmin, xmax)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_ylabel('(Mpc)')
ax = fig.add_subplot(313, aspect='equal')
ax.plot(T_trunc_y, T_trunc_x, c='k', lw=0.5)
ax.imshow(density.T, origin='lower', cmap=plt.cm.hot_r,
extent=[ymin, ymax, xmin, xmax])
ax.set_xlim(ymin, ymax)
ax.set_ylim(xmin, xmax)
ax.set_xlabel('(Mpc)')
ax.set_ylabel('(Mpc)')
plt.show()
| bsd-2-clause |
Petr-Kovalev/nupic-win32 | external/linux32/lib/python2.6/site-packages/matplotlib/_mathtext_data.py | 69 | 57988 | """
font data tables for truetype and afm computer modern fonts
"""
# this dict maps symbol names to (fontname, glyphindex) pairs. To get the
# glyph index from the character code, you have to use get_charmap
"""
from matplotlib.ft2font import FT2Font
font = FT2Font('/usr/local/share/matplotlib/cmr10.ttf')
items = font.get_charmap().items()
items.sort()
for charcode, glyphind in items:
print charcode, glyphind
"""
latex_to_bakoma = {
r'\oint' : ('cmex10', 45),
r'\bigodot' : ('cmex10', 50),
r'\bigoplus' : ('cmex10', 55),
r'\bigotimes' : ('cmex10', 59),
r'\sum' : ('cmex10', 51),
r'\prod' : ('cmex10', 24),
r'\int' : ('cmex10', 56),
r'\bigcup' : ('cmex10', 28),
r'\bigcap' : ('cmex10', 60),
r'\biguplus' : ('cmex10', 32),
r'\bigwedge' : ('cmex10', 4),
r'\bigvee' : ('cmex10', 37),
r'\coprod' : ('cmex10', 42),
r'\__sqrt__' : ('cmex10', 48),
r'\leftbrace' : ('cmex10', 92),
r'{' : ('cmex10', 92),
r'\{' : ('cmex10', 92),
r'\rightbrace' : ('cmex10', 130),
r'}' : ('cmex10', 130),
r'\}' : ('cmex10', 130),
r'\leftangle' : ('cmex10', 97),
r'\rightangle' : ('cmex10', 64),
r'\langle' : ('cmex10', 97),
r'\rangle' : ('cmex10', 64),
r'\widehat' : ('cmex10', 15),
r'\widetilde' : ('cmex10', 52),
r'\omega' : ('cmmi10', 29),
r'\varepsilon' : ('cmmi10', 20),
r'\vartheta' : ('cmmi10', 22),
r'\varrho' : ('cmmi10', 61),
r'\varsigma' : ('cmmi10', 41),
r'\varphi' : ('cmmi10', 6),
r'\leftharpoonup' : ('cmmi10', 108),
r'\leftharpoondown' : ('cmmi10', 68),
r'\rightharpoonup' : ('cmmi10', 117),
r'\rightharpoondown' : ('cmmi10', 77),
r'\triangleright' : ('cmmi10', 130),
r'\triangleleft' : ('cmmi10', 89),
r'.' : ('cmmi10', 51),
r',' : ('cmmi10', 44),
r'<' : ('cmmi10', 99),
r'/' : ('cmmi10', 98),
r'>' : ('cmmi10', 107),
r'\flat' : ('cmmi10', 131),
r'\natural' : ('cmmi10', 90),
r'\sharp' : ('cmmi10', 50),
r'\smile' : ('cmmi10', 97),
r'\frown' : ('cmmi10', 58),
r'\ell' : ('cmmi10', 102),
r'\imath' : ('cmmi10', 8),
r'\jmath' : ('cmmi10', 65),
r'\wp' : ('cmmi10', 14),
r'\alpha' : ('cmmi10', 13),
r'\beta' : ('cmmi10', 35),
r'\gamma' : ('cmmi10', 24),
r'\delta' : ('cmmi10', 38),
r'\epsilon' : ('cmmi10', 54),
r'\zeta' : ('cmmi10', 10),
r'\eta' : ('cmmi10', 5),
r'\theta' : ('cmmi10', 18),
r'\iota' : ('cmmi10', 28),
r'\lambda' : ('cmmi10', 9),
r'\mu' : ('cmmi10', 32),
r'\nu' : ('cmmi10', 34),
r'\xi' : ('cmmi10', 7),
r'\pi' : ('cmmi10', 36),
r'\kappa' : ('cmmi10', 30),
r'\rho' : ('cmmi10', 39),
r'\sigma' : ('cmmi10', 21),
r'\tau' : ('cmmi10', 43),
r'\upsilon' : ('cmmi10', 25),
r'\phi' : ('cmmi10', 42),
r'\chi' : ('cmmi10', 17),
r'\psi' : ('cmmi10', 31),
r'|' : ('cmsy10', 47),
r'\|' : ('cmsy10', 47),
r'(' : ('cmr10', 119),
r'\leftparen' : ('cmr10', 119),
r'\rightparen' : ('cmr10', 68),
r')' : ('cmr10', 68),
r'+' : ('cmr10', 76),
r'0' : ('cmr10', 40),
r'1' : ('cmr10', 100),
r'2' : ('cmr10', 49),
r'3' : ('cmr10', 110),
r'4' : ('cmr10', 59),
r'5' : ('cmr10', 120),
r'6' : ('cmr10', 69),
r'7' : ('cmr10', 127),
r'8' : ('cmr10', 77),
r'9' : ('cmr10', 22),
r':' : ('cmr10', 85),
r';' : ('cmr10', 31),
r'=' : ('cmr10', 41),
r'\leftbracket' : ('cmr10', 62),
r'[' : ('cmr10', 62),
r'\rightbracket' : ('cmr10', 72),
r']' : ('cmr10', 72),
r'\%' : ('cmr10', 48),
r'%' : ('cmr10', 48),
r'\$' : ('cmr10', 99),
r'@' : ('cmr10', 111),
r'\_' : ('cmtt10', 79),
r'\Gamma' : ('cmr10', 19),
r'\Delta' : ('cmr10', 6),
r'\Theta' : ('cmr10', 7),
r'\Lambda' : ('cmr10', 14),
r'\Xi' : ('cmr10', 3),
r'\Pi' : ('cmr10', 17),
r'\Sigma' : ('cmr10', 10),
r'\Upsilon' : ('cmr10', 11),
r'\Phi' : ('cmr10', 9),
r'\Psi' : ('cmr10', 15),
r'\Omega' : ('cmr10', 12),
# these are mathml names, I think. I'm just using them for the
# tex methods noted
r'\circumflexaccent' : ('cmr10', 124), # for \hat
r'\combiningbreve' : ('cmr10', 81), # for \breve
r'\combiningoverline' : ('cmr10', 131), # for \bar
r'\combininggraveaccent' : ('cmr10', 114), # for \grave
r'\combiningacuteaccent' : ('cmr10', 63), # for \accute
r'\combiningdiaeresis' : ('cmr10', 91), # for \ddot
r'\combiningtilde' : ('cmr10', 75), # for \tilde
r'\combiningrightarrowabove' : ('cmmi10', 110), # for \vec
r'\combiningdotabove' : ('cmr10', 26), # for \dot
r'\leftarrow' : ('cmsy10', 10),
r'\uparrow' : ('cmsy10', 25),
r'\downarrow' : ('cmsy10', 28),
r'\leftrightarrow' : ('cmsy10', 24),
r'\nearrow' : ('cmsy10', 99),
r'\searrow' : ('cmsy10', 57),
r'\simeq' : ('cmsy10', 108),
r'\Leftarrow' : ('cmsy10', 104),
r'\Rightarrow' : ('cmsy10', 112),
r'\Uparrow' : ('cmsy10', 60),
r'\Downarrow' : ('cmsy10', 68),
r'\Leftrightarrow' : ('cmsy10', 51),
r'\nwarrow' : ('cmsy10', 65),
r'\swarrow' : ('cmsy10', 116),
r'\propto' : ('cmsy10', 15),
r'\prime' : ('cmsy10', 73),
r"'" : ('cmsy10', 73),
r'\infty' : ('cmsy10', 32),
r'\in' : ('cmsy10', 59),
r'\ni' : ('cmsy10', 122),
r'\bigtriangleup' : ('cmsy10', 80),
r'\bigtriangledown' : ('cmsy10', 132),
r'\slash' : ('cmsy10', 87),
r'\forall' : ('cmsy10', 21),
r'\exists' : ('cmsy10', 5),
r'\neg' : ('cmsy10', 20),
r'\emptyset' : ('cmsy10', 33),
r'\Re' : ('cmsy10', 95),
r'\Im' : ('cmsy10', 52),
r'\top' : ('cmsy10', 100),
r'\bot' : ('cmsy10', 11),
r'\aleph' : ('cmsy10', 26),
r'\cup' : ('cmsy10', 6),
r'\cap' : ('cmsy10', 19),
r'\uplus' : ('cmsy10', 58),
r'\wedge' : ('cmsy10', 43),
r'\vee' : ('cmsy10', 96),
r'\vdash' : ('cmsy10', 109),
r'\dashv' : ('cmsy10', 66),
r'\lfloor' : ('cmsy10', 117),
r'\rfloor' : ('cmsy10', 74),
r'\lceil' : ('cmsy10', 123),
r'\rceil' : ('cmsy10', 81),
r'\lbrace' : ('cmsy10', 92),
r'\rbrace' : ('cmsy10', 105),
r'\mid' : ('cmsy10', 47),
r'\vert' : ('cmsy10', 47),
r'\Vert' : ('cmsy10', 44),
r'\updownarrow' : ('cmsy10', 94),
r'\Updownarrow' : ('cmsy10', 53),
r'\backslash' : ('cmsy10', 126),
r'\wr' : ('cmsy10', 101),
r'\nabla' : ('cmsy10', 110),
r'\sqcup' : ('cmsy10', 67),
r'\sqcap' : ('cmsy10', 118),
r'\sqsubseteq' : ('cmsy10', 75),
r'\sqsupseteq' : ('cmsy10', 124),
r'\S' : ('cmsy10', 129),
r'\dag' : ('cmsy10', 71),
r'\ddag' : ('cmsy10', 127),
r'\P' : ('cmsy10', 130),
r'\clubsuit' : ('cmsy10', 18),
r'\diamondsuit' : ('cmsy10', 34),
r'\heartsuit' : ('cmsy10', 22),
r'-' : ('cmsy10', 17),
r'\cdot' : ('cmsy10', 78),
r'\times' : ('cmsy10', 13),
r'*' : ('cmsy10', 9),
r'\ast' : ('cmsy10', 9),
r'\div' : ('cmsy10', 31),
r'\diamond' : ('cmsy10', 48),
r'\pm' : ('cmsy10', 8),
r'\mp' : ('cmsy10', 98),
r'\oplus' : ('cmsy10', 16),
r'\ominus' : ('cmsy10', 56),
r'\otimes' : ('cmsy10', 30),
r'\oslash' : ('cmsy10', 107),
r'\odot' : ('cmsy10', 64),
r'\bigcirc' : ('cmsy10', 115),
r'\circ' : ('cmsy10', 72),
r'\bullet' : ('cmsy10', 84),
r'\asymp' : ('cmsy10', 121),
r'\equiv' : ('cmsy10', 35),
r'\subseteq' : ('cmsy10', 103),
r'\supseteq' : ('cmsy10', 42),
r'\leq' : ('cmsy10', 14),
r'\geq' : ('cmsy10', 29),
r'\preceq' : ('cmsy10', 79),
r'\succeq' : ('cmsy10', 131),
r'\sim' : ('cmsy10', 27),
r'\approx' : ('cmsy10', 23),
r'\subset' : ('cmsy10', 50),
r'\supset' : ('cmsy10', 86),
r'\ll' : ('cmsy10', 85),
r'\gg' : ('cmsy10', 40),
r'\prec' : ('cmsy10', 93),
r'\succ' : ('cmsy10', 49),
r'\rightarrow' : ('cmsy10', 12),
r'\to' : ('cmsy10', 12),
r'\spadesuit' : ('cmsy10', 7),
}
latex_to_cmex = {
r'\__sqrt__' : 112,
r'\bigcap' : 92,
r'\bigcup' : 91,
r'\bigodot' : 75,
r'\bigoplus' : 77,
r'\bigotimes' : 79,
r'\biguplus' : 93,
r'\bigvee' : 95,
r'\bigwedge' : 94,
r'\coprod' : 97,
r'\int' : 90,
r'\leftangle' : 173,
r'\leftbrace' : 169,
r'\oint' : 73,
r'\prod' : 89,
r'\rightangle' : 174,
r'\rightbrace' : 170,
r'\sum' : 88,
r'\widehat' : 98,
r'\widetilde' : 101,
}
latex_to_standard = {
r'\cong' : ('psyr', 64),
r'\Delta' : ('psyr', 68),
r'\Phi' : ('psyr', 70),
r'\Gamma' : ('psyr', 89),
r'\alpha' : ('psyr', 97),
r'\beta' : ('psyr', 98),
r'\chi' : ('psyr', 99),
r'\delta' : ('psyr', 100),
r'\varepsilon' : ('psyr', 101),
r'\phi' : ('psyr', 102),
r'\gamma' : ('psyr', 103),
r'\eta' : ('psyr', 104),
r'\iota' : ('psyr', 105),
r'\varpsi' : ('psyr', 106),
r'\kappa' : ('psyr', 108),
r'\nu' : ('psyr', 110),
r'\pi' : ('psyr', 112),
r'\theta' : ('psyr', 113),
r'\rho' : ('psyr', 114),
r'\sigma' : ('psyr', 115),
r'\tau' : ('psyr', 116),
r'\upsilon' : ('psyr', 117),
r'\varpi' : ('psyr', 118),
r'\omega' : ('psyr', 119),
r'\xi' : ('psyr', 120),
r'\psi' : ('psyr', 121),
r'\zeta' : ('psyr', 122),
r'\sim' : ('psyr', 126),
r'\leq' : ('psyr', 163),
r'\infty' : ('psyr', 165),
r'\clubsuit' : ('psyr', 167),
r'\diamondsuit' : ('psyr', 168),
r'\heartsuit' : ('psyr', 169),
r'\spadesuit' : ('psyr', 170),
r'\leftrightarrow' : ('psyr', 171),
r'\leftarrow' : ('psyr', 172),
r'\uparrow' : ('psyr', 173),
r'\rightarrow' : ('psyr', 174),
r'\downarrow' : ('psyr', 175),
r'\pm' : ('psyr', 176),
r'\geq' : ('psyr', 179),
r'\times' : ('psyr', 180),
r'\propto' : ('psyr', 181),
r'\partial' : ('psyr', 182),
r'\bullet' : ('psyr', 183),
r'\div' : ('psyr', 184),
r'\neq' : ('psyr', 185),
r'\equiv' : ('psyr', 186),
r'\approx' : ('psyr', 187),
r'\ldots' : ('psyr', 188),
r'\aleph' : ('psyr', 192),
r'\Im' : ('psyr', 193),
r'\Re' : ('psyr', 194),
r'\wp' : ('psyr', 195),
r'\otimes' : ('psyr', 196),
r'\oplus' : ('psyr', 197),
r'\oslash' : ('psyr', 198),
r'\cap' : ('psyr', 199),
r'\cup' : ('psyr', 200),
r'\supset' : ('psyr', 201),
r'\supseteq' : ('psyr', 202),
r'\subset' : ('psyr', 204),
r'\subseteq' : ('psyr', 205),
r'\in' : ('psyr', 206),
r'\notin' : ('psyr', 207),
r'\angle' : ('psyr', 208),
r'\nabla' : ('psyr', 209),
r'\textregistered' : ('psyr', 210),
r'\copyright' : ('psyr', 211),
r'\texttrademark' : ('psyr', 212),
r'\Pi' : ('psyr', 213),
r'\prod' : ('psyr', 213),
r'\surd' : ('psyr', 214),
r'\__sqrt__' : ('psyr', 214),
r'\cdot' : ('psyr', 215),
r'\urcorner' : ('psyr', 216),
r'\vee' : ('psyr', 217),
r'\wedge' : ('psyr', 218),
r'\Leftrightarrow' : ('psyr', 219),
r'\Leftarrow' : ('psyr', 220),
r'\Uparrow' : ('psyr', 221),
r'\Rightarrow' : ('psyr', 222),
r'\Downarrow' : ('psyr', 223),
r'\Diamond' : ('psyr', 224),
r'\langle' : ('psyr', 225),
r'\Sigma' : ('psyr', 229),
r'\sum' : ('psyr', 229),
r'\forall' : ('psyr', 34),
r'\exists' : ('psyr', 36),
r'\lceil' : ('psyr', 233),
r'\lbrace' : ('psyr', 123),
r'\Psi' : ('psyr', 89),
r'\bot' : ('psyr', 0136),
r'\Omega' : ('psyr', 0127),
r'\leftbracket' : ('psyr', 0133),
r'\rightbracket' : ('psyr', 0135),
r'\leftbrace' : ('psyr', 123),
r'\leftparen' : ('psyr', 050),
r'\prime' : ('psyr', 0242),
r'\sharp' : ('psyr', 043),
r'\slash' : ('psyr', 057),
r'\Lamda' : ('psyr', 0114),
r'\neg' : ('psyr', 0330),
r'\Upsilon' : ('psyr', 0241),
r'\rightbrace' : ('psyr', 0175),
r'\rfloor' : ('psyr', 0373),
r'\lambda' : ('psyr', 0154),
r'\to' : ('psyr', 0256),
r'\Xi' : ('psyr', 0130),
r'\emptyset' : ('psyr', 0306),
r'\lfloor' : ('psyr', 0353),
r'\rightparen' : ('psyr', 051),
r'\rceil' : ('psyr', 0371),
r'\ni' : ('psyr', 047),
r'\epsilon' : ('psyr', 0145),
r'\Theta' : ('psyr', 0121),
r'\langle' : ('psyr', 0341),
r'\leftangle' : ('psyr', 0341),
r'\rangle' : ('psyr', 0361),
r'\rightangle' : ('psyr', 0361),
r'\rbrace' : ('psyr', 0175),
r'\circ' : ('psyr', 0260),
r'\diamond' : ('psyr', 0340),
r'\mu' : ('psyr', 0155),
r'\mid' : ('psyr', 0352),
r'\imath' : ('pncri8a', 105),
r'\%' : ('pncr8a', 37),
r'\$' : ('pncr8a', 36),
r'\{' : ('pncr8a', 123),
r'\}' : ('pncr8a', 125),
r'\backslash' : ('pncr8a', 92),
r'\ast' : ('pncr8a', 42),
r'\circumflexaccent' : ('pncri8a', 124), # for \hat
r'\combiningbreve' : ('pncri8a', 81), # for \breve
r'\combininggraveaccent' : ('pncri8a', 114), # for \grave
r'\combiningacuteaccent' : ('pncri8a', 63), # for \accute
r'\combiningdiaeresis' : ('pncri8a', 91), # for \ddot
r'\combiningtilde' : ('pncri8a', 75), # for \tilde
r'\combiningrightarrowabove' : ('pncri8a', 110), # for \vec
r'\combiningdotabove' : ('pncri8a', 26), # for \dot
}
# Automatically generated.
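# Maps Type 1 glyph names to unicode code points; for example,
# type12uni['aring'] == 229 (U+00E5, LATIN SMALL LETTER A WITH RING ABOVE).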
type12uni = {'uni24C8': 9416,
'aring': 229,
'uni22A0': 8864,
'uni2292': 8850,
'quotedblright': 8221,
'uni03D2': 978,
'uni2215': 8725,
'uni03D0': 976,
'V': 86,
'dollar': 36,
'uni301E': 12318,
'uni03D5': 981,
'four': 52,
'uni25A0': 9632,
'uni013C': 316,
'uni013B': 315,
'uni013E': 318,
'Yacute': 221,
'uni25DE': 9694,
'uni013F': 319,
'uni255A': 9562,
'uni2606': 9734,
'uni0180': 384,
'uni22B7': 8887,
'uni044F': 1103,
'uni22B5': 8885,
'uni22B4': 8884,
'uni22AE': 8878,
'uni22B2': 8882,
'uni22B1': 8881,
'uni22B0': 8880,
'uni25CD': 9677,
'uni03CE': 974,
'uni03CD': 973,
'uni03CC': 972,
'uni03CB': 971,
'uni03CA': 970,
'uni22B8': 8888,
'uni22C9': 8905,
'uni0449': 1097,
'uni20DD': 8413,
'uni20DC': 8412,
'uni20DB': 8411,
'uni2231': 8753,
'uni25CF': 9679,
'uni306E': 12398,
'uni03D1': 977,
'uni01A1': 417,
'uni20D7': 8407,
'uni03D6': 982,
'uni2233': 8755,
'uni20D2': 8402,
'uni20D1': 8401,
'uni20D0': 8400,
'P': 80,
'uni22BE': 8894,
'uni22BD': 8893,
'uni22BC': 8892,
'uni22BB': 8891,
'underscore': 95,
'uni03C8': 968,
'uni03C7': 967,
'uni0328': 808,
'uni03C5': 965,
'uni03C4': 964,
'uni03C3': 963,
'uni03C2': 962,
'uni03C1': 961,
'uni03C0': 960,
'uni2010': 8208,
'uni0130': 304,
'uni0133': 307,
'uni0132': 306,
'uni0135': 309,
'uni0134': 308,
'uni0137': 311,
'uni0136': 310,
'uni0139': 313,
'uni0138': 312,
'uni2244': 8772,
'uni229A': 8858,
'uni2571': 9585,
'uni0278': 632,
'uni2239': 8761,
'p': 112,
'uni3019': 12313,
'uni25CB': 9675,
'uni03DB': 987,
'uni03DC': 988,
'uni03DA': 986,
'uni03DF': 991,
'uni03DD': 989,
'uni013D': 317,
'uni220A': 8714,
'uni220C': 8716,
'uni220B': 8715,
'uni220E': 8718,
'uni220D': 8717,
'uni220F': 8719,
'uni22CC': 8908,
'Otilde': 213,
'uni25E5': 9701,
'uni2736': 10038,
'perthousand': 8240,
'zero': 48,
'uni279B': 10139,
'dotlessi': 305,
'uni2279': 8825,
'Scaron': 352,
'zcaron': 382,
'uni21D8': 8664,
'egrave': 232,
'uni0271': 625,
'uni01AA': 426,
'uni2332': 9010,
'section': 167,
'uni25E4': 9700,
'Icircumflex': 206,
'ntilde': 241,
'uni041E': 1054,
'ampersand': 38,
'uni041C': 1052,
'uni041A': 1050,
'uni22AB': 8875,
'uni21DB': 8667,
'dotaccent': 729,
'uni0416': 1046,
'uni0417': 1047,
'uni0414': 1044,
'uni0415': 1045,
'uni0412': 1042,
'uni0413': 1043,
'degree': 176,
'uni0411': 1041,
'K': 75,
'uni25EB': 9707,
'uni25EF': 9711,
'uni0418': 1048,
'uni0419': 1049,
'uni2263': 8803,
'uni226E': 8814,
'uni2251': 8785,
'uni02C8': 712,
'uni2262': 8802,
'acircumflex': 226,
'uni22B3': 8883,
'uni2261': 8801,
'uni2394': 9108,
'Aring': 197,
'uni2260': 8800,
'uni2254': 8788,
'uni0436': 1078,
'uni2267': 8807,
'k': 107,
'uni22C8': 8904,
'uni226A': 8810,
'uni231F': 8991,
'smalltilde': 732,
'uni2201': 8705,
'uni2200': 8704,
'uni2203': 8707,
'uni02BD': 701,
'uni2205': 8709,
'uni2204': 8708,
'Agrave': 192,
'uni2206': 8710,
'uni2209': 8713,
'uni2208': 8712,
'uni226D': 8813,
'uni2264': 8804,
'uni263D': 9789,
'uni2258': 8792,
'uni02D3': 723,
'uni02D2': 722,
'uni02D1': 721,
'uni02D0': 720,
'uni25E1': 9697,
'divide': 247,
'uni02D5': 725,
'uni02D4': 724,
'ocircumflex': 244,
'uni2524': 9508,
'uni043A': 1082,
'uni24CC': 9420,
'asciitilde': 126,
'uni22B9': 8889,
'uni24D2': 9426,
'uni211E': 8478,
'uni211D': 8477,
'uni24DD': 9437,
'uni211A': 8474,
'uni211C': 8476,
'uni211B': 8475,
'uni25C6': 9670,
'uni017F': 383,
'uni017A': 378,
'uni017C': 380,
'uni017B': 379,
'uni0346': 838,
'uni22F1': 8945,
'uni22F0': 8944,
'two': 50,
'uni2298': 8856,
'uni24D1': 9425,
'E': 69,
'uni025D': 605,
'scaron': 353,
'uni2322': 8994,
'uni25E3': 9699,
'uni22BF': 8895,
'F': 70,
'uni0440': 1088,
'uni255E': 9566,
'uni22BA': 8890,
'uni0175': 373,
'uni0174': 372,
'uni0177': 375,
'uni0176': 374,
'bracketleft': 91,
'uni0170': 368,
'uni0173': 371,
'uni0172': 370,
'asciicircum': 94,
'uni0179': 377,
'uni2590': 9616,
'uni25E2': 9698,
'uni2119': 8473,
'uni2118': 8472,
'uni25CC': 9676,
'f': 102,
'ordmasculine': 186,
'uni229B': 8859,
'uni22A1': 8865,
'uni2111': 8465,
'uni2110': 8464,
'uni2113': 8467,
'uni2112': 8466,
'mu': 181,
'uni2281': 8833,
'paragraph': 182,
'nine': 57,
'uni25EC': 9708,
'v': 118,
'uni040C': 1036,
'uni0113': 275,
'uni22D0': 8912,
'uni21CC': 8652,
'uni21CB': 8651,
'uni21CA': 8650,
'uni22A5': 8869,
'uni21CF': 8655,
'uni21CE': 8654,
'uni21CD': 8653,
'guilsinglleft': 8249,
'backslash': 92,
'uni2284': 8836,
'uni224E': 8782,
'uni224D': 8781,
'uni224F': 8783,
'uni224A': 8778,
'uni2287': 8839,
'uni224C': 8780,
'uni224B': 8779,
'uni21BD': 8637,
'uni2286': 8838,
'uni030F': 783,
'uni030D': 781,
'uni030E': 782,
'uni030B': 779,
'uni030C': 780,
'uni030A': 778,
'uni026E': 622,
'uni026D': 621,
'six': 54,
'uni026A': 618,
'uni026C': 620,
'uni25C1': 9665,
'uni20D6': 8406,
'uni045B': 1115,
'uni045C': 1116,
'uni256B': 9579,
'uni045A': 1114,
'uni045F': 1119,
'uni045E': 1118,
'A': 65,
'uni2569': 9577,
'uni0458': 1112,
'uni0459': 1113,
'uni0452': 1106,
'uni0453': 1107,
'uni2562': 9570,
'uni0451': 1105,
'uni0456': 1110,
'uni0457': 1111,
'uni0454': 1108,
'uni0455': 1109,
'icircumflex': 238,
'uni0307': 775,
'uni0304': 772,
'uni0305': 773,
'uni0269': 617,
'uni0268': 616,
'uni0300': 768,
'uni0301': 769,
'uni0265': 613,
'uni0264': 612,
'uni0267': 615,
'uni0266': 614,
'uni0261': 609,
'uni0260': 608,
'uni0263': 611,
'uni0262': 610,
'a': 97,
'uni2207': 8711,
'uni2247': 8775,
'uni2246': 8774,
'uni2241': 8769,
'uni2240': 8768,
'uni2243': 8771,
'uni2242': 8770,
'uni2312': 8978,
'ogonek': 731,
'uni2249': 8777,
'uni2248': 8776,
'uni3030': 12336,
'q': 113,
'uni21C2': 8642,
'uni21C1': 8641,
'uni21C0': 8640,
'uni21C7': 8647,
'uni21C6': 8646,
'uni21C5': 8645,
'uni21C4': 8644,
'uni225F': 8799,
'uni212C': 8492,
'uni21C8': 8648,
'uni2467': 9319,
'oacute': 243,
'uni028F': 655,
'uni028E': 654,
'uni026F': 623,
'uni028C': 652,
'uni028B': 651,
'uni028A': 650,
'uni2510': 9488,
'ograve': 242,
'edieresis': 235,
'uni22CE': 8910,
'uni22CF': 8911,
'uni219F': 8607,
'comma': 44,
'uni22CA': 8906,
'uni0429': 1065,
'uni03C6': 966,
'uni0427': 1063,
'uni0426': 1062,
'uni0425': 1061,
'uni0424': 1060,
'uni0423': 1059,
'uni0422': 1058,
'uni0421': 1057,
'uni0420': 1056,
'uni2465': 9317,
'uni24D0': 9424,
'uni2464': 9316,
'uni0430': 1072,
'otilde': 245,
'uni2661': 9825,
'uni24D6': 9430,
'uni2466': 9318,
'uni24D5': 9429,
'uni219A': 8602,
'uni2518': 9496,
'uni22B6': 8886,
'uni2461': 9313,
'uni24D4': 9428,
'uni2460': 9312,
'uni24EA': 9450,
'guillemotright': 187,
'ecircumflex': 234,
'greater': 62,
'uni2011': 8209,
'uacute': 250,
'uni2462': 9314,
'L': 76,
'bullet': 8226,
'uni02A4': 676,
'uni02A7': 679,
'cedilla': 184,
'uni02A2': 674,
'uni2015': 8213,
'uni22C4': 8900,
'uni22C5': 8901,
'uni22AD': 8877,
'uni22C7': 8903,
'uni22C0': 8896,
'uni2016': 8214,
'uni22C2': 8898,
'uni22C3': 8899,
'uni24CF': 9423,
'uni042F': 1071,
'uni042E': 1070,
'uni042D': 1069,
'ydieresis': 255,
'l': 108,
'logicalnot': 172,
'uni24CA': 9418,
'uni0287': 647,
'uni0286': 646,
'uni0285': 645,
'uni0284': 644,
'uni0283': 643,
'uni0282': 642,
'uni0281': 641,
'uni027C': 636,
'uni2664': 9828,
'exclamdown': 161,
'uni25C4': 9668,
'uni0289': 649,
'uni0288': 648,
'uni039A': 922,
'endash': 8211,
'uni2640': 9792,
'uni20E4': 8420,
'uni0473': 1139,
'uni20E1': 8417,
'uni2642': 9794,
'uni03B8': 952,
'uni03B9': 953,
'agrave': 224,
'uni03B4': 948,
'uni03B5': 949,
'uni03B6': 950,
'uni03B7': 951,
'uni03B0': 944,
'uni03B1': 945,
'uni03B2': 946,
'uni03B3': 947,
'uni2555': 9557,
'Adieresis': 196,
'germandbls': 223,
'Odieresis': 214,
'space': 32,
'uni0126': 294,
'uni0127': 295,
'uni0124': 292,
'uni0125': 293,
'uni0122': 290,
'uni0123': 291,
'uni0120': 288,
'uni0121': 289,
'quoteright': 8217,
'uni2560': 9568,
'uni2556': 9558,
'ucircumflex': 251,
'uni2561': 9569,
'uni2551': 9553,
'uni25B2': 9650,
'uni2550': 9552,
'uni2563': 9571,
'uni2553': 9555,
'G': 71,
'uni2564': 9572,
'uni2552': 9554,
'quoteleft': 8216,
'uni2565': 9573,
'uni2572': 9586,
'uni2568': 9576,
'uni2566': 9574,
'W': 87,
'uni214A': 8522,
'uni012F': 303,
'uni012D': 301,
'uni012E': 302,
'uni012B': 299,
'uni012C': 300,
'uni255C': 9564,
'uni012A': 298,
'uni2289': 8841,
'Q': 81,
'uni2320': 8992,
'uni2321': 8993,
'g': 103,
'uni03BD': 957,
'uni03BE': 958,
'uni03BF': 959,
'uni2282': 8834,
'uni2285': 8837,
'uni03BA': 954,
'uni03BB': 955,
'uni03BC': 956,
'uni2128': 8488,
'uni25B7': 9655,
'w': 119,
'uni0302': 770,
'uni03DE': 990,
'uni25DA': 9690,
'uni0303': 771,
'uni0463': 1123,
'uni0462': 1122,
'uni3018': 12312,
'uni2514': 9492,
'question': 63,
'uni25B3': 9651,
'uni24E1': 9441,
'one': 49,
'uni200A': 8202,
'uni2278': 8824,
'ring': 730,
'uni0195': 405,
'figuredash': 8210,
'uni22EC': 8940,
'uni0339': 825,
'uni0338': 824,
'uni0337': 823,
'uni0336': 822,
'uni0335': 821,
'uni0333': 819,
'uni0332': 818,
'uni0331': 817,
'uni0330': 816,
'uni01C1': 449,
'uni01C0': 448,
'uni01C3': 451,
'uni01C2': 450,
'uni2353': 9043,
'uni0308': 776,
'uni2218': 8728,
'uni2219': 8729,
'uni2216': 8726,
'uni2217': 8727,
'uni2214': 8724,
'uni0309': 777,
'uni2609': 9737,
'uni2213': 8723,
'uni2210': 8720,
'uni2211': 8721,
'uni2245': 8773,
'B': 66,
'uni25D6': 9686,
'iacute': 237,
'uni02E6': 742,
'uni02E7': 743,
'uni02E8': 744,
'uni02E9': 745,
'uni221D': 8733,
'uni221E': 8734,
'Ydieresis': 376,
'uni221C': 8732,
'uni22D7': 8919,
'uni221A': 8730,
'R': 82,
'uni24DC': 9436,
'uni033F': 831,
'uni033E': 830,
'uni033C': 828,
'uni033B': 827,
'uni033A': 826,
'b': 98,
'uni228A': 8842,
'uni22DB': 8923,
'uni2554': 9556,
'uni046B': 1131,
'uni046A': 1130,
'r': 114,
'uni24DB': 9435,
'Ccedilla': 199,
'minus': 8722,
'uni24DA': 9434,
'uni03F0': 1008,
'uni03F1': 1009,
'uni20AC': 8364,
'uni2276': 8822,
'uni24C0': 9408,
'uni0162': 354,
'uni0163': 355,
'uni011E': 286,
'uni011D': 285,
'uni011C': 284,
'uni011B': 283,
'uni0164': 356,
'uni0165': 357,
'Lslash': 321,
'uni0168': 360,
'uni0169': 361,
'uni25C9': 9673,
'uni02E5': 741,
'uni21C3': 8643,
'uni24C4': 9412,
'uni24E2': 9442,
'uni2277': 8823,
'uni013A': 314,
'uni2102': 8450,
'Uacute': 218,
'uni2317': 8983,
'uni2107': 8455,
'uni221F': 8735,
'yacute': 253,
'uni3012': 12306,
'Ucircumflex': 219,
'uni015D': 349,
'quotedbl': 34,
'uni25D9': 9689,
'uni2280': 8832,
'uni22AF': 8879,
'onehalf': 189,
'uni221B': 8731,
'Thorn': 222,
'uni2226': 8742,
'M': 77,
'uni25BA': 9658,
'uni2463': 9315,
'uni2336': 9014,
'eight': 56,
'uni2236': 8758,
'multiply': 215,
'uni210C': 8460,
'uni210A': 8458,
'uni21C9': 8649,
'grave': 96,
'uni210E': 8462,
'uni0117': 279,
'uni016C': 364,
'uni0115': 277,
'uni016A': 362,
'uni016F': 367,
'uni0112': 274,
'uni016D': 365,
'uni016E': 366,
'Ocircumflex': 212,
'uni2305': 8965,
'm': 109,
'uni24DF': 9439,
'uni0119': 281,
'uni0118': 280,
'uni20A3': 8355,
'uni20A4': 8356,
'uni20A7': 8359,
'uni2288': 8840,
'uni24C3': 9411,
'uni251C': 9500,
'uni228D': 8845,
'uni222F': 8751,
'uni222E': 8750,
'uni222D': 8749,
'uni222C': 8748,
'uni222B': 8747,
'uni222A': 8746,
'uni255B': 9563,
'Ugrave': 217,
'uni24DE': 9438,
'guilsinglright': 8250,
'uni250A': 9482,
'Ntilde': 209,
'uni0279': 633,
'questiondown': 191,
'uni256C': 9580,
'Atilde': 195,
'uni0272': 626,
'uni0273': 627,
'uni0270': 624,
'ccedilla': 231,
'uni0276': 630,
'uni0277': 631,
'uni0274': 628,
'uni0275': 629,
'uni2252': 8786,
'uni041F': 1055,
'uni2250': 8784,
'Z': 90,
'uni2256': 8790,
'uni2257': 8791,
'copyright': 169,
'uni2255': 8789,
'uni043D': 1085,
'uni043E': 1086,
'uni043F': 1087,
'yen': 165,
'uni041D': 1053,
'uni043B': 1083,
'uni043C': 1084,
'uni21B0': 8624,
'uni21B1': 8625,
'uni21B2': 8626,
'uni21B3': 8627,
'uni21B4': 8628,
'uni21B5': 8629,
'uni21B6': 8630,
'uni21B7': 8631,
'uni21B8': 8632,
'Eacute': 201,
'uni2311': 8977,
'uni2310': 8976,
'uni228F': 8847,
'uni25DB': 9691,
'uni21BA': 8634,
'uni21BB': 8635,
'uni21BC': 8636,
'uni2017': 8215,
'uni21BE': 8638,
'uni21BF': 8639,
'uni231C': 8988,
'H': 72,
'uni0293': 659,
'uni2202': 8706,
'uni22A4': 8868,
'uni231E': 8990,
'uni2232': 8754,
'uni225B': 8795,
'uni225C': 8796,
'uni24D9': 9433,
'uni225A': 8794,
'uni0438': 1080,
'uni0439': 1081,
'uni225D': 8797,
'uni225E': 8798,
'uni0434': 1076,
'X': 88,
'uni007F': 127,
'uni0437': 1079,
'Idieresis': 207,
'uni0431': 1073,
'uni0432': 1074,
'uni0433': 1075,
'uni22AC': 8876,
'uni22CD': 8909,
'uni25A3': 9635,
'bar': 124,
'uni24BB': 9403,
'uni037E': 894,
'uni027B': 635,
'h': 104,
'uni027A': 634,
'uni027F': 639,
'uni027D': 637,
'uni027E': 638,
'uni2227': 8743,
'uni2004': 8196,
'uni2225': 8741,
'uni2224': 8740,
'uni2223': 8739,
'uni2222': 8738,
'uni2221': 8737,
'uni2220': 8736,
'x': 120,
'uni2323': 8995,
'uni2559': 9561,
'uni2558': 9560,
'uni2229': 8745,
'uni2228': 8744,
'udieresis': 252,
'uni029D': 669,
'ordfeminine': 170,
'uni22CB': 8907,
'uni233D': 9021,
'uni0428': 1064,
'uni24C6': 9414,
'uni22DD': 8925,
'uni24C7': 9415,
'uni015C': 348,
'uni015B': 347,
'uni015A': 346,
'uni22AA': 8874,
'uni015F': 351,
'uni015E': 350,
'braceleft': 123,
'uni24C5': 9413,
'uni0410': 1040,
'uni03AA': 938,
'uni24C2': 9410,
'uni03AC': 940,
'uni03AB': 939,
'macron': 175,
'uni03AD': 941,
'uni03AF': 943,
'uni0294': 660,
'uni0295': 661,
'uni0296': 662,
'uni0297': 663,
'uni0290': 656,
'uni0291': 657,
'uni0292': 658,
'atilde': 227,
'Acircumflex': 194,
'uni2370': 9072,
'uni24C1': 9409,
'uni0298': 664,
'uni0299': 665,
'Oslash': 216,
'uni029E': 670,
'C': 67,
'quotedblleft': 8220,
'uni029B': 667,
'uni029C': 668,
'uni03A9': 937,
'uni03A8': 936,
'S': 83,
'uni24C9': 9417,
'uni03A1': 929,
'uni03A0': 928,
'exclam': 33,
'uni03A5': 933,
'uni03A4': 932,
'uni03A7': 935,
'Zcaron': 381,
'uni2133': 8499,
'uni2132': 8498,
'uni0159': 345,
'uni0158': 344,
'uni2137': 8503,
'uni2005': 8197,
'uni2135': 8501,
'uni2134': 8500,
'uni02BA': 698,
'uni2033': 8243,
'uni0151': 337,
'uni0150': 336,
'uni0157': 343,
'equal': 61,
'uni0155': 341,
'uni0154': 340,
's': 115,
'uni233F': 9023,
'eth': 240,
'uni24BE': 9406,
'uni21E9': 8681,
'uni2060': 8288,
'Egrave': 200,
'uni255D': 9565,
'uni24CD': 9421,
'uni21E1': 8673,
'uni21B9': 8633,
'hyphen': 45,
'uni01BE': 446,
'uni01BB': 443,
'period': 46,
'igrave': 236,
'uni01BA': 442,
'uni2296': 8854,
'uni2297': 8855,
'uni2294': 8852,
'uni2295': 8853,
'colon': 58,
'uni2293': 8851,
'uni2290': 8848,
'uni2291': 8849,
'uni032D': 813,
'uni032E': 814,
'uni032F': 815,
'uni032A': 810,
'uni032B': 811,
'uni032C': 812,
'uni231D': 8989,
'Ecircumflex': 202,
'uni24D7': 9431,
'uni25DD': 9693,
'trademark': 8482,
'Aacute': 193,
'cent': 162,
'uni0445': 1093,
'uni266E': 9838,
'uni266D': 9837,
'uni266B': 9835,
'uni03C9': 969,
'uni2003': 8195,
'uni2047': 8263,
'lslash': 322,
'uni03A6': 934,
'uni2043': 8259,
'uni250C': 9484,
'uni2040': 8256,
'uni255F': 9567,
'uni24CB': 9419,
'uni0472': 1138,
'uni0446': 1094,
'uni0474': 1140,
'uni0475': 1141,
'uni2508': 9480,
'uni2660': 9824,
'uni2506': 9478,
'uni2502': 9474,
'c': 99,
'uni2500': 9472,
'N': 78,
'uni22A6': 8870,
'uni21E7': 8679,
'uni2130': 8496,
'uni2002': 8194,
'breve': 728,
'uni0442': 1090,
'Oacute': 211,
'uni229F': 8863,
'uni25C7': 9671,
'uni229D': 8861,
'uni229E': 8862,
'guillemotleft': 171,
'uni0329': 809,
'uni24E5': 9445,
'uni011F': 287,
'uni0324': 804,
'uni0325': 805,
'uni0326': 806,
'uni0327': 807,
'uni0321': 801,
'uni0322': 802,
'n': 110,
'uni2032': 8242,
'uni2269': 8809,
'uni2268': 8808,
'uni0306': 774,
'uni226B': 8811,
'uni21EA': 8682,
'uni0166': 358,
'uni203B': 8251,
'uni01B5': 437,
'idieresis': 239,
'uni02BC': 700,
'uni01B0': 432,
'braceright': 125,
'seven': 55,
'uni02BB': 699,
'uni011A': 282,
'uni29FB': 10747,
'brokenbar': 166,
'uni2036': 8246,
'uni25C0': 9664,
'uni0156': 342,
'uni22D5': 8917,
'uni0258': 600,
'ugrave': 249,
'uni22D6': 8918,
'uni22D1': 8913,
'uni2034': 8244,
'uni22D3': 8915,
'uni22D2': 8914,
'uni203C': 8252,
'uni223E': 8766,
'uni02BF': 703,
'uni22D9': 8921,
'uni22D8': 8920,
'uni25BD': 9661,
'uni25BE': 9662,
'uni25BF': 9663,
'uni041B': 1051,
'periodcentered': 183,
'uni25BC': 9660,
'uni019E': 414,
'uni019B': 411,
'uni019A': 410,
'uni2007': 8199,
'uni0391': 913,
'uni0390': 912,
'uni0393': 915,
'uni0392': 914,
'uni0395': 917,
'uni0394': 916,
'uni0397': 919,
'uni0396': 918,
'uni0399': 921,
'uni0398': 920,
'uni25C8': 9672,
'uni2468': 9320,
'sterling': 163,
'uni22EB': 8939,
'uni039C': 924,
'uni039B': 923,
'uni039E': 926,
'uni039D': 925,
'uni039F': 927,
'I': 73,
'uni03E1': 993,
'uni03E0': 992,
'uni2319': 8985,
'uni228B': 8843,
'uni25B5': 9653,
'uni25B6': 9654,
'uni22EA': 8938,
'uni24B9': 9401,
'uni044E': 1102,
'uni0199': 409,
'uni2266': 8806,
'Y': 89,
'uni22A2': 8866,
'Eth': 208,
'uni266F': 9839,
'emdash': 8212,
'uni263B': 9787,
'uni24BD': 9405,
'uni22DE': 8926,
'uni0360': 864,
'uni2557': 9559,
'uni22DF': 8927,
'uni22DA': 8922,
'uni22DC': 8924,
'uni0361': 865,
'i': 105,
'uni24BF': 9407,
'uni0362': 866,
'uni263E': 9790,
'uni028D': 653,
'uni2259': 8793,
'uni0323': 803,
'uni2265': 8805,
'daggerdbl': 8225,
'y': 121,
'uni010A': 266,
'plusminus': 177,
'less': 60,
'uni21AE': 8622,
'uni0315': 789,
'uni230B': 8971,
'uni21AF': 8623,
'uni21AA': 8618,
'uni21AC': 8620,
'uni21AB': 8619,
'uni01FB': 507,
'uni01FC': 508,
'uni223A': 8762,
'uni01FA': 506,
'uni01FF': 511,
'uni01FD': 509,
'uni01FE': 510,
'uni2567': 9575,
'uni25E0': 9696,
'uni0104': 260,
'uni0105': 261,
'uni0106': 262,
'uni0107': 263,
'uni0100': 256,
'uni0101': 257,
'uni0102': 258,
'uni0103': 259,
'uni2038': 8248,
'uni2009': 8201,
'uni2008': 8200,
'uni0108': 264,
'uni0109': 265,
'uni02A1': 673,
'uni223B': 8763,
'uni226C': 8812,
'uni25AC': 9644,
'uni24D3': 9427,
'uni21E0': 8672,
'uni21E3': 8675,
'Udieresis': 220,
'uni21E2': 8674,
'D': 68,
'uni21E5': 8677,
'uni2621': 9761,
'uni21D1': 8657,
'uni203E': 8254,
'uni22C6': 8902,
'uni21E4': 8676,
'uni010D': 269,
'uni010E': 270,
'uni010F': 271,
'five': 53,
'T': 84,
'uni010B': 267,
'uni010C': 268,
'uni2605': 9733,
'uni2663': 9827,
'uni21E6': 8678,
'uni24B6': 9398,
'uni22C1': 8897,
'oslash': 248,
'acute': 180,
'uni01F0': 496,
'd': 100,
'OE': 338,
'uni22E3': 8931,
'Igrave': 204,
'uni2308': 8968,
'uni2309': 8969,
'uni21A9': 8617,
't': 116,
'uni2313': 8979,
'uni03A3': 931,
'uni21A4': 8612,
'uni21A7': 8615,
'uni21A6': 8614,
'uni21A1': 8609,
'uni21A0': 8608,
'uni21A3': 8611,
'uni21A2': 8610,
'parenright': 41,
'uni256A': 9578,
'uni25DC': 9692,
'uni24CE': 9422,
'uni042C': 1068,
'uni24E0': 9440,
'uni042B': 1067,
'uni0409': 1033,
'uni0408': 1032,
'uni24E7': 9447,
'uni25B4': 9652,
'uni042A': 1066,
'uni228E': 8846,
'uni0401': 1025,
'adieresis': 228,
'uni0403': 1027,
'quotesingle': 39,
'uni0405': 1029,
'uni0404': 1028,
'uni0407': 1031,
'uni0406': 1030,
'uni229C': 8860,
'uni2306': 8966,
'uni2253': 8787,
'twodotenleader': 8229,
'uni2131': 8497,
'uni21DA': 8666,
'uni2234': 8756,
'uni2235': 8757,
'uni01A5': 421,
'uni2237': 8759,
'uni2230': 8752,
'uni02CC': 716,
'slash': 47,
'uni01A0': 416,
'ellipsis': 8230,
'uni2299': 8857,
'uni2238': 8760,
'numbersign': 35,
'uni21A8': 8616,
'uni223D': 8765,
'uni01AF': 431,
'uni223F': 8767,
'uni01AD': 429,
'uni01AB': 427,
'odieresis': 246,
'uni223C': 8764,
'uni227D': 8829,
'uni0280': 640,
'O': 79,
'uni227E': 8830,
'uni21A5': 8613,
'uni22D4': 8916,
'uni25D4': 9684,
'uni227F': 8831,
'uni0435': 1077,
'uni2302': 8962,
'uni2669': 9833,
'uni24E3': 9443,
'uni2720': 10016,
'uni22A8': 8872,
'uni22A9': 8873,
'uni040A': 1034,
'uni22A7': 8871,
'oe': 339,
'uni040B': 1035,
'uni040E': 1038,
'uni22A3': 8867,
'o': 111,
'uni040F': 1039,
'Edieresis': 203,
'uni25D5': 9685,
'plus': 43,
'uni044D': 1101,
'uni263C': 9788,
'uni22E6': 8934,
'uni2283': 8835,
'uni258C': 9612,
'uni219E': 8606,
'uni24E4': 9444,
'uni2136': 8502,
'dagger': 8224,
'uni24B7': 9399,
'uni219B': 8603,
'uni22E5': 8933,
'three': 51,
'uni210B': 8459,
'uni2534': 9524,
'uni24B8': 9400,
'uni230A': 8970,
'hungarumlaut': 733,
'parenleft': 40,
'uni0148': 328,
'uni0149': 329,
'uni2124': 8484,
'uni2125': 8485,
'uni2126': 8486,
'uni2127': 8487,
'uni0140': 320,
'uni2129': 8489,
'uni25C5': 9669,
'uni0143': 323,
'uni0144': 324,
'uni0145': 325,
'uni0146': 326,
'uni0147': 327,
'uni210D': 8461,
'fraction': 8260,
'uni2031': 8241,
'uni2196': 8598,
'uni2035': 8245,
'uni24E6': 9446,
'uni016B': 363,
'uni24BA': 9402,
'uni266A': 9834,
'uni0116': 278,
'uni2115': 8469,
'registered': 174,
'J': 74,
'uni25DF': 9695,
'uni25CE': 9678,
'uni273D': 10045,
'dieresis': 168,
'uni212B': 8491,
'uni0114': 276,
'uni212D': 8493,
'uni212E': 8494,
'uni212F': 8495,
'uni014A': 330,
'uni014B': 331,
'uni014C': 332,
'uni014D': 333,
'uni014E': 334,
'uni014F': 335,
'uni025E': 606,
'uni24E8': 9448,
'uni0111': 273,
'uni24E9': 9449,
'Ograve': 210,
'j': 106,
'uni2195': 8597,
'uni2194': 8596,
'uni2197': 8599,
'uni2037': 8247,
'uni2191': 8593,
'uni2190': 8592,
'uni2193': 8595,
'uni2192': 8594,
'uni29FA': 10746,
'uni2713': 10003,
'z': 122,
'uni2199': 8601,
'uni2198': 8600,
'uni2667': 9831,
'ae': 230,
'uni0448': 1096,
'semicolon': 59,
'uni2666': 9830,
'uni038F': 911,
'uni0444': 1092,
'uni0447': 1095,
'uni038E': 910,
'uni0441': 1089,
'uni038C': 908,
'uni0443': 1091,
'uni038A': 906,
'uni0250': 592,
'uni0251': 593,
'uni0252': 594,
'uni0253': 595,
'uni0254': 596,
'at': 64,
'uni0256': 598,
'uni0257': 599,
'uni0167': 359,
'uni0259': 601,
'uni228C': 8844,
'uni2662': 9826,
'uni0319': 793,
'uni0318': 792,
'uni24BC': 9404,
'uni0402': 1026,
'uni22EF': 8943,
'Iacute': 205,
'uni22ED': 8941,
'uni22EE': 8942,
'uni0311': 785,
'uni0310': 784,
'uni21E8': 8680,
'uni0312': 786,
'percent': 37,
'uni0317': 791,
'uni0316': 790,
'uni21D6': 8662,
'uni21D7': 8663,
'uni21D4': 8660,
'uni21D5': 8661,
'uni21D2': 8658,
'uni21D3': 8659,
'uni21D0': 8656,
'uni2138': 8504,
'uni2270': 8816,
'uni2271': 8817,
'uni2272': 8818,
'uni2273': 8819,
'uni2274': 8820,
'uni2275': 8821,
'bracketright': 93,
'uni21D9': 8665,
'uni21DF': 8671,
'uni21DD': 8669,
'uni21DE': 8670,
'AE': 198,
'uni03AE': 942,
'uni227A': 8826,
'uni227B': 8827,
'uni227C': 8828,
'asterisk': 42,
'aacute': 225,
'uni226F': 8815,
'uni22E2': 8930,
'uni0386': 902,
'uni22E0': 8928,
'uni22E1': 8929,
'U': 85,
'uni22E7': 8935,
'uni22E4': 8932,
'uni0387': 903,
'uni031A': 794,
'eacute': 233,
'uni22E8': 8936,
'uni22E9': 8937,
'uni24D8': 9432,
'uni025A': 602,
'uni025B': 603,
'uni025C': 604,
'e': 101,
'uni0128': 296,
'uni025F': 607,
'uni2665': 9829,
'thorn': 254,
'uni0129': 297,
'uni253C': 9532,
'uni25D7': 9687,
'u': 117,
'uni0388': 904,
'uni0389': 905,
'uni0255': 597,
'uni0171': 369,
'uni0384': 900,
'uni0385': 901,
'uni044A': 1098,
'uni252C': 9516,
'uni044C': 1100,
'uni044B': 1099}
uni2type1 = dict([(v,k) for k,v in type12uni.items()])
tex2uni = {
'widehat': 0x0302,
'widetilde': 0x0303,
'langle': 0x27e8,
'rangle': 0x27e9,
'perp': 0x27c2,
'neq': 0x2260,
'Join': 0x2a1d,
'leqslant': 0x2a7d,
'geqslant': 0x2a7e,
'lessapprox': 0x2a85,
'gtrapprox': 0x2a86,
'lesseqqgtr': 0x2a8b,
'gtreqqless': 0x2a8c,
'triangleeq': 0x225c,
'eqslantless': 0x2a95,
'eqslantgtr': 0x2a96,
'backepsilon': 0x03f6,
'precapprox': 0x2ab7,
'succapprox': 0x2ab8,
'fallingdotseq': 0x2252,
'subseteqq': 0x2ac5,
'supseteqq': 0x2ac6,
'varpropto': 0x221d,
'precnapprox': 0x2ab9,
'succnapprox': 0x2aba,
'subsetneqq': 0x2acb,
'supsetneqq': 0x2acc,
'lnapprox': 0x2ab9,
'gnapprox': 0x2aba,
'longleftarrow': 0x27f5,
'longrightarrow': 0x27f6,
'longleftrightarrow': 0x27f7,
'Longleftarrow': 0x27f8,
'Longrightarrow': 0x27f9,
'Longleftrightarrow': 0x27fa,
'longmapsto': 0x27fc,
'leadsto': 0x21dd,
'dashleftarrow': 0x290e,
'dashrightarrow': 0x290f,
'circlearrowleft': 0x21ba,
'circlearrowright': 0x21bb,
'leftrightsquigarrow': 0x21ad,
'leftsquigarrow': 0x219c,
'rightsquigarrow': 0x219d,
'Game': 0x2141,
'hbar': 0x0127,
'hslash': 0x210f,
'ldots': 0x22ef,
'vdots': 0x22ee,
'doteqdot': 0x2251,
'doteq': 8784,
'partial': 8706,
'gg': 8811,
'asymp': 8781,
'blacktriangledown': 9662,
'otimes': 8855,
'nearrow': 8599,
'varpi': 982,
'vee': 8744,
'vec': 8407,
'smile': 8995,
'succnsim': 8937,
'gimel': 8503,
'vert': 124,
'|': 124,
'varrho': 1009,
'P': 182,
'approxident': 8779,
'Swarrow': 8665,
'textasciicircum': 94,
'imageof': 8887,
'ntriangleleft': 8938,
'nleq': 8816,
'div': 247,
'nparallel': 8742,
'Leftarrow': 8656,
'lll': 8920,
'oiint': 8751,
'ngeq': 8817,
'Theta': 920,
'origof': 8886,
'blacksquare': 9632,
'solbar': 9023,
'neg': 172,
'sum': 8721,
'Vdash': 8873,
'coloneq': 8788,
'degree': 176,
'bowtie': 8904,
'blacktriangleright': 9654,
'varsigma': 962,
'leq': 8804,
'ggg': 8921,
'lneqq': 8808,
'scurel': 8881,
'stareq': 8795,
'BbbN': 8469,
'nLeftarrow': 8653,
'nLeftrightarrow': 8654,
'k': 808,
'bot': 8869,
'BbbC': 8450,
'Lsh': 8624,
'leftleftarrows': 8647,
'BbbZ': 8484,
'digamma': 989,
'BbbR': 8477,
'BbbP': 8473,
'BbbQ': 8474,
'vartriangleright': 8883,
'succsim': 8831,
'wedge': 8743,
'lessgtr': 8822,
'veebar': 8891,
'mapsdown': 8615,
'Rsh': 8625,
'chi': 967,
'prec': 8826,
'nsubseteq': 8840,
'therefore': 8756,
'eqcirc': 8790,
'textexclamdown': 161,
'nRightarrow': 8655,
'flat': 9837,
'notin': 8713,
'llcorner': 8990,
'varepsilon': 949,
'bigtriangleup': 9651,
'aleph': 8501,
'dotminus': 8760,
'upsilon': 965,
'Lambda': 923,
'cap': 8745,
'barleftarrow': 8676,
'mu': 956,
'boxplus': 8862,
'mp': 8723,
'circledast': 8859,
'tau': 964,
'in': 8712,
'backslash': 92,
'varnothing': 8709,
'sharp': 9839,
'eqsim': 8770,
'gnsim': 8935,
'Searrow': 8664,
'updownarrows': 8645,
'heartsuit': 9825,
'trianglelefteq': 8884,
'ddag': 8225,
'sqsubseteq': 8849,
'mapsfrom': 8612,
'boxbar': 9707,
'sim': 8764,
'Nwarrow': 8662,
'nequiv': 8802,
'succ': 8827,
'vdash': 8866,
'Leftrightarrow': 8660,
'parallel': 8741,
'invnot': 8976,
'natural': 9838,
'ss': 223,
'uparrow': 8593,
'nsim': 8769,
'hookrightarrow': 8618,
'Equiv': 8803,
'approx': 8776,
'Vvdash': 8874,
'nsucc': 8833,
'leftrightharpoons': 8651,
'Re': 8476,
'boxminus': 8863,
'equiv': 8801,
'Lleftarrow': 8666,
'thinspace': 8201,
'll': 8810,
'Cup': 8915,
'measeq': 8798,
'upharpoonleft': 8639,
'lq': 8216,
'Upsilon': 933,
'subsetneq': 8842,
'greater': 62,
'supsetneq': 8843,
'Cap': 8914,
'L': 321,
'spadesuit': 9824,
'lrcorner': 8991,
'not': 824,
'bar': 772,
'rightharpoonaccent': 8401,
'boxdot': 8865,
'l': 322,
'leftharpoondown': 8637,
'bigcup': 8899,
'iint': 8748,
'bigwedge': 8896,
'downharpoonleft': 8643,
'textasciitilde': 126,
'subset': 8834,
'leqq': 8806,
'mapsup': 8613,
'nvDash': 8877,
'looparrowleft': 8619,
'nless': 8814,
'rightarrowbar': 8677,
'Vert': 8214,
'downdownarrows': 8650,
'uplus': 8846,
'simeq': 8771,
'napprox': 8777,
'ast': 8727,
'twoheaduparrow': 8607,
'doublebarwedge': 8966,
'Sigma': 931,
'leftharpoonaccent': 8400,
'ntrianglelefteq': 8940,
'nexists': 8708,
'times': 215,
'measuredangle': 8737,
'bumpeq': 8783,
'carriagereturn': 8629,
'adots': 8944,
'checkmark': 10003,
'lambda': 955,
'xi': 958,
'rbrace': 125,
'rbrack': 93,
'Nearrow': 8663,
'maltese': 10016,
'clubsuit': 9827,
'top': 8868,
'overarc': 785,
'varphi': 966,
'Delta': 916,
'iota': 953,
'nleftarrow': 8602,
'candra': 784,
'supset': 8835,
'triangleleft': 9665,
'gtreqless': 8923,
'ntrianglerighteq': 8941,
'quad': 8195,
'Xi': 926,
'gtrdot': 8919,
'leftthreetimes': 8907,
'minus': 8722,
'preccurlyeq': 8828,
'nleftrightarrow': 8622,
'lambdabar': 411,
'blacktriangle': 9652,
'kernelcontraction': 8763,
'Phi': 934,
'angle': 8736,
'spadesuitopen': 9828,
'eqless': 8924,
'mid': 8739,
'varkappa': 1008,
'Ldsh': 8626,
'updownarrow': 8597,
'beta': 946,
'textquotedblleft': 8220,
'rho': 961,
'alpha': 945,
'intercal': 8890,
'beth': 8502,
'grave': 768,
'acwopencirclearrow': 8634,
'nmid': 8740,
'nsupset': 8837,
'sigma': 963,
'dot': 775,
'Rightarrow': 8658,
'turnednot': 8985,
'backsimeq': 8909,
'leftarrowtail': 8610,
'approxeq': 8778,
'curlyeqsucc': 8927,
'rightarrowtail': 8611,
'Psi': 936,
'copyright': 169,
'yen': 165,
'vartriangleleft': 8882,
'rasp': 700,
'triangleright': 9655,
'precsim': 8830,
'infty': 8734,
'geq': 8805,
'updownarrowbar': 8616,
'precnsim': 8936,
'H': 779,
'ulcorner': 8988,
'looparrowright': 8620,
'ncong': 8775,
'downarrow': 8595,
'circeq': 8791,
'subseteq': 8838,
'bigstar': 9733,
'prime': 8242,
'lceil': 8968,
'Rrightarrow': 8667,
'oiiint': 8752,
'curlywedge': 8911,
'vDash': 8872,
'lfloor': 8970,
'ddots': 8945,
'exists': 8707,
'underbar': 817,
'Pi': 928,
'leftrightarrows': 8646,
'sphericalangle': 8738,
'coprod': 8720,
'circledcirc': 8858,
'gtrsim': 8819,
'gneqq': 8809,
'between': 8812,
'theta': 952,
'complement': 8705,
'arceq': 8792,
'nVdash': 8878,
'S': 167,
'wr': 8768,
'wp': 8472,
'backcong': 8780,
'lasp': 701,
'c': 807,
'nabla': 8711,
'dotplus': 8724,
'eta': 951,
'forall': 8704,
'eth': 240,
'colon': 58,
'sqcup': 8852,
'rightrightarrows': 8649,
'sqsupset': 8848,
'mapsto': 8614,
'bigtriangledown': 9661,
'sqsupseteq': 8850,
'propto': 8733,
'pi': 960,
'pm': 177,
'dots': 8230,
'nrightarrow': 8603,
'textasciiacute': 180,
'Doteq': 8785,
'breve': 774,
'sqcap': 8851,
'twoheadrightarrow': 8608,
'kappa': 954,
'vartriangle': 9653,
'diamondsuit': 9826,
'pitchfork': 8916,
'blacktriangleleft': 9664,
'nprec': 8832,
'vdots': 8942,
'curvearrowright': 8631,
'barwedge': 8892,
'multimap': 8888,
'textquestiondown': 191,
'cong': 8773,
'rtimes': 8906,
'rightzigzagarrow': 8669,
'rightarrow': 8594,
'leftarrow': 8592,
'__sqrt__': 8730,
'twoheaddownarrow': 8609,
'oint': 8750,
'bigvee': 8897,
'eqdef': 8797,
'sterling': 163,
'phi': 981,
'Updownarrow': 8661,
'backprime': 8245,
'emdash': 8212,
'Gamma': 915,
'i': 305,
'rceil': 8969,
'leftharpoonup': 8636,
'Im': 8465,
'curvearrowleft': 8630,
'wedgeq': 8793,
'fallingdotseq': 8786,
'curlyeqprec': 8926,
'questeq': 8799,
'less': 60,
'upuparrows': 8648,
'tilde': 771,
'textasciigrave': 96,
'smallsetminus': 8726,
'ell': 8467,
'cup': 8746,
'danger': 9761,
'nVDash': 8879,
'cdotp': 183,
'cdots': 8943,
'hat': 770,
'eqgtr': 8925,
'enspace': 8194,
'psi': 968,
'frown': 8994,
'acute': 769,
'downzigzagarrow': 8623,
'ntriangleright': 8939,
'cupdot': 8845,
'circleddash': 8861,
'oslash': 8856,
'mho': 8487,
'd': 803,
'sqsubset': 8847,
'cdot': 8901,
'Omega': 937,
'OE': 338,
'veeeq': 8794,
'Finv': 8498,
't': 865,
'leftrightarrow': 8596,
'swarrow': 8601,
'rightthreetimes': 8908,
'rightleftharpoons': 8652,
'lesssim': 8818,
'searrow': 8600,
'because': 8757,
'gtrless': 8823,
'star': 8902,
'nsubset': 8836,
'zeta': 950,
'dddot': 8411,
'bigcirc': 9675,
'Supset': 8913,
'circ': 8728,
'slash': 8725,
'ocirc': 778,
'prod': 8719,
'twoheadleftarrow': 8606,
'daleth': 8504,
'upharpoonright': 8638,
'odot': 8857,
'Uparrow': 8657,
'O': 216,
'hookleftarrow': 8617,
'trianglerighteq': 8885,
'nsime': 8772,
'oe': 339,
'nwarrow': 8598,
'o': 248,
'ddddot': 8412,
'downharpoonright': 8642,
'succcurlyeq': 8829,
'gamma': 947,
'scrR': 8475,
'dag': 8224,
'thickspace': 8197,
'frakZ': 8488,
'lessdot': 8918,
'triangledown': 9663,
'ltimes': 8905,
'scrB': 8492,
'endash': 8211,
'scrE': 8496,
'scrF': 8497,
'scrH': 8459,
'scrI': 8464,
'rightharpoondown': 8641,
'scrL': 8466,
'scrM': 8499,
'frakC': 8493,
'nsupseteq': 8841,
'circledR': 174,
'circledS': 9416,
'ngtr': 8815,
'bigcap': 8898,
'scre': 8495,
'Downarrow': 8659,
'scrg': 8458,
'overleftrightarrow': 8417,
'scro': 8500,
'lnsim': 8934,
'eqcolon': 8789,
'curlyvee': 8910,
'urcorner': 8989,
'lbrace': 123,
'Bumpeq': 8782,
'delta': 948,
'boxtimes': 8864,
'overleftarrow': 8406,
'prurel': 8880,
'clubsuitopen': 9831,
'cwopencirclearrow': 8635,
'geqq': 8807,
'rightleftarrows': 8644,
'ac': 8766,
'ae': 230,
'int': 8747,
'rfloor': 8971,
'risingdotseq': 8787,
'nvdash': 8876,
'diamond': 8900,
'ddot': 776,
'backsim': 8765,
'oplus': 8853,
'triangleq': 8796,
'check': 780,
'ni': 8715,
'iiint': 8749,
'ne': 8800,
'lesseqgtr': 8922,
'obar': 9021,
'supseteq': 8839,
'nu': 957,
'AA': 8491,
'AE': 198,
'models': 8871,
'ominus': 8854,
'dashv': 8867,
'omega': 969,
'rq': 8217,
'Subset': 8912,
'rightharpoonup': 8640,
'Rdsh': 8627,
'bullet': 8729,
'divideontimes': 8903,
'lbrack': 91,
'textquotedblright': 8221,
'Colon': 8759,
'%': 37,
'$': 36,
'{': 123,
'}': 125,
'_': 95,
'imath': 0x131,
'circumflexaccent' : 770,
'combiningbreve' : 774,
'combiningoverline' : 772,
'combininggraveaccent' : 768,
'combiningacuteaccent' : 769,
'combiningdiaeresis' : 776,
'combiningtilde' : 771,
'combiningrightarrowabove' : 8407,
'combiningdotabove' : 775,
'to': 8594,
'succeq': 8829,
'emptyset': 8709,
'leftparen': 40,
'rightparen': 41,
'bigoplus': 10753,
'leftangle': 10216,
'rightangle': 10217,
    'leftbrace': 123,
'rightbrace': 125,
'jmath': 567,
'bigodot': 10752,
'preceq': 8828,
'biguplus': 10756,
'epsilon': 949,
'vartheta': 977,
'bigotimes': 10754
}
# Each element is a 4-tuple of the form:
# src_start, src_end, dst_font, dst_start
# (see the illustrative lookup sketch following the table below)
stix_virtual_fonts = {
'bb':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'rm', 0x1d538), # A-B
(0x0043, 0x0043, 'rm', 0x2102), # C
(0x0044, 0x0047, 'rm', 0x1d53b), # D-G
(0x0048, 0x0048, 'rm', 0x210d), # H
(0x0049, 0x004d, 'rm', 0x1d540), # I-M
(0x004e, 0x004e, 'rm', 0x2115), # N
(0x004f, 0x004f, 'rm', 0x1d546), # O
(0x0050, 0x0051, 'rm', 0x2119), # P-Q
(0x0052, 0x0052, 'rm', 0x211d), # R
(0x0053, 0x0059, 'rm', 0x1d54a), # S-Y
(0x005a, 0x005a, 'rm', 0x2124), # Z
(0x0061, 0x007a, 'rm', 0x1d552), # a-z
(0x0393, 0x0393, 'rm', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'rm', 0x213f), # \Pi
(0x03a3, 0x03a3, 'rm', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'rm', 0x213d), # \gamma
(0x03c0, 0x03c0, 'rm', 0x213c), # \pi
],
'it':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'it', 0xe154), # A-B
(0x0043, 0x0043, 'it', 0x2102), # C (missing in beta STIX fonts)
(0x0044, 0x0044, 'it', 0x2145), # D
(0x0045, 0x0047, 'it', 0xe156), # E-G
(0x0048, 0x0048, 'it', 0x210d), # H (missing in beta STIX fonts)
(0x0049, 0x004d, 'it', 0xe159), # I-M
(0x004e, 0x004e, 'it', 0x2115), # N (missing in beta STIX fonts)
(0x004f, 0x004f, 'it', 0xe15e), # O
(0x0050, 0x0051, 'it', 0x2119), # P-Q (missing in beta STIX fonts)
(0x0052, 0x0052, 'it', 0x211d), # R (missing in beta STIX fonts)
(0x0053, 0x0059, 'it', 0xe15f), # S-Y
(0x005a, 0x005a, 'it', 0x2124), # Z (missing in beta STIX fonts)
(0x0061, 0x0063, 'it', 0xe166), # a-c
(0x0064, 0x0065, 'it', 0x2146), # d-e
(0x0066, 0x0068, 'it', 0xe169), # f-h
(0x0069, 0x006a, 'it', 0x2148), # i-j
(0x006b, 0x007a, 'it', 0xe16c), # k-z
(0x0393, 0x0393, 'it', 0x213e), # \Gamma (missing in beta STIX fonts)
(0x03a0, 0x03a0, 'it', 0x213f), # \Pi
(0x03a3, 0x03a3, 'it', 0x2140), # \Sigma (missing in beta STIX fonts)
(0x03b3, 0x03b3, 'it', 0x213d), # \gamma (missing in beta STIX fonts)
(0x03c0, 0x03c0, 'it', 0x213c), # \pi
],
'bf':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x005a, 'bf', 0xe38a), # A-Z
(0x0061, 0x007a, 'bf', 0xe39d), # a-z
(0x0393, 0x0393, 'bf', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'bf', 0x213f), # \Pi
(0x03a3, 0x03a3, 'bf', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'bf', 0x213d), # \gamma
(0x03c0, 0x03c0, 'bf', 0x213c), # \pi
],
},
'cal':
[
(0x0041, 0x005a, 'it', 0xe22d), # A-Z
],
'circled':
{
'rm':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'rm', 0x24b6), # A-Z
(0x0061, 0x007a, 'rm', 0x24d0) # a-z
],
'it':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'it', 0x24b6), # A-Z
(0x0061, 0x007a, 'it', 0x24d0) # a-z
],
'bf':
[
(0x0030, 0x0030, 'bf', 0x24ea), # 0
(0x0031, 0x0039, 'bf', 0x2460), # 1-9
(0x0041, 0x005a, 'bf', 0x24b6), # A-Z
(0x0061, 0x007a, 'bf', 0x24d0) # a-z
],
},
'frak':
{
'rm':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'it':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'bf':
[
(0x0041, 0x005a, 'bf', 0x1d56c), # A-Z
(0x0061, 0x007a, 'bf', 0x1d586), # a-z
],
},
'scr':
[
(0x0041, 0x0041, 'it', 0x1d49c), # A
(0x0042, 0x0042, 'it', 0x212c), # B
(0x0043, 0x0044, 'it', 0x1d49e), # C-D
(0x0045, 0x0046, 'it', 0x2130), # E-F
(0x0047, 0x0047, 'it', 0x1d4a2), # G
(0x0048, 0x0048, 'it', 0x210b), # H
(0x0049, 0x0049, 'it', 0x2110), # I
(0x004a, 0x004b, 'it', 0x1d4a5), # J-K
(0x004c, 0x004c, 'it', 0x2112), # L
        (0x004d, 0x004d, 'it', 0x2133), # M
(0x004e, 0x0051, 'it', 0x1d4a9), # N-Q
(0x0052, 0x0052, 'it', 0x211b), # R
(0x0053, 0x005a, 'it', 0x1d4ae), # S-Z
(0x0061, 0x0064, 'it', 0x1d4b6), # a-d
(0x0065, 0x0065, 'it', 0x212f), # e
(0x0066, 0x0066, 'it', 0x1d4bb), # f
(0x0067, 0x0067, 'it', 0x210a), # g
(0x0068, 0x006e, 'it', 0x1d4bd), # h-n
(0x006f, 0x006f, 'it', 0x2134), # o
(0x0070, 0x007a, 'it', 0x1d4c5), # p-z
],
'sf':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'rm', 0x1d5a0), # A-Z
(0x0061, 0x007a, 'rm', 0x1d5ba), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'rm', 0xe196), # \alpha-\omega
(0x03d1, 0x03d1, 'rm', 0xe1b0), # theta variant
(0x03d5, 0x03d5, 'rm', 0xe1b1), # phi variant
(0x03d6, 0x03d6, 'rm', 0xe1b3), # pi variant
(0x03f1, 0x03f1, 'rm', 0xe1b2), # rho variant
(0x03f5, 0x03f5, 'rm', 0xe1af), # lunate epsilon
(0x2202, 0x2202, 'rm', 0xe17c), # partial differential
],
'it':
[
# These numerals are actually upright. We don't actually
# want italic numerals ever.
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'it', 0x1d608), # A-Z
(0x0061, 0x007a, 'it', 0x1d622), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'it', 0xe1d8), # \alpha-\omega
(0x03d1, 0x03d1, 'it', 0xe1f2), # theta variant
(0x03d5, 0x03d5, 'it', 0xe1f3), # phi variant
(0x03d6, 0x03d6, 'it', 0xe1f5), # pi variant
(0x03f1, 0x03f1, 'it', 0xe1f4), # rho variant
(0x03f5, 0x03f5, 'it', 0xe1f1), # lunate epsilon
],
'bf':
[
(0x0030, 0x0039, 'bf', 0x1d7ec), # 0-9
(0x0041, 0x005a, 'bf', 0x1d5d4), # A-Z
(0x0061, 0x007a, 'bf', 0x1d5ee), # a-z
(0x0391, 0x03a9, 'bf', 0x1d756), # \Alpha-\Omega
(0x03b1, 0x03c9, 'bf', 0x1d770), # \alpha-\omega
(0x03d1, 0x03d1, 'bf', 0x1d78b), # theta variant
(0x03d5, 0x03d5, 'bf', 0x1d78d), # phi variant
(0x03d6, 0x03d6, 'bf', 0x1d78f), # pi variant
(0x03f0, 0x03f0, 'bf', 0x1d78c), # kappa variant
(0x03f1, 0x03f1, 'bf', 0x1d78e), # rho variant
(0x03f5, 0x03f5, 'bf', 0x1d78a), # lunate epsilon
(0x2202, 0x2202, 'bf', 0x1d789), # partial differential
(0x2207, 0x2207, 'bf', 0x1d76f), # \Nabla
],
},
'tt':
[
(0x0030, 0x0039, 'rm', 0x1d7f6), # 0-9
(0x0041, 0x005a, 'rm', 0x1d670), # A-Z
(0x0061, 0x007a, 'rm', 0x1d68a) # a-z
],
}
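# Editor's illustration (a hedged sketch, not part of the original module):
# how a virtual-font 4-tuple is consumed. A source codepoint c with
# src_start <= c <= src_end renders from font dst_font at codepoint
# dst_start + (c - src_start). The helper name below is hypothetical.
def _example_virtual_font_lookup(mapping, codepoint):
    """Return (font, remapped codepoint) for *codepoint*, or None if unmapped."""
    for src_start, src_end, dst_font, dst_start in mapping:
        if src_start <= codepoint <= src_end:
            return dst_font, dst_start + (codepoint - src_start)
    return None

# e.g. _example_virtual_font_lookup(stix_virtual_fonts['bb']['rm'], ord('N'))
# returns ('rm', 0x2115), i.e. DOUBLE-STRUCK CAPITAL N.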
| gpl-3.0 |
JackKelly/neuralnilm_prototype | scripts/e288.py | 2 | 5039 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
# max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.7,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
updates_func=momentum,
learning_rate=1e-03,
learning_rate_changes_by_iteration={
100: 5e-04,
500: 1e-04,
4000: 5e-05,
8000: 1e-05
# 3000: 5e-06,
# 4000: 1e-06,
# 10000: 5e-07,
# 50000: 1e-07
},
plotter=MDNPlotter
)
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
            'ds': 4, # pool size: downsample the time axis by a factor of 4
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
etraiger/PCWG | plots.py | 2 | 15226 | import os
import pandas as pd
from Analysis import chckMake
np = pd.np
class MatplotlibPlotter(object):
def __init__(self,path, analysis):
self.path = path
self.analysis = analysis
def plot_multiple(self, windSpeedCol, powerCol, meanPowerCurveObj):
try:
from matplotlib import pyplot as plt
plt.ioff()
plotTitle = "Power Curve"
meanPowerCurve = meanPowerCurveObj.powerCurveLevels[[windSpeedCol,powerCol,'Data Count']][meanPowerCurveObj.powerCurveLevels['Data Count'] > 0 ].reset_index().set_index(windSpeedCol)
ax = meanPowerCurve[powerCol].plot(color='#00FF00',alpha=0.95,linestyle='--',label='Mean Power Curve')
colourmap = plt.cm.gist_ncar
colours = [colourmap(i) for i in np.linspace(0, 0.9, len(self.analysis.dataFrame[self.analysis.nameColumn].unique()))]
for i,name in enumerate(self.analysis.dataFrame[self.analysis.nameColumn].unique()):
ax = self.analysis.dataFrame[self.analysis.dataFrame[self.analysis.nameColumn] == name].plot(ax = ax, kind='scatter', x=windSpeedCol, y=powerCol, title=plotTitle, alpha=0.2, label=name, color = colours[i])
ax.legend(loc=4, scatterpoints = 1)
ax.set_xlim([min(self.analysis.dataFrame[windSpeedCol].min(),meanPowerCurve.index.min()), max(self.analysis.dataFrame[windSpeedCol].max(),meanPowerCurve.index.max()+2.0)])
ax.set_xlabel(windSpeedCol + ' (m/s)')
ax.set_ylabel(powerCol + ' (kW)')
file_out = self.path + "/Multiple Dataset PowerCurve - " + powerCol + " vs " + windSpeedCol + ".png"
chckMake(self.path)
plt.savefig(file_out)
plt.close()
return file_out
except:
print "Tried to make a power curve scatter chart for multiple data source (%s). Couldn't." % meanPowerCurveObj.name
def plotPowerCurveSensitivityVariationMetrics(self):
try:
from matplotlib import pyplot as plt
plt.ioff()
(self.analysis.powerCurveSensitivityVariationMetrics*100.).plot(kind = 'bar', title = 'Summary of Power Curve Variation by Variable. Significance Threshold = %.2f%%' % (self.analysis.sensitivityAnalysisThreshold * 100), figsize = (12,8))
plt.ylabel('Variation Metric (%)')
file_out = self.path + os.sep + 'Power Curve Sensitivity Analysis Variation Metric Summary.png'
plt.savefig(file_out)
plt.close('all')
except:
print "Tried to plot summary of Power Curve Sensitivity Analysis Variation Metric. Couldn't."
self.analysis.powerCurveSensitivityVariationMetrics.to_csv(self.path + os.sep + 'Power Curve Sensitivity Analysis Variation Metric.csv')
def plotPowerCurveSensitivity(self, sensCol):
try:
df = self.analysis.powerCurveSensitivityResults[sensCol].reset_index()
from matplotlib import pyplot as plt
plt.ioff()
fig = plt.figure(figsize = (12,5))
fig.suptitle('Power Curve Sensitivity to %s' % sensCol)
ax1 = fig.add_subplot(121)
ax1.hold(True)
ax2 = fig.add_subplot(122)
ax2.hold(True)
power_column = self.analysis.measuredTurbulencePower if self.analysis.turbRenormActive else self.analysis.actualPower
for label in self.analysis.sensitivityLabels.keys():
filt = df['Bin'] == label
ax1.plot(df['Wind Speed Bin'][filt], df[power_column][filt], label = label, color = self.analysis.sensitivityLabels[label])
ax2.plot(df['Wind Speed Bin'][filt], df['Energy Delta MWh'][filt], label = label, color = self.analysis.sensitivityLabels[label])
ax1.set_xlabel('Wind Speed (m/s)')
ax1.set_ylabel('Power (kW)')
ax2.set_xlabel('Wind Speed (m/s)')
ax2.set_ylabel('Energy Difference from Mean (MWh)')
box1 = ax1.get_position()
box2 = ax2.get_position()
ax1.set_position([box1.x0 - 0.05 * box1.width, box1.y0 + box1.height * 0.17,
box1.width * 0.95, box1.height * 0.8])
ax2.set_position([box2.x0 + 0.05 * box2.width, box2.y0 + box2.height * 0.17,
box2.width * 1.05, box2.height * 0.8])
handles, labels = ax1.get_legend_handles_labels()
fig.legend(handles, labels, loc='lower center', ncol = len(self.analysis.sensitivityLabels.keys()), fancybox = True, shadow = True)
file_out = self.path + os.sep + 'Power Curve Sensitivity to %s.png' % sensCol
chckMake(self.path)
fig.savefig(file_out)
plt.close()
except:
print "Tried to make a plot of power curve sensitivity to %s. Couldn't." % sensCol
def plotBy(self,by,variable,df):
import turbine
if not isinstance(df,turbine.PowerCurve):
kind = 'scatter'
else:
kind = 'line'
df=df.powerCurveLevels[df.powerCurveLevels['Input Hub Wind Speed'] <= self.analysis.allMeasuredPowerCurve.cutOutWindSpeed]
try:
from matplotlib import pyplot as plt
plt.ioff()
            ax = df.plot(kind=kind, x=by, y=variable, title=variable+" By "+by, alpha=0.6, legend=None)
ax.set_xlim([df[by].min()-1,df[by].max()+1])
ax.set_xlabel(by)
ax.set_ylabel(variable)
file_out = self.path + "/"+variable.replace(" ","_")+"_By_"+by.replace(" ","_")+".png"
chckMake(self.path)
plt.savefig(file_out)
plt.close()
return file_out
except:
print "Tried to make a " + variable.replace(" ","_") + "_By_"+by.replace(" ","_")+" chart. Couldn't."
def plotPowerCurve(self, windSpeedCol, powerCol, meanPowerCurveObj, anon = False, row_filt = None, fname = None, show_analysis_pc = True, mean_title = 'Mean Power Curve', mean_pc_color = '#00FF00'):
try:
from matplotlib import pyplot as plt
plt.ioff()
df = self.analysis.dataFrame.loc[row_filt, :] if row_filt is not None else self.analysis.dataFrame
if (windSpeedCol == self.analysis.densityCorrectedHubWindSpeed) or ((windSpeedCol == self.analysis.inputHubWindSpeed) and (self.analysis.densityCorrectionActive)):
plotTitle = "Power Curve (corrected to {dens} kg/m^3)".format(dens=self.analysis.referenceDensity)
else:
plotTitle = "Power Curve"
ax = df.plot(kind='scatter', x=windSpeedCol, y=powerCol, title=plotTitle, alpha=0.15, label='Filtered Data')
if self.analysis.specifiedPowerCurve is not None:
has_spec_pc = len(self.analysis.specifiedPowerCurve.powerCurveLevels.index) != 0
else:
has_spec_pc = False
if has_spec_pc:
ax = self.analysis.specifiedPowerCurve.powerCurveLevels.sort_index()['Specified Power'].plot(ax = ax, color='#FF0000',alpha=0.9,label='Specified')
if self.analysis.specifiedPowerCurve != self.analysis.powerCurve:
if ((self.analysis.powerCurve.name != 'All Measured') and show_analysis_pc):
ax = self.analysis.powerCurve.powerCurveLevels.sort_index()['Actual Power'].plot(ax = ax, color='#A37ACC',alpha=0.9,label=self.analysis.powerCurve.name)
meanPowerCurve = meanPowerCurveObj.powerCurveLevels[[windSpeedCol,powerCol,'Data Count']][self.analysis.allMeasuredPowerCurve.powerCurveLevels.loc[meanPowerCurveObj.powerCurveLevels.index, 'Data Count'] > 0].reset_index().set_index(windSpeedCol)
ax = meanPowerCurve[powerCol].plot(ax = ax,color=mean_pc_color,alpha=0.95,linestyle='--',
label=mean_title)
ax.legend(loc=4, scatterpoints = 1)
if has_spec_pc:
ax.set_xlim([self.analysis.specifiedPowerCurve.powerCurveLevels.index.min(), self.analysis.specifiedPowerCurve.powerCurveLevels.index.max()+2.0])
else:
ax.set_xlim([min(df[windSpeedCol].min(),meanPowerCurve.index.min()), max(df[windSpeedCol].max(),meanPowerCurve.index.max()+2.0)])
ax.set_xlabel(self.analysis.inputHubWindSpeedSource + ' (m/s)')
ax.set_ylabel(powerCol + ' (kW)')
if anon:
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
fname = ("PowerCurve - " + powerCol + " vs " + windSpeedCol + ".png") if fname is None else fname
file_out = self.path + os.sep + fname
chckMake(self.path)
plt.savefig(file_out)
plt.close()
return file_out
except:
            print "Tried to make a power curve scatter chart for %s. Couldn't." % meanPowerCurveObj.name
            raise
def plotTurbCorrectedPowerCurve(self, windSpeedCol, powerCol, meanPowerCurveObj):
try:
from matplotlib import pyplot as plt
plt.ioff()
if (windSpeedCol == self.analysis.densityCorrectedHubWindSpeed) or ((windSpeedCol == self.analysis.inputHubWindSpeed) and (self.analysis.densityCorrectionActive)):
plotTitle = "Power Curve (corrected to {dens} kg/m^3)".format(dens=self.analysis.referenceDensity)
else:
plotTitle = "Power Curve"
ax = self.analysis.dataFrame.plot(kind='scatter', x=windSpeedCol, y=powerCol, title=plotTitle, alpha=0.15, label='Filtered Data')
if self.analysis.specifiedPowerCurve is not None:
has_spec_pc = len(self.analysis.specifiedPowerCurve.powerCurveLevels.index) != 0
else:
has_spec_pc = False
if has_spec_pc:
ax = self.analysis.specifiedPowerCurve.powerCurveLevels.sort_index()['Specified Power'].plot(ax = ax, color='#FF0000',alpha=0.9,label='Specified')
meanPowerCurve = meanPowerCurveObj.powerCurveLevels[[windSpeedCol,powerCol,'Data Count']][self.analysis.allMeasuredPowerCurve.powerCurveLevels['Data Count'] > 0 ].reset_index().set_index(windSpeedCol)
ax = meanPowerCurve[powerCol].plot(ax = ax,color='#00FF00',alpha=0.95,linestyle='--',
label='Mean Power Curve')
ax2 = ax.twinx()
if has_spec_pc:
ax.set_xlim([self.analysis.specifiedPowerCurve.powerCurveLevels.index.min(), self.analysis.specifiedPowerCurve.powerCurveLevels.index.max()+2.0])
ax2.set_xlim([self.analysis.specifiedPowerCurve.powerCurveLevels.index.min(), self.analysis.specifiedPowerCurve.powerCurveLevels.index.max()+2.0])
else:
ax.set_xlim([min(self.analysis.dataFrame[windSpeedCol].min(),meanPowerCurve.index.min()), max(self.analysis.dataFrame[windSpeedCol].max(),meanPowerCurve.index.max()+2.0)])
ax2.set_xlim([min(self.analysis.dataFrame[windSpeedCol].min(),meanPowerCurve.index.min()), max(self.analysis.dataFrame[windSpeedCol].max(),meanPowerCurve.index.max()+2.0)])
ax.set_xlabel(self.analysis.inputHubWindSpeedSource + ' (m/s)')
ax.set_ylabel(powerCol + ' (kW)')
refTurbCol = 'Specified Turbulence' if self.analysis.powerCurveMode == 'Specified' else self.analysis.hubTurbulence
ax2.plot(self.analysis.powerCurve.powerCurveLevels.sort_index().index, self.analysis.powerCurve.powerCurveLevels.sort_index()[refTurbCol] * 100., 'm--', label = 'Reference TI')
ax2.set_ylabel('Reference TI (%)')
h1, l1 = ax.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
ax.legend(h1+h2, l1+l2, loc=4, scatterpoints = 1)
file_out = self.path + "/PowerCurve TI Corrected - " + powerCol + " vs " + windSpeedCol + ".png"
chckMake(self.path)
plt.savefig(file_out)
plt.close()
return file_out
except:
print "Tried to make a TI corrected power curve scatter chart for %s. Couldn't." % meanPowerCurveObj.name
def plotPowerLimits(self):
try:
from matplotlib import pyplot as plt
plt.ioff()
windSpeedCol = self.analysis.densityCorrectedHubWindSpeed
ax = self.analysis.dataFrame.plot(kind='scatter',x=windSpeedCol,y=self.analysis.actualPower ,title="Power Values Corrected to {dens} kg/m^3".format(dens=self.analysis.referenceDensity),alpha=0.5,label='Power Mean')
ax = self.analysis.dataFrame.plot(ax=ax,kind='scatter',x=windSpeedCol,y="Power Min",alpha=0.2,label='Power Min',color = 'orange')
ax = self.analysis.dataFrame.plot(ax=ax,kind='scatter',x=windSpeedCol,y="Power Max",alpha=0.2,label='Power Max',color = 'green')
ax = self.analysis.dataFrame.plot(ax=ax,kind='scatter',x=windSpeedCol,y="Power SD",alpha=0.2,label='Power SD',color = 'purple')
ax = self.analysis.specifiedPowerCurve.powerCurveLevels.sort_index()['Specified Power'].plot(ax = ax, color='#FF0000',alpha=0.9,label='Specified')
ax.set_xlim([self.analysis.specifiedPowerCurve.powerCurveLevels.index.min(), self.analysis.specifiedPowerCurve.powerCurveLevels.index.max()+2.0])
ax.legend(loc=4, scatterpoints = 1)
ax.set_xlabel(windSpeedCol)
ax.set_ylabel("Power [kW]")
file_out = self.path + "/PowerValues.png"
chckMake(self.path)
plt.savefig(file_out)
plt.close()
return file_out
except:
print "Tried to make a full power scatter chart. Couldn't."
def plotCalibrationSectors(self):
for datasetConf in self.analysis.datasetConfigs:
try:
from matplotlib import pyplot as plt
plt.ioff()
df = datasetConf.data.calibrationCalculator.calibrationSectorDataframe[['pctSpeedUp','LowerLimit','UpperLimit']].rename(columns={'pctSpeedUp':'% Speed Up','LowerLimit':"IEC Lower",'UpperLimit':"IEC Upper"})
df.plot(kind = 'line', title = 'Variation of wind speed ratio with direction', figsize = (12,8))
plt.ylabel('Wind Speed Ratio (Vturb/Vref) as %')
file_out = self.path + os.sep + 'Wind Speed Ratio with Direction - All Sectors {nm}.png'.format(nm=datasetConf.name)
plt.savefig(file_out)
df = df.loc[np.logical_and(df.index > datasetConf.data.fullDataFrame[datasetConf.data.referenceDirectionBin].min()-5.0 , df.index < datasetConf.data.fullDataFrame[datasetConf.data.referenceDirectionBin].max()+5.0),:]
df.plot(kind = 'line', title = 'Variation of wind speed ratio with direction', figsize = (12,8))
plt.ylabel('Wind Speed Ratio (Vturb/Vref) as %')
file_out = self.path + os.sep + 'Wind Speed Ratio with Direction - Selected Sectors {nm}.png'.format(nm=datasetConf.name)
chckMake(self.path)
plt.savefig(file_out)
plt.close('all')
except:
print "Tried to plot variation of wind speed ratio with direction. Couldn't."
| mit |
BillyLiggins/fitting | first.py | 1 | 7031 | import copy
import echidna
import echidna.output.plot as plot
import echidna.core.spectra as spectra
from echidna.output import store
import matplotlib.pyplot as plt
import argparse
import glob
import numpy as np
import os
def convertor(path):
flist=np.array(glob.glob(path))
for ntuple in flist:
os.system("python ~/echidna/echidna/scripts/dump_spectra_ntuple.py -c ~/workspace/PhD/fitting/config.yml -f "+ str(ntuple)+" -s hdf5/")
def combinerNtuple(path,filename):
flist=np.array(glob.glob(path))
print flist
first = True
for hdf5 in flist:
print hdf5
if first:
spectrum1 = store.fill_from_ntuple(hdf5)
first = False
else:
spectrum2 = store.fill_from_ntuple(hdf5)
spectrum1.add(spectrum2)
store.dump(filename, spectrum1)
def combiner(path,filename):
flist=np.array(glob.glob(path))
print flist
first = True
for hdf5 in flist:
print hdf5
if first:
spectrum1 = store.load(hdf5)
first = False
else:
spectrum2 = store.load(hdf5)
spectrum1.add(spectrum2)
store.dump(filename, spectrum1)
"""The way you should do it is to define a lot of spectra and then plot them.
You don't really know how to normlise the histrogram or indeed weather that is of any uses in the first
place.
"""
def slicer(spectrumPath,filler,nslice):
for i in range(nslice):
spectrum=store.load(spectrumPath)
print spectrum.sum()
shrink_dict = {"energy_reco_low": 0.,
"energy_reco_high": 0.6,
"radial_reco_low": i*6000.0/nslice,
"radial_reco_high": (i+1)*6000/nslice}
spectrum.cut(**shrink_dict)
spectrum.scale(1)
spec2=copy.copy(spectrum)
spec2._name=str(i*1000)+"mm to "+str((i+1)*1000)+"mm"
print type(spec2)
filler.append(spec2)
def slicerMC(spectrumPath,filler,nslice):
for i in range(nslice):
spectrum=store.load(spectrumPath)
print spectrum.sum()
shrink_dict = {"energy_mc_low": 0.,
"energy_mc_high": 1,
"radial_mc_low": i*6000.0/nslice,
"radial_mc_high": (i+1)*6000/nslice}
spectrum.cut(**shrink_dict)
spectrum.scale(1)
spec2=copy.copy(spectrum)
spec2._name="MC"
print type(spec2)
print "This gives the number os events in each window:"
print "mc : "+str(i*6000.0/nslice)+"mm to "+str((i+1)*6000.0/nslice)+"mm : "+str(spec2.sum())
filler.append(spec2)
def slicerReco(spectrumPath,filler,nslice):
for i in range(nslice):
spectrum=store.load(spectrumPath)
print spectrum.sum()
shrink_dict = {"energy_reco_low": 0.,
"energy_reco_high": 1.,
"radial_reco_low": i*6000.0/nslice,
"radial_reco_high": (i+1)*6000/nslice}
spectrum.cut(**shrink_dict)
spectrum.scale(1)
spec2=copy.copy(spectrum)
spec2._name="Reco"
print type(spec2)
print "This gives the number os events in each window:"
print "reco : "+str(i*6000.0/nslice)+"mm to "+str((i+1)*6000.0/nslice)+"mm : "+str(spec2.sum())
filler.append(spec2)
def signalPlotter(spectra,dim,name):
i=0
for spec in spectra:
fig = plt.figure()
ax= fig.add_subplot(1,1,1)
par = spec.get_config().get_par(dim)
width = par.get_width()
bins = np.linspace(par._low,par._high, par._bins+1)
x = bins[:-1] + 0.5*width
plt.xlabel(str(dim)+ " [" + par.get_unit() + "]")
plt.ylabel("Events per " + str(width) + " " + par.get_unit() + " bin")
ax.set(title="Normalised energy spectrum in "+str(i*1000)+"mm to "+str((i+1)*1000)+"mm ",ylabel="Events per " + str(width) + " " + par.get_unit() + " bin", xlabel=str(dim)+" [" + par.get_unit() + "]")
ax.hist(x,bins,weights=spec.project(dim),histtype="stepfilled", color="RoyalBlue",label=spec._name)
fig.savefig("slice_"+str(name)+"_"+str(i*1000)+"_"+str((i+1)*1000)+".png")
i=1+i
def combiPlotter(spectra,dim,name):
i=0
fig = plt.figure()
ax= fig.add_subplot(1,1,1)
for spec in spectra:
par = spec.get_config().get_par(dim)
width = par.get_width()
bins = np.linspace(par._low,par._high, par._bins+1)
x = bins[:-1] + 0.5*width
plt.xlabel(str(dim)+ " [" + par.get_unit() + "]")
plt.ylabel("Events per " + str(width) + " " + par.get_unit() + " bin")
ax.set(title="Normalised energy spectrum in 1000mm slices",ylabel="Events per " + str(width) + " " + par.get_unit() + " bin", xlabel="energy_reco"+ " [" + par.get_unit() + "]")
ax.hist(x,bins,weights=spec.project("energy_reco"),label=spec._name,histtype='step')
ax.set_ylim([0,0.03])
ax.set_xlim([0.2,0.7])
ax.legend(loc="best")
fig.savefig("combined_"+str(name)+".png")
def func(path,nslice,name):
spectra=[]
slicer(path,spectra,nslice)
signalPlotter(spectra,"energy_reco",name)
combiPlotter(spectra,"energy_reco",name)
def po210():
convertor("po210_ntuple/*")
combiner("hdf5/SolarPo**ntuple*","hdf5/SolarPo210_combined.hdf5")
plotpath="plots/"
func("hdf5/SolarPo210_combined.hdf5",6,"po210")
def bi210():
convertor("bi210_ntuple/*")
combiner("hdf5/SolarBi**ntuple*","hdf5/SolarBi210_combined.hdf5")
plotpath="plots/"
func("hdf5/SolarBi210_combined.hdf5",6,"bi210")
def compair(spectrumPathReco,spectrumPathMC,name):
spectraReco=[]
spectraMC=[]
slicerReco(spectrumPathReco,spectraReco,6)
slicerMC(spectrumPathMC,spectraMC,6)
for i in range(0,len(spectraReco)):
fig = plt.figure()
ax= fig.add_subplot(1,1,1)
par = spectraReco[i].get_config().get_par("energy_reco")
width = par.get_width()
bins = np.linspace(par._low,par._high, par._bins+1)
x = bins[:-1] + 0.5*width
ax.set(title="Normalised energy spectrum in "+str(i*1000)+"mm to "+str((i+1)*1000)+"mm ",ylabel="Events per " + str(width) + " " + par.get_unit() + " bin", xlabel="Energy [" + par.get_unit() + "]")
ax.hist(x,bins,weights=spectraReco[i].project("energy_reco"),histtype="stepfilled",label=spectraReco[i]._name)
par = spectraMC[i].get_config().get_par("energy_mc")
width = par.get_width()
bins = np.linspace(par._low,par._high, par._bins+1)
x = bins[:-1] + 0.5*width
ax.hist(x,bins,weights=spectraMC[i].project("energy_mc"),histtype="stepfilled",label=spectraMC[i]._name,alpha=0.75)
ax.legend(loc=2)
fig.savefig("compare_"+str(name)+"_"+str(i*1000)+"_"+str((i+1)*1000)+".png")
if __name__=="__main__":
print "You need to compare the recon against the mc"
print "You should bin in bigger bins becuase you could then bin in 4d"
"""You need to plot the standard spectra"""
| mit |
jobelenus/thegreco | ignore/tracegen.py | 1 | 1364 | #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et:wrap:ai:fileencoding=utf-8:
import collections
#import matplotlib.pyplot as plt
factor = 1/4
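# NB (editor's note): with Python 2 integer division the line above evaluates
# to 0; 'factor' is unused below (the rotate() calls compute len(...)/4
# inline).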
class TraceGenerator():
def __init__(self):
fname='/Users/jobelenus/work/thegreco/cpu.entries'
self.fname = fname
with open(self.fname) as f:
self.lines = f.readlines()
self.cpu = map(int, self.lines)
def gen_cpu_trace(self):
return self.cpu
def gen_mem_trace(self):
self.mem = collections.deque(self.cpu)
self.mem.rotate(len(self.cpu)/4)
return self.mem
def gen_disk_trace(self):
self.disk = collections.deque(self.cpu)
self.disk.rotate(2*len(self.cpu)/4)
return self.disk
def gen_net_trace(self):
self.net = collections.deque(self.cpu)
self.net.rotate(3*len(self.cpu)/4)
return self.net
def gen_trace(self):
self.gen_cpu_trace()
self.gen_mem_trace()
self.gen_disk_trace()
self.gen_net_trace()
self.trace = zip(self.cpu, self.mem, self.disk, self.net)
return self.trace
#tg = TraceGenerator()
#cpu = tg.gen_cpu_trace()
#mem = tg.gen_mem_trace()
#disk = tg.gen_disk_trace()
#net = tg.gen_net_trace()
#trace = zip(cpu, mem, disk, net)
#print trace
#plt.bar(range(0,len(cpu)), cpu)
#plt.show()
| gpl-3.0 |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/ipython-2.2.0-py2.7.egg/IPython/testing/iptestcontroller.py | 7 | 21202 | # -*- coding: utf-8 -*-
"""IPython Test Process Controller
This module runs one or more subprocesses which will actually run the IPython
test suite.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import argparse
import json
import multiprocessing.pool
import os
import shutil
import signal
import sys
import subprocess
import time
from .iptest import have, test_group_names as py_test_group_names, test_sections, StreamCapturer
from IPython.utils.path import compress_user
from IPython.utils.py3compat import bytes_to_str
from IPython.utils.sysinfo import get_sys_info
from IPython.utils.tempdir import TemporaryDirectory
class TestController(object):
"""Run tests in a subprocess
"""
#: str, IPython test suite to be executed.
section = None
#: list, command line arguments to be executed
cmd = None
#: dict, extra environment variables to set for the subprocess
env = None
#: list, TemporaryDirectory instances to clear up when the process finishes
dirs = None
#: subprocess.Popen instance
process = None
#: str, process stdout+stderr
stdout = None
def __init__(self):
self.cmd = []
self.env = {}
self.dirs = []
def setup(self):
"""Create temporary directories etc.
This is only called when we know the test group will be run. Things
created here may be cleaned up by self.cleanup().
"""
pass
def launch(self, buffer_output=False):
# print('*** ENV:', self.env) # dbg
# print('*** CMD:', self.cmd) # dbg
env = os.environ.copy()
env.update(self.env)
output = subprocess.PIPE if buffer_output else None
stdout = subprocess.STDOUT if buffer_output else None
self.process = subprocess.Popen(self.cmd, stdout=output,
stderr=stdout, env=env)
def wait(self):
self.stdout, _ = self.process.communicate()
return self.process.returncode
def print_extra_info(self):
"""Print extra information about this test run.
If we're running in parallel and showing the concise view, this is only
called if the test group fails. Otherwise, it's called before the test
group is started.
The base implementation does nothing, but it can be overridden by
subclasses.
"""
return
def cleanup_process(self):
"""Cleanup on exit by killing any leftover processes."""
subp = self.process
if subp is None or (subp.poll() is not None):
return # Process doesn't exist, or is already dead.
try:
print('Cleaning up stale PID: %d' % subp.pid)
subp.kill()
except: # (OSError, WindowsError) ?
# This is just a best effort, if we fail or the process was
# really gone, ignore it.
pass
else:
for i in range(10):
if subp.poll() is None:
time.sleep(0.1)
else:
break
if subp.poll() is None:
# The process did not die...
print('... failed. Manual cleanup may be required.')
def cleanup(self):
"Kill process if it's still alive, and clean up temporary directories"
self.cleanup_process()
for td in self.dirs:
td.cleanup()
__del__ = cleanup
class PyTestController(TestController):
"""Run Python tests using IPython.testing.iptest"""
#: str, Python command to execute in subprocess
pycmd = None
def __init__(self, section, options):
"""Create new test runner."""
TestController.__init__(self)
self.section = section
# pycmd is put into cmd[2] in PyTestController.launch()
self.cmd = [sys.executable, '-c', None, section]
self.pycmd = "from IPython.testing.iptest import run_iptest; run_iptest()"
self.options = options
def setup(self):
ipydir = TemporaryDirectory()
self.dirs.append(ipydir)
self.env['IPYTHONDIR'] = ipydir.name
self.workingdir = workingdir = TemporaryDirectory()
self.dirs.append(workingdir)
self.env['IPTEST_WORKING_DIR'] = workingdir.name
# This means we won't get odd effects from our own matplotlib config
self.env['MPLCONFIGDIR'] = workingdir.name
# From options:
if self.options.xunit:
self.add_xunit()
if self.options.coverage:
self.add_coverage()
self.env['IPTEST_SUBPROC_STREAMS'] = self.options.subproc_streams
self.cmd.extend(self.options.extra_args)
@property
def will_run(self):
try:
return test_sections[self.section].will_run
except KeyError:
return True
def add_xunit(self):
xunit_file = os.path.abspath(self.section + '.xunit.xml')
self.cmd.extend(['--with-xunit', '--xunit-file', xunit_file])
def add_coverage(self):
try:
sources = test_sections[self.section].includes
except KeyError:
sources = ['IPython']
coverage_rc = ("[run]\n"
"data_file = {data_file}\n"
"source =\n"
" {source}\n"
).format(data_file=os.path.abspath('.coverage.'+self.section),
source="\n ".join(sources))
config_file = os.path.join(self.workingdir.name, '.coveragerc')
with open(config_file, 'w') as f:
f.write(coverage_rc)
self.env['COVERAGE_PROCESS_START'] = config_file
self.pycmd = "import coverage; coverage.process_startup(); " + self.pycmd
def launch(self, buffer_output=False):
self.cmd[2] = self.pycmd
super(PyTestController, self).launch(buffer_output=buffer_output)
js_prefix = 'js/'
def get_js_test_dir():
import IPython.html.tests as t
return os.path.join(os.path.dirname(t.__file__), '')
def all_js_groups():
import glob
test_dir = get_js_test_dir()
all_subdirs = glob.glob(test_dir + '*/')
return [js_prefix+os.path.relpath(x, test_dir) for x in all_subdirs if os.path.relpath(x, test_dir) != '__pycache__']
class JSController(TestController):
"""Run CasperJS tests """
def __init__(self, section):
"""Create new test runner."""
TestController.__init__(self)
self.section = section
js_test_dir = get_js_test_dir()
includes = '--includes=' + os.path.join(js_test_dir,'util.js')
test_cases = os.path.join(js_test_dir, self.section[len(js_prefix):])
self.cmd = ['casperjs', 'test', includes, test_cases]
def setup(self):
self.ipydir = TemporaryDirectory()
self.nbdir = TemporaryDirectory()
self.dirs.append(self.ipydir)
self.dirs.append(self.nbdir)
os.makedirs(os.path.join(self.nbdir.name, os.path.join(u'sub ∂ir1', u'sub ∂ir 1a')))
os.makedirs(os.path.join(self.nbdir.name, os.path.join(u'sub ∂ir2', u'sub ∂ir 1b')))
# start the ipython notebook, so we get the port number
self.server_port = 0
self._init_server()
if self.server_port:
self.cmd.append("--port=%i" % self.server_port)
else:
# don't launch tests if the server didn't start
self.cmd = [sys.executable, '-c', 'raise SystemExit(1)']
def print_extra_info(self):
print("Running tests with notebook directory %r" % self.nbdir.name)
@property
def will_run(self):
return all(have[a] for a in ['zmq', 'tornado', 'jinja2', 'casperjs', 'sqlite3'])
def _init_server(self):
"Start the notebook server in a separate process"
self.server_command = command = [sys.executable,
'-m', 'IPython.html',
'--no-browser',
'--ipython-dir', self.ipydir.name,
'--notebook-dir', self.nbdir.name,
]
# ipc doesn't work on Windows, and darwin has crazy-long temp paths,
# which run afoul of ipc's maximum path length.
if sys.platform.startswith('linux'):
command.append('--KernelManager.transport=ipc')
self.stream_capturer = c = StreamCapturer()
c.start()
self.server = subprocess.Popen(command, stdout=c.writefd, stderr=subprocess.STDOUT)
self.server_info_file = os.path.join(self.ipydir.name,
'profile_default', 'security', 'nbserver-%i.json' % self.server.pid
)
self._wait_for_server()
def _wait_for_server(self):
"""Wait 30 seconds for the notebook server to start"""
for i in range(300):
if self.server.poll() is not None:
return self._failed_to_start()
if os.path.exists(self.server_info_file):
self._load_server_info()
return
time.sleep(0.1)
print("Notebook server-info file never arrived: %s" % self.server_info_file,
file=sys.stderr
)
def _failed_to_start(self):
"""Notebook server exited prematurely"""
captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')
print("Notebook failed to start: ", file=sys.stderr)
print(self.server_command)
print(captured, file=sys.stderr)
def _load_server_info(self):
"""Notebook server started, load connection info from JSON"""
with open(self.server_info_file) as f:
info = json.load(f)
self.server_port = info['port']
def cleanup(self):
try:
self.server.terminate()
except OSError:
# already dead
pass
self.server.wait()
self.stream_capturer.halt()
TestController.cleanup(self)
def prepare_controllers(options):
"""Returns two lists of TestController instances, those to run, and those
not to run."""
testgroups = options.testgroups
if testgroups:
py_testgroups = [g for g in testgroups if (g in py_test_group_names) \
or g.startswith('IPython.')]
if 'js' in testgroups:
js_testgroups = all_js_groups()
else:
js_testgroups = [g for g in testgroups if g not in py_testgroups]
else:
py_testgroups = py_test_group_names
js_testgroups = all_js_groups()
if not options.all:
test_sections['parallel'].enabled = False
c_js = [JSController(name) for name in js_testgroups]
c_py = [PyTestController(name, options) for name in py_testgroups]
controllers = c_py + c_js
to_run = [c for c in controllers if c.will_run]
not_run = [c for c in controllers if not c.will_run]
return to_run, not_run
def do_run(controller, buffer_output=True):
"""Setup and run a test controller.
If buffer_output is True, no output is displayed, to avoid it appearing
interleaved. In this case, the caller is responsible for displaying test
output on failure.
Returns
-------
controller : TestController
The same controller as passed in, as a convenience for using map() type
APIs.
exitcode : int
The exit code of the test subprocess. Non-zero indicates failure.
"""
try:
try:
controller.setup()
if not buffer_output:
controller.print_extra_info()
controller.launch(buffer_output=buffer_output)
except Exception:
import traceback
traceback.print_exc()
return controller, 1 # signal failure
exitcode = controller.wait()
return controller, exitcode
except KeyboardInterrupt:
return controller, -signal.SIGINT
finally:
controller.cleanup()
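# Example (editor's sketch, not part of the module): running a single test
# group sequentially and checking the subprocess exit code.
#
#     controller = PyTestController('IPython.core', options)
#     controller, exitcode = do_run(controller, buffer_output=False)
#     if exitcode != 0:
#         print('Test group failed')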
def report():
"""Return a string with a summary report of test-related variables."""
inf = get_sys_info()
out = []
def _add(name, value):
out.append((name, value))
_add('IPython version', inf['ipython_version'])
_add('IPython commit', "{} ({})".format(inf['commit_hash'], inf['commit_source']))
_add('IPython package', compress_user(inf['ipython_path']))
_add('Python version', inf['sys_version'].replace('\n',''))
_add('sys.executable', compress_user(inf['sys_executable']))
_add('Platform', inf['platform'])
width = max(len(n) for (n,v) in out)
out = ["{:<{width}}: {}\n".format(n, v, width=width) for (n,v) in out]
avail = []
not_avail = []
for k, is_avail in have.items():
if is_avail:
avail.append(k)
else:
not_avail.append(k)
if avail:
out.append('\nTools and libraries available at test time:\n')
avail.sort()
out.append(' ' + ' '.join(avail)+'\n')
if not_avail:
out.append('\nTools and libraries NOT available at test time:\n')
not_avail.sort()
out.append(' ' + ' '.join(not_avail)+'\n')
return ''.join(out)
def run_iptestall(options):
"""Run the entire IPython test suite by calling nose and trial.
This function constructs :class:`IPTester` instances for all IPython
modules and package and then runs each of them. This causes the modules
and packages of IPython to be tested each in their own subprocess using
nose.
Parameters
----------
All parameters are passed as attributes of the options object.
testgroups : list of str
Run only these sections of the test suite. If empty, run all the available
sections.
fast : int or None
Run the test suite in parallel, using n simultaneous processes. If None
is passed, one process is used per CPU core. Default 1 (i.e. sequential)
inc_slow : bool
Include slow tests, like IPython.parallel. By default, these tests aren't
run.
xunit : bool
Produce Xunit XML output. This is written to multiple foo.xunit.xml files.
coverage : bool or str
Measure code coverage from tests. True will store the raw coverage data,
or pass 'html' or 'xml' to get reports.
extra_args : list
Extra arguments to pass to the test subprocesses, e.g. '-v'
"""
to_run, not_run = prepare_controllers(options)
def justify(ltext, rtext, width=70, fill='-'):
ltext += ' '
rtext = (' ' + rtext).rjust(width - len(ltext), fill)
return ltext + rtext
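    # e.g. justify('Test group: core', 'OK') pads with '-' so the whole line
    # is `width` characters: 'Test group: core ------------- ... OK'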
# Run all test runners, tracking execution time
failed = []
t_start = time.time()
print()
if options.fast == 1:
# This actually means sequential, i.e. with 1 job
for controller in to_run:
print('Test group:', controller.section)
sys.stdout.flush() # Show in correct order when output is piped
controller, res = do_run(controller, buffer_output=False)
if res:
failed.append(controller)
if res == -signal.SIGINT:
print("Interrupted")
break
print()
else:
# Run tests concurrently
try:
pool = multiprocessing.pool.ThreadPool(options.fast)
for (controller, res) in pool.imap_unordered(do_run, to_run):
res_string = 'OK' if res == 0 else 'FAILED'
print(justify('Test group: ' + controller.section, res_string))
if res:
controller.print_extra_info()
print(bytes_to_str(controller.stdout))
failed.append(controller)
if res == -signal.SIGINT:
print("Interrupted")
break
except KeyboardInterrupt:
return
for controller in not_run:
print(justify('Test group: ' + controller.section, 'NOT RUN'))
t_end = time.time()
t_tests = t_end - t_start
nrunners = len(to_run)
nfail = len(failed)
# summarize results
print('_'*70)
print('Test suite completed for system with the following information:')
print(report())
took = "Took %.3fs." % t_tests
print('Status: ', end='')
if not failed:
print('OK (%d test groups).' % nrunners, took)
else:
# If anything went wrong, point out what command to rerun manually to
# see the actual errors and individual summary
failed_sections = [c.section for c in failed]
print('ERROR - {} out of {} test groups failed ({}).'.format(nfail,
nrunners, ', '.join(failed_sections)), took)
print()
print('You may wish to rerun these, with:')
print(' iptest', *failed_sections)
print()
if options.coverage:
from coverage import coverage
cov = coverage(data_file='.coverage')
cov.combine()
cov.save()
# Coverage HTML report
if options.coverage == 'html':
html_dir = 'ipy_htmlcov'
shutil.rmtree(html_dir, ignore_errors=True)
print("Writing HTML coverage report to %s/ ... " % html_dir, end="")
sys.stdout.flush()
# Custom HTML reporter to clean up module names.
from coverage.html import HtmlReporter
class CustomHtmlReporter(HtmlReporter):
def find_code_units(self, morfs):
super(CustomHtmlReporter, self).find_code_units(morfs)
for cu in self.code_units:
nameparts = cu.name.split(os.sep)
if 'IPython' not in nameparts:
continue
ix = nameparts.index('IPython')
cu.name = '.'.join(nameparts[ix:])
# Reimplement the html_report method with our custom reporter
cov._harvest_data()
cov.config.from_args(omit='*{0}tests{0}*'.format(os.sep), html_dir=html_dir,
html_title='IPython test coverage',
)
reporter = CustomHtmlReporter(cov, cov.config)
reporter.report(None)
print('done.')
# Coverage XML report
elif options.coverage == 'xml':
cov.xml_report(outfile='ipy_coverage.xml')
if failed:
# Ensure that our exit code indicates failure
sys.exit(1)
argparser = argparse.ArgumentParser(description='Run IPython test suite')
argparser.add_argument('testgroups', nargs='*',
help='Run specified groups of tests. If omitted, run '
'all tests.')
argparser.add_argument('--all', action='store_true',
help='Include slow tests not run by default.')
argparser.add_argument('-j', '--fast', nargs='?', const=None, default=1, type=int,
help='Run test sections in parallel. This starts as many '
'processes as you have cores, or you can specify a number.')
argparser.add_argument('--xunit', action='store_true',
help='Produce Xunit XML results')
argparser.add_argument('--coverage', nargs='?', const=True, default=False,
help="Measure test coverage. Specify 'html' or "
"'xml' to get reports.")
argparser.add_argument('--subproc-streams', default='capture',
help="What to do with stdout/stderr from subprocesses. "
"'capture' (default), 'show' and 'discard' are the options.")
def default_options():
"""Get an argparse Namespace object with the default arguments, to pass to
:func:`run_iptestall`.
"""
options = argparser.parse_args([])
options.extra_args = []
return options
def main():
# iptest doesn't work correctly if the working directory is the
# root of the IPython source tree. Tell the user to avoid
# frustration.
if os.path.exists(os.path.join(os.getcwd(),
'IPython', 'testing', '__main__.py')):
print("Don't run iptest from the IPython source directory",
file=sys.stderr)
sys.exit(1)
# Arguments after -- should be passed through to nose. Argparse treats
# everything after -- as regular positional arguments, so we separate them
# first.
try:
ix = sys.argv.index('--')
except ValueError:
to_parse = sys.argv[1:]
extra_args = []
else:
to_parse = sys.argv[1:ix]
extra_args = sys.argv[ix+1:]
options = argparser.parse_args(to_parse)
options.extra_args = extra_args
run_iptestall(options)
if __name__ == '__main__':
main()
| apache-2.0 |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/widgets/slider_demo.py | 13 | 1179 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.25, bottom=0.25)
t = np.arange(0.0, 1.0, 0.001)
a0 = 5
f0 = 3
s = a0*np.sin(2*np.pi*f0*t)
l, = plt.plot(t,s, lw=2, color='red')
plt.axis([0, 1, -10, 10])
axcolor = 'lightgoldenrodyellow'
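# Note: 'axisbg' below was renamed 'facecolor' in Matplotlib 2.0; this example
# targets the older API (assumption based on the repo's vintage).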
axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axamp = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
sfreq = Slider(axfreq, 'Freq', 0.1, 30.0, valinit=f0)
samp = Slider(axamp, 'Amp', 0.1, 10.0, valinit=a0)
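# Redraw the sine curve in place whenever either slider moves.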
def update(val):
amp = samp.val
freq = sfreq.val
l.set_ydata(amp*np.sin(2*np.pi*freq*t))
fig.canvas.draw_idle()
sfreq.on_changed(update)
samp.on_changed(update)
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
sfreq.reset()
samp.reset()
button.on_clicked(reset)
rax = plt.axes([0.025, 0.5, 0.15, 0.15], axisbg=axcolor)
radio = RadioButtons(rax, ('red', 'blue', 'green'), active=0)
def colorfunc(label):
l.set_color(label)
fig.canvas.draw_idle()
radio.on_clicked(colorfunc)
plt.show()
| mit |
heliopython/heliopy | doc/source/conf.py | 1 | 11504 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# heliopy documentation build configuration file, created by
# sphinx-quickstart
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
"""
import os
import sys
import unittest.mock as mock
import matplotlib
import heliopy
matplotlib.use('agg')
sys.path.insert(0, os.path.abspath('../../'))
html_favicon = '../../artwork/favicon.ico'
html_sidebars = {'**': ['docsidebar.html']}
# Pretend these modules exist so the readthedocs build succeeds
MOCK_MODULES = []
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx_gallery.gen_gallery',
'sphinx_automodapi.automodapi',
'sphinx_issues'
]
intersphinx_mapping = {
'matplotlib': ('https://matplotlib.org', None),
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'astropy': ('https://docs.astropy.org/en/stable', None),
'sunpy': ('https://docs.sunpy.org/en/stable', None)}
sphinx_gallery_conf = {
'default_thumb_file': os.path.abspath(os.path.join('..', '..', 'artwork', 'logo_circle.png')),
'examples_dirs': '../../examples',
'gallery_dirs': 'auto_examples',
'backreferences_dir': 'gen_modules/backreferences',
'doc_module': ('sphinx_gallery', 'heliopy'),
'min_reported_time': 0,
'abort_on_example_error': False,
}
issues_github_path = 'heliopython/heliopy'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'HelioPy'
author = 'David Stansby'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = heliopy.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'heliopy_theme'
html_theme_path = ['../']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {'canonical_url': 'http://docs.heliopy.org/en/stable/',
# 'analytics_id': 'UA-112461508-1',
# 'prev_next_buttons_location': 'None'}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'heliopy v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'heliopydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'heliopy.tex', 'HelioPy Documentation',
'David Stansby', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'HelioPy', 'HelioPy Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
'''texinfo_documents = [
(master_doc, 'HelioPy', 'HelioPy Documentation',
author, 'HelioPy team', 'Python for space physics.',
'Miscellaneous'),
]
html_theme_options = {
"about_links": [
("About", "http://docs.heliopy.org/en/stable/guide/about.html", 1),
(
"Acknowledge HelioPy",
"http://docs.heliopy.org/en/stable/guide/citing.html",
1,
),
("Code of Conduct", "http://docs.heliopy.org/en/stable/guide/code-of-conduct.html", 1),
],
"navbar_links": [
("Documentation", "http://docs.heliopy.org/en/stable/index.html", 1),
("Get Help", "http://docs.heliopy.org/en/stable/index.html", 1),
],
}'''
| gpl-3.0 |
tfwillems/STRValidator | pedigree_analysis.py | 1 | 23183 | import matplotlib as mpl
mpl.use('Agg')
import collections
import sys
import numpy
import matplotlib.pyplot as plt
import vcf
from matplotlib.backends.backend_pdf import PdfPages
from fractions import Fraction
class TRIO:
def __init__(self, child, mother, father):
self.child = child
self.mother = mother
self.father = father
def __str__(self):
return "%s\t%s\t%s"%(self.child, self.mother, self.father)
class FATHER_SON_PAIR:
def __init__(self, son, father):
self.son = son
self.father = father
def __str__(self):
return "%s\t%s"%(self.son, self.father)
def read_1kg_pedigree_file(input_file, header=True):
data = open(input_file, "r")
if header:
data.readline()
trios, father_son_pairs = [], []
for line in data:
tokens = line.strip().split()
if tokens[2] != "0" and tokens[3] != "0":
            child, dad, mom = tokens[1:4]
            # TRIO expects (child, mother, father); tokens are (child, paternal, maternal)
            trios.append(TRIO(child, mom, dad))
if tokens[2] != "0" and tokens[4] == "1":
father_son_pairs.append(FATHER_SON_PAIR(tokens[1], tokens[2]))
data.close()
print("There are %d trios and %d father-son-pairs in the pedigree file"%(len(trios), len(father_son_pairs)))
return trios, father_son_pairs
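# Note on the expected input: the parser above assumes the standard
# 1000 Genomes / PLINK pedigree layout (whitespace-delimited), roughly
#   FamilyID  IndividualID  PaternalID  MaternalID  Gender  Phenotype ...
# where "0" marks an unknown parent and Gender "1" denotes male.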
# Find the index for the highest bin which is less than
# or equal to the provided value
def find_index(bins, value):
low = 0
high = len(bins)-1
while high > low + 1:
midval = bins[(low+high)/2]
if value > midval:
low = (low+high)/2
elif value < midval:
high = (low+high)/2 - 1
else:
return (low+high)/2
if value < bins[low]:
exit("Unable to find index. Exiting...")
if value >= bins[high]:
return high
else:
return low
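# Example (hypothetical bins): with bins = [0, 5, 10],
# find_index(bins, 7) -> 1 (the highest bin <= 7) and find_index(bins, 12) -> 2;
# a value below bins[0] aborts via exit().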
def is_discordant(a11, a12, a21, a22):
if (a11 == a21 and a12 == a22) or (a11 == a22 and a12 == a21):
return False
else:
return True
def is_mendelian(a11, a12, a21, a22, a31, a32):
if (a31 == a11 or a31 == a12) and (a32 == a21 or a32 == a22):
return True
elif (a31 == a21 or a31 == a22) and (a32 == a11 or a32 == a12):
return True
else:
return False
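# Example: is_mendelian('10', '12', '10', '14', '12', '14') is True, since the
# child's '12' can come from parent 1 and '14' from parent 2; is_discordant
# returns True whenever the two parental genotypes differ as unordered pairs.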
def draw_bp_histogram(discordant_counts, pdfpage):
# Create histogram of father-son differences
bp_diff_counts = [collections.defaultdict(int) for _ in xrange(6)]
repeat_diff_counts = [collections.defaultdict(int) for _ in xrange(6)]
out_frame_count = 0
in_frame_count = 0
for key,val in discordant_counts.items():
bp_diff_counts[key[2]-1][key[1]-key[0]] += val
repeat_diff_counts[key[2]-1][Fraction(key[1]-key[0], key[2])] += val
for xlabel,diff_counts,in_frame in zip(["bps", "repeats"],
[bp_diff_counts, repeat_diff_counts],
[lambda bp,period: bp%period == 0, lambda rep,period: int(rep)==float(rep) ]):
fig = plt.figure()
ax = fig.add_subplot(111)
diffs = sorted(list(set(reduce(lambda x,y:x+y, map(lambda z: z.keys(), diff_counts)))))
colors = ['c', 'r', 'g', 'y', 'b', 'm']
heights = numpy.zeros(len(diffs))
for i in xrange(6):
vals = [diff_counts[i][x] for x in diffs]
if sum(vals) == 0:
continue
in_frame_trips = filter(lambda x: in_frame(x[0], i+1), zip(diffs, vals, heights))
out_frame_trips = filter(lambda x: not in_frame(x[0], i+1), zip(diffs, vals, heights))
if len(in_frame_trips) != 0:
x,y,h = zip(*in_frame_trips)
in_frame_count += sum(y)
ax.bar(x, y, bottom=h, align='center', color=colors[i], width=0.25, label=str(i+1))
if len(out_frame_trips) != 0:
x,y,h = zip(*out_frame_trips)
out_frame_count += sum(y)
ax.bar(x, y, bottom=h, align='center', color=colors[i], width=0.25, label=str(i+1), hatch='//')
heights += vals
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlabel(r"$father-son ("+xlabel+")$")
ax.set_ylabel(r"$n_{calls}$")
ax.legend()
pdfpage.savefig(fig)
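    # the in/out-of-frame tallies were accumulated once per histogram pass
    # (bps and repeats), so halve them to count each call once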
print("IN FRAME=%d, OUT FRAME=%d"%(in_frame_count/2, out_frame_count/2))
class CHRY_STATS:
def __init__(self, father_son_pairs, call_output):
self.pairs = father_son_pairs
self.output_calls = open(call_output, "w")
def initialize(self, vcf_reader):
sample_indices = dict(zip(vcf_reader.samples, range(len(vcf_reader.samples))))
self.pair_indices = []
for i in xrange(len(self.pairs)):
if self.pairs[i].son not in sample_indices:
exit("Unable to assess chrY inheritance because no data was found for " + self.pairs[i].son)
if self.pairs[i].father not in sample_indices:
exit("Unable to assess chrY inheritance because no data was found for " + self.pairs[i].father)
self.pair_indices.append([sample_indices[self.pairs[i].father], sample_indices[self.pairs[i].son]])
self.missing_data_skip_counts = numpy.zeros(len(self.pair_indices))
self.het_gt_skip_counts = numpy.zeros(len(self.pair_indices))
self.num_concordant = 0
self.num_discordant = 0
self.pair_info = {}
self.discordant_counts = collections.defaultdict(int)
self.call_count = 0
def process_record(self, record):
motif_len = len(record.INFO['MOTIF'])
for i in xrange(len(self.pair_indices)):
if any(map(lambda x: record.samples[x]['GT'] is None, self.pair_indices[i])):
self.missing_data_skip_counts[i] += 1
continue
self.call_count += 1
father = record.samples[self.pair_indices[i][0]]
son = record.samples[self.pair_indices[i][1]]
gb_1a, gb_1b = map(int, father['GB'].split("/"))
gb_2a, gb_2b = map(int, son['GB'].split("/"))
self.output_calls.write("%d\t%s\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%s\t%s\n"%(self.call_count, record.CHROM, record.POS, record.INFO['END'],
gb_1a + gb_1b, gb_2a + gb_2b,
gb_1a, gb_1b, gb_2a, gb_2b, father.sample, son.sample))
if gb_1a != gb_1b or gb_2a != gb_2b:
self.het_gt_skip_counts[i] += 1
if gb_1a != gb_1b:
print("chrY\t%d\t%d\t%s\t%s\t%s"%(record.POS, record.INFO["END"], father.sample, str(gb_1a) + "|" + str(gb_1b), "HET"))
                if gb_2a != gb_2b:
                    # report the son's sample here, not the father's
                    print("chrY\t%d\t%d\t%s\t%s\t%s"%(record.POS, record.INFO["END"], son.sample, str(gb_2a) + "|" + str(gb_2b), "HET"))
continue
if gb_1a != gb_2a:
self.num_discordant += 1
self.discordant_counts[(gb_1a, gb_2a, motif_len)] +=1
print("chrY\t%d\t%d\t%s\t%s\t%s"%(record.POS, record.INFO["END"],
father.sample + "," + son.sample,
str(gb_1a) + "," + str(gb_2b), "DISCORDANT"))
else:
self.num_concordant += 1
if (gb_1a, gb_2a) not in self.pair_info:
self.pair_info[(gb_1a, gb_2a)] = []
self.pair_info[(gb_1a, gb_2a)].append((record.CHROM, record.POS, record.INFO['END'], father.sample+"-"+son.sample))
def finish(self, pdfpage, output_prefix):
print("WARNING: Skipped " + str(self.missing_data_skip_counts) + " comparisons due to missing data for one or more individuals")
print("WARNING: Skipped " + str(self.het_gt_skip_counts) + " comparisons due to heterozygous genotypes for one or more individuals")
if self.num_discordant + self.num_concordant != 0:
print("%d vs. %d = %f Percent"%(self.num_discordant, self.num_concordant, 100.0*self.num_discordant/(self.num_discordant+self.num_concordant)))
else:
print("WARNING: No chrY calls were applicable for comparison")
# Create bubble plot using all data
fig = plt.figure()
ax = fig.add_subplot(111)
x, y = zip(*self.pair_info.keys())
s = numpy.array(map(len, self.pair_info.values()))*10
ax.scatter(x, y, s=s, alpha=0.7)
ax.set_xlabel("Father's genotype (bp)")
ax.set_ylabel("Son's genotype (bp)")
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.plot(numpy.arange(min(x)-5, max(x)+5, 1.0), numpy.arange(min(y)-5, max(y)+5, 1.0), linestyle='--', color='k')
pdfpage.savefig(fig)
# Create histogram of father-son differences
draw_bp_histogram(self.discordant_counts, pdfpage)
viz_output = open(output_prefix+"_chrY.csv", "w")
viz_output.write(",".join(["X","Y", "CHROMS", "STARTS", "STOPS", "SAMPLES"]) + "\n")
for key,val in self.pair_info.items():
chroms, positions, ends, samples = map(list, zip(*val))
viz_output.write(",".join([str(key[0]), str(key[1]), "_".join(chroms), "_".join(map(str, positions)), "_".join(map(str, ends)), "_".join(map(str, samples))]) + "\n")
viz_output.close()
self.output_calls.close()
class MENDELIAN_STATS:
def __init__(self, trios, coverage_bins, quality_bins, max_coverage, quality_thresholds):
self.trios = trios
self.coverage_bins = coverage_bins
self.quality_bins = quality_bins
self.max_coverage = max_coverage
self.qual_thresh = quality_thresholds
def initialize(self, vcf_reader):
sample_indices = dict(zip(vcf_reader.samples, range(len(vcf_reader.samples))))
self.trio_indices = []
for i in xrange(len(self.trios)):
if self.trios[i].child not in sample_indices:
exit("Unable to calculate Mendelian inheritance because no data was found for " + self.trios[i].child)
if self.trios[i].father not in sample_indices:
exit("Unable to calculate Mendelian inheritance because no data was found for " + self.trios[i].father)
if self.trios[i].mother not in sample_indices:
exit("Unable to calculate Mendelian inheritance because no data was found for " + self.trios[i].mother)
# Father, Mother, Child
self.trio_indices.append(map(lambda x: sample_indices[x], [self.trios[i].father, self.trios[i].mother, self.trios[i].child]))
self.coverage_bins = numpy.concatenate(([-100000], self.coverage_bins))
self.quality_bins = numpy.concatenate(([-100000], self.quality_bins))
# Quality/Coverage x Trios x Period x Thresholds
self.all_loci_nstrs = [numpy.zeros((len(self.trios), 5, len(self.coverage_bins))), numpy.zeros((len(self.trios), 5, len(self.quality_bins)))]
self.all_loci_nmend = [numpy.zeros((len(self.trios), 5, len(self.coverage_bins))), numpy.zeros((len(self.trios), 5, len(self.quality_bins)))]
self.disc_loci_nstrs = [numpy.zeros((len(self.trios), 5, len(self.coverage_bins))), numpy.zeros((len(self.trios), 5, len(self.quality_bins)))]
self.disc_loci_nmend = [numpy.zeros((len(self.trios), 5, len(self.coverage_bins))), numpy.zeros((len(self.trios), 5, len(self.quality_bins)))]
self.missing_data_skip_counts = numpy.zeros(len(self.trios))
self.coverage_skip_counts = numpy.zeros(len(self.trios))
# Trios x Period x Thresholds
self.all_loci_nstrs_min_q = numpy.zeros((len(self.trios), 5, len(self.coverage_bins)))
self.all_loci_nmend_min_q = numpy.zeros((len(self.trios), 5, len(self.coverage_bins)))
self.disc_loci_nstrs_min_q = numpy.zeros((len(self.trios), 5, len(self.coverage_bins)))
self.disc_loci_nmend_min_q = numpy.zeros((len(self.trios), 5, len(self.coverage_bins)))
def process_record(self, record):
for i in xrange(len(self.trios)):
if any(map(lambda x: record.samples[x]['GT'] is None, self.trio_indices[i])):
self.missing_data_skip_counts[i] += 1
continue
if 'X' in record.CHROM or 'x' in record.CHROM or 'Y' in record.CHROM or 'y' in record.CHROM:
continue
q1, q2, q3 = map(lambda x: record.samples[x]["Q"], self.trio_indices[i])
c1, c2, c3 = map(lambda x: record.samples[x]["DP"], self.trio_indices[i])
            a11, a12 = record.samples[self.trio_indices[i][0]]["GT"].split("/")
a21, a22 = record.samples[self.trio_indices[i][1]]["GT"].split("/")
a31, a32 = record.samples[self.trio_indices[i][2]]["GT"].split("/")
discordant = is_discordant(a11, a12, a21, a22)
mendelian = is_mendelian(a11, a12, a21, a22, a31, a32)
# Filter out loci with too high of coverage
if max(c1, c2, c3) > self.max_coverage:
self.coverage_skip_counts[i] += 1
continue
coverage = min(c1, c2, c3)
bin_idx = find_index(self.coverage_bins, coverage)
motif_len = len(record.INFO["MOTIF"])-2
self.all_loci_nstrs [0][i][motif_len][bin_idx] += 1
self.all_loci_nmend [0][i][motif_len][bin_idx] += mendelian*1
self.disc_loci_nstrs[0][i][motif_len][bin_idx] += discordant*1
self.disc_loci_nmend[0][i][motif_len][bin_idx] += discordant*mendelian*1
quality = min(q1, q2, q3)
bin_idx = find_index(self.quality_bins, quality)
self.all_loci_nstrs [1][i][motif_len][bin_idx] += 1
self.all_loci_nmend [1][i][motif_len][bin_idx] += mendelian*1
self.disc_loci_nstrs[1][i][motif_len][bin_idx] += discordant*1
self.disc_loci_nmend[1][i][motif_len][bin_idx] += discordant*mendelian*1
coverage = min(c1, c2, c3)
bin_idx = find_index(self.coverage_bins, coverage)
if quality > self.qual_thresh[motif_len]:
self.all_loci_nstrs_min_q [i][motif_len][bin_idx] += 1
self.all_loci_nmend_min_q [i][motif_len][bin_idx] += mendelian*1
self.disc_loci_nstrs_min_q [i][motif_len][bin_idx] += discordant*1
self.disc_loci_nmend_min_q [i][motif_len][bin_idx] += discordant*mendelian*1
def finish(self, pdfpage):
print("WARNING: Skipped " + str(self.missing_data_skip_counts) + " loci due to missing data for one or more individual")
print("WARNING: Skipped " + str(self.coverage_skip_counts) + " loci due to too high coverage")
# Iterate over coverage and quality stats
types = ['Coverage', 'Quality', 'Coverage']
bins = [self.coverage_bins, self.quality_bins, self.coverage_bins]
for n in xrange(3):
# Sum across all trios
if n == 0 or n == 1:
all_loci_nstrs = numpy.sum(self.all_loci_nstrs [n], axis=0)
all_loci_nmend = numpy.sum(self.all_loci_nmend [n], axis=0)
disc_loci_nstrs = numpy.sum(self.disc_loci_nstrs[n], axis=0)
disc_loci_nmend = numpy.sum(self.disc_loci_nmend[n], axis=0)
else:
all_loci_nstrs = numpy.sum(self.all_loci_nstrs_min_q, axis=0)
all_loci_nmend = numpy.sum(self.all_loci_nmend_min_q, axis=0)
disc_loci_nstrs = numpy.sum(self.disc_loci_nstrs_min_q, axis=0)
disc_loci_nmend = numpy.sum(self.disc_loci_nmend_min_q, axis=0)
# Create plots for individual periods
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.set_ylabel("Fraction Mendelian")
ax1.set_title("All sites")
ax2 = fig.add_subplot(222, sharey=ax1)
ax2.set_title("Discordant parental sites")
ax3 = fig.add_subplot(223, sharex=ax1)
ax3.set_xlabel(types[n] + " threshold")
ax3.set_ylabel("# genotypes")
ax3.set_yscale('log')
ax4 = fig.add_subplot(224, sharex=ax2, sharey=ax3)
ax4.set_xlabel(types[n] + " threshold")
ax4.set_yscale('log')
box1 = ax1.get_position()
ax1.set_position([box1.x0, box1.y0, box1.width*0.9, box1.height])
ax2.set_position([box1.x0 + box1.width*1.15, box1.y0, box1.width*0.9, box1.height])
box3 = ax3.get_position()
ax3.set_position([box3.x0, box3.y0, box3.width*0.9, box3.height])
ax4.set_position([box3.x0 + box3.width*1.15, box3.y0, box3.width*0.9, box3.height])
font_size = 9
for i in xrange(5):
nstrs_all = numpy.cumsum(all_loci_nstrs [i][::-1])[::-1]
nmend_all = numpy.cumsum(all_loci_nmend [i][::-1])[::-1]
nstrs_disc = numpy.cumsum(disc_loci_nstrs[i][::-1])[::-1]
nmend_disc = numpy.cumsum(disc_loci_nmend[i][::-1])[::-1]
all_fracs = (1.0*nmend_all/nstrs_all)[1:]
disc_fracs = (1.0*nmend_disc/nstrs_disc)[1:]
ax1.plot(bins[n][1:], all_fracs, '-o', label=str(i+1))
ax2.plot(bins[n][1:], disc_fracs, '-o', label=str(i+1))
ax3.plot(bins[n][1:], nstrs_all[1:], '-o', label=str(i+1))
ax4.plot(bins[n][1:], nstrs_disc[1:], '-o', label=str(i+1))
ax4.legend(bbox_to_anchor=(1.05, 0.9, 0.25, 0.2), loc='center left')
for ax in [ax1, ax2, ax3, ax4]:
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
pdfpage.savefig(fig)
# Create plots using all periods
# Sum across all periods
all_loci_nstrs = numpy.sum(all_loci_nstrs, axis=0)
all_loci_nmend = numpy.sum(all_loci_nmend, axis=0)
disc_loci_nstrs = numpy.sum(disc_loci_nstrs, axis=0)
disc_loci_nmend = numpy.sum(disc_loci_nmend, axis=0)
# Transform into running sums
all_loci_nstrs = numpy.cumsum(all_loci_nstrs[::-1])[::-1]
all_loci_nmend = numpy.cumsum(all_loci_nmend[::-1])[::-1]
disc_loci_nstrs = numpy.cumsum(disc_loci_nstrs[::-1])[::-1]
disc_loci_nmend = numpy.cumsum(disc_loci_nmend[::-1])[::-1]
# Calculate the fraction of Mendelian inheritance for all loci and discordant loci
all_loci_fracs = (1.0*all_loci_nmend/all_loci_nstrs)[1:]
disc_loci_fracs = (1.0*disc_loci_nmend/disc_loci_nstrs)[1:]
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.set_ylabel("Fraction Mendelian")
ax1.set_title("All sites")
ax1.plot(bins[n][1:], all_loci_fracs, '-o')
ax2 = fig.add_subplot(222, sharey=ax1)
ax2.plot(bins[n][1:], disc_loci_fracs, '-o')
ax2.set_title("Discordant parental sites")
ax3 = fig.add_subplot(223, sharex=ax1)
ax3.set_xlabel(types[n] + " threshold")
ax3.set_ylabel("# genotypes")
ax3.set_yscale('log')
ax3.plot(bins[n][1:], all_loci_nstrs[1:], '-o')
ax4 = fig.add_subplot(224, sharex=ax2, sharey=ax3)
ax4.set_xlabel(types[n] + " threshold")
ax4.set_yscale('log')
ax4.plot(bins[n][1:], disc_loci_nstrs[1:], '-o')
for ax in [ax1, ax2, ax3, ax4]:
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
pdfpage.savefig(fig)
mpl.rcParams['xtick.labelsize'] = 10
mpl.rcParams['ytick.labelsize'] = 10
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(bins[n][1:], all_loci_fracs, '-o', color='b')
ax1.set_ylabel("Fraction Mendelian")
ax1.set_xlabel(types[n] + " threshold")
ax2 = ax1.twinx()
ax2.set_yscale('log')
ax2.plot(bins[n][1:], all_loci_nstrs[1:], '-o', color='g')
pdfpage.savefig(fig)
ax1.axis('equal')
pdfpage.savefig(fig)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax1.plot(bins[n][1:], all_loci_fracs, '-o', color='b')
ax1.set_ylabel("Fraction Mendelian")
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
ax2.set_xlabel(types[n] + " threshold")
ax2.plot(bins[n][1:], all_loci_nstrs[1:], '-o', color='g')
ax2.set_yscale('log')
ax2.set_ylabel("# Called loci across trios")
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
pdfpage.savefig(fig)
def main():
print("Invocation syntax: python pedigree_analysis.py 1kg_pedigree_file.txt vcf_file.vcf output_file.pdf")
trios, father_son_pairs = read_1kg_pedigree_file(sys.argv[1], header=True)
vcf_reader = vcf.Reader(filename=sys.argv[2])
call_stats = sys.argv[3]
samples = vcf_reader.samples
trios_with_data = []
pairs_with_data = []
for trio in trios:
if trio.child in samples and trio.mother in samples and trio.father in samples:
trios_with_data.append(trio)
print("There are %d trios with data"%len(trios_with_data))
for pair in father_son_pairs:
if pair.father in samples and pair.son in samples:
pairs_with_data.append(pair)
print("There are %d father-son pairs with data"%(len(pairs_with_data)))
coverage_bins = numpy.append(numpy.arange(1.001, 5.0011, 1.0), numpy.arange(6.001, 18.0011, 2.0))
quality_bins = numpy.arange(0.0, 1.0, 0.1)
quality_thresh = [0.9, 0.5, 0.5, 0.5, 0.5, 0.5]
max_coverage = 100
processors = [CHRY_STATS(pairs_with_data, call_stats)]
#mend_stats = MENDELIAN_STATS(trios_with_data, coverage_bins, quality_bins, max_coverage, quality_thresh)
for proc in processors:
proc.initialize(vcf_reader)
for record in vcf_reader:
for proc in processors:
proc.process_record(record)
pp = PdfPages(sys.argv[3]+".pdf")
for proc in processors:
proc.finish(pp, sys.argv[3])
pp.close()
return 0
if __name__ == "__main__":
main()
| gpl-3.0 |
DTOcean/dtocean-core | tests/test_data_definitions_xgrid2d.py | 1 | 4013 | import pytest
import numpy as np
import matplotlib.pyplot as plt
from aneris.control.factory import InterfaceFactory
from dtocean_core.core import (AutoFileInput,
AutoFileOutput,
AutoPlot,
Core)
from dtocean_core.data import CoreMetaData
from dtocean_core.data.definitions import XGrid2D
def test_XGrid2D_available():
new_core = Core()
all_objs = new_core.control._store._structures
assert "XGrid2D" in all_objs.keys()
def test_XGrid2D():
raw = {"values": np.random.randn(2, 3),
"coords": [['a', 'b'], [-2, 0, 2]]}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ['x', 'y'],
"units": [None, 'm', 'POWER!']})
test = XGrid2D()
a = test.get_data(raw, meta)
b = test.get_value(a)
assert b.values.shape == (2,3)
assert b.units == 'POWER!'
assert b.y.units == 'm'
def test_get_None():
test = XGrid2D()
result = test.get_value(None)
assert result is None
@pytest.mark.parametrize("fext", [".nc"])
def test_XGrid2D_auto_file(tmpdir, fext):
test_path = tmpdir.mkdir("sub").join("test{}".format(fext))
test_path_str = str(test_path)
raw = {"values": np.random.randn(2, 3),
"coords": [['a', 'b'], [-2, 0, 2]]}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ['x', 'y'],
"units": [None, 'm', 'POWER!']})
test = XGrid2D()
fout_factory = InterfaceFactory(AutoFileOutput)
FOutCls = fout_factory(meta, test)
fout = FOutCls()
fout._path = test_path_str
fout.data.result = test.get_data(raw, meta)
fout.connect()
assert len(tmpdir.listdir()) == 1
fin_factory = InterfaceFactory(AutoFileInput)
FInCls = fin_factory(meta, test)
fin = FInCls()
fin.meta.result = meta
fin._path = test_path_str
fin.connect()
result = test.get_data(fin.data.result, meta)
assert result.values.shape == (2,3)
assert result.units == 'POWER!'
assert result.y.units == 'm'
def test_XGrid2D_auto_plot(tmpdir):
raw = {"values": np.random.randn(2, 3),
"coords": [['a', 'b'], [-2, 0, 2]]}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ['x', 'y'],
"units": ['\sum_{n=1}^{\infty} 2^{-n} = 1',
'm',
'POWER!']})
test = XGrid2D()
fout_factory = InterfaceFactory(AutoPlot)
PlotCls = fout_factory(meta, test)
plot = PlotCls()
plot.data.result = test.get_data(raw, meta)
plot.meta.result = meta
plot.connect()
assert len(plt.get_fignums()) == 1
plt.close("all")
def test_XGrid2D_auto_plot_reverse(tmpdir):
raw = {"values": np.random.randn(3, 2),
"coords": [[-2, 0, 2], ['a', 'b']]}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ['x', 'y'],
"units": ['\sum_{n=1}^{\infty} 2^{-n} = 1',
'm',
'POWER!']})
test = XGrid2D()
fout_factory = InterfaceFactory(AutoPlot)
PlotCls = fout_factory(meta, test)
plot = PlotCls()
plot.data.result = test.get_data(raw, meta)
plot.meta.result = meta
plot.connect()
assert len(plt.get_fignums()) == 1
plt.close("all")
| gpl-3.0 |
thp44/delphin_6_automation | data_process/2d_1d/archieve/temperature.py | 1 | 18075 | __author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import matplotlib.pyplot as plt
import numpy as np
import os
import datetime
import matplotlib.dates as mdates
import pandas as pd
# RiBuild Modules
from delphin_6_automation.file_parsing import delphin_parser
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
# Application
colors = {'top': '#FBBA00', 'mid': '#B81A5D', 'bottom': '#79C6C0', '1d_brick': '#000000', '1d_mortar': '#BDCCD4'}
project_dict = {'dresden_zp_high_ratio_uninsulated_4a':
{'map':
{'5ad9e0352e2cb22f2c4f15b4': 'brick_1d',
'5ad9e3bf2e2cb22f2c4f166b': 'mortar_1d',
'5adb0a102e2cb22f2c4f17e9': '2d'}
},
'dresden_zd_high_ratio_uninsulated_4a':
{'map':
{'5ad9e0ba2e2cb22f2c4f15f1': 'brick_1d',
'5ad9e3bf2e2cb22f2c4f166b': 'mortar_1d',
'5adb2dc02e2cb22f2c4f1873': '2d'}
},
'potsdam_high_ratio_uninsulated_4a':
{'map':
{'5ad9e3462e2cb22f2c4f162e': 'brick_1d',
'5ad9e3bf2e2cb22f2c4f166b': 'mortar_1d',
'5adcc9702e2cb22f2c4f18fd': '2d'}
},
'dresden_zp_low_ratio_uninsulated_4a':
{'map':
{'5ad9e6192e2cb22f2c4f175f': 'brick_1d',
'5ad9e5812e2cb22f2c4f1722': 'mortar_1d',
'5adda7172e2cb20baca57c6e': '2d'}
},
'dresden_zd_low_ratio_uninsulated_4a':
{'map':
{'5ad9e44f2e2cb22f2c4f16a8': 'brick_1d',
'5ad9e5812e2cb22f2c4f1722': 'mortar_1d',
'5adcd4402e2cb22f2c4f1987': '2d'}
},
'potsdam_low_ratio_uninsulated_4a':
{'map': {'5ad9e4f22e2cb22f2c4f16e5': 'brick_1d',
'5ad9e5812e2cb22f2c4f1722': 'mortar_1d',
'5add9b902e2cb20baca57be4': '2d'}
},
'dresden_zp_high_ratio_insulated_4a':
{'map': {'5ae824252e2cb22d48db5955': 'brick_1d',
'5ae82c222e2cb2156000902b': 'mortar_1d',
'5ae355cf2e2cb2201055c1a4': '2d'}
},
'dresden_zd_high_ratio_insulated_4a':
{'map': {'5ae824d82e2cb22d48db5998': 'brick_1d',
'5ae82c222e2cb2156000902b': 'mortar_1d',
'5ae398f12e2cb2201055c263': '2d'}
},
'potsdam_high_ratio_insulated_4a':
{'map':
{'5ae82bac2e2cb21560008fe8': 'brick_1d',
'5ae82c222e2cb2156000902b': 'mortar_1d',
'5ae6ca982e2cb2201055c322': '2d'}
},
'dresden_zp_low_ratio_insulated_4a':
{'map':
{'5ae82e5d2e2cb21560009137': 'brick_1d',
'5ae82dc02e2cb215600090f4': 'mortar_1d',
'5ae6fdbf2e2cb20d5891272f': '2d'}
},
'dresden_zd_low_ratio_insulated_4a':
{'map':
{'5ae82cb12e2cb2156000906e': 'brick_1d',
'5ae82dc02e2cb215600090f4': 'mortar_1d',
'5ae6d9bf2e2cb2201055c3e1': '2d'}
},
'potsdam_low_ratio_insulated_4a':
{'map':
{'5ae82d3b2e2cb215600090b1': 'brick_1d',
'5ae82dc02e2cb215600090f4': 'mortar_1d',
'5ae6edaf2e2cb20d58912670': '2d'}
},
}
result_folder = r'U:\RIBuild\2D_1D\Results'
files = ['temperature profile.d6o']
# Functions
def get_points(result: dict, geo: dict):
points = []
for index_ in result['indices']:
x_ = geo['element_geometry'][index_][1]
y_ = geo['element_geometry'][index_][2]
points.append({'cell': index_, 'x': x_, 'y': y_})
return points
def add_data_to_points(points: list, results: dict, result_name: str):
for cell_ in results['result'].keys():
cell_index = int(cell_.split('_')[1])
for point in points:
if point['cell'] == cell_index:
point[result_name] = np.array(results['result'][cell_][8760:])
break
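# After both helpers run, each point is a dict of the form (hypothetical values):
#   {'cell': 42, 'x': 0.015, 'y': 0.09, 'temperature': np.array([...])}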
def main(project_):
projects = list(project_dict[project_]['map'].keys())
parsed_dicts = {'brick_1d': {'temp': {}, 'geo': {}},
'mortar_1d': {'temp': {}, 'geo': {}},
'2d': {'temp': {}, 'geo': {}}, }
for p_ in projects:
for mp_key in project_dict[project_]['map'].keys():
if p_ == mp_key:
key = project_dict[project_]['map'][mp_key]
folder = result_folder + f'/{p_}/results'
geo_file = [file
for file in os.listdir(folder)
if file.endswith('.g6a')][0]
parsed_dicts[key]['temp'], _ = delphin_parser.d6o_to_dict(folder, files[0])
parsed_dicts[key]['geo'] = delphin_parser.g6a_to_dict(folder, geo_file)
x_date = [datetime.datetime(2020, 1, 1) + datetime.timedelta(hours=i)
for i in range(len(parsed_dicts['brick_1d']['temp']['result']['cell_0'][8760:]))]
# Brick 1D
brick_1d = get_points(parsed_dicts['brick_1d']['temp'], parsed_dicts['brick_1d']['geo'])
brick_1d.sort(key=lambda point: point['x'])
add_data_to_points(brick_1d, parsed_dicts['brick_1d']['temp'], 'temperature')
# Mortar 1D
mortar_1d = get_points(parsed_dicts['mortar_1d']['temp'], parsed_dicts['mortar_1d']['geo'])
mortar_1d.sort(key=lambda point: point['x'])
add_data_to_points(mortar_1d, parsed_dicts['mortar_1d']['temp'], 'temperature')
# 2D
sim_2d = get_points(parsed_dicts['2d']['temp'], parsed_dicts['2d']['geo'])
sim_2d.sort(key=lambda point: (point['x'], point['y']))
add_data_to_points(sim_2d, parsed_dicts['2d']['temp'], 'temperature')
# Plots
def plot_locations(quantity):
# Axes 00
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[0]['x']:.4f} and 2D-Location: {sim_2d[0]['x']:.4f}")
plt.plot(x_date, brick_1d[0][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[0][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[0][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[1][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[2][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
# Axes 01
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[1]['x']:.4f} and 2D-Location: {sim_2d[3]['x']:.4f}")
plt.plot(x_date, brick_1d[1][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[1][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[3][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[4][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[5][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
# Axes 10
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[2]['x']:.4f} and 2D-Location: {sim_2d[6]['x']:.4f}")
plt.plot(x_date, brick_1d[2][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[2][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[6][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[7][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[8][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
# Axes 11
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[3]['x']:.4f} and 2D-Location: {sim_2d[9]['x']:.4f}")
plt.plot(x_date, brick_1d[3][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[3][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[9][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[10][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[11][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
# Axes 20
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[4]['x']:.4f} and 2D-Location: {sim_2d[12]['x']:.4f}")
plt.plot(x_date, brick_1d[4][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[4][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[12][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[13][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[14][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
# Axes 21
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[5]['x']:.4f} and 2D-Location: {sim_2d[15]['x']:.4f}")
plt.plot(x_date, brick_1d[5][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[5][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[15][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[16][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[17][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
#plot_locations(quantity='temperature')
#plt.show()
def abs_diff(x1, x2):
return x2 - x1
def rel_diff(x1, x2):
return (abs(x2 - x1))/abs(x2) * 100
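    # e.g. abs_diff(9.0, 10.0) == 1.0 and rel_diff(9.0, 10.0) == 10.0 (percent)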
def differences(i, plots=False):
        # average the bottom/mid/top 2D cells at this x-location (three cells
        # per location, assuming sim_2d is sorted by (x, y) as above)
        avg_2d = np.mean([sim_2d[3*i]['temperature'],
                          sim_2d[3*i + 1]['temperature'],
                          sim_2d[3*i + 2]['temperature']], axis=0)
brick_abs = abs_diff(brick_1d[i]['temperature'], avg_2d)
mortar_abs = abs_diff(mortar_1d[i]['temperature'], avg_2d)
brick_rel = rel_diff(brick_1d[i]['temperature'], avg_2d)
mortar_rel = rel_diff(mortar_1d[i]['temperature'], avg_2d)
if plots:
# Plot
plt.figure()
plt.title(f"Temperature - Absolute Difference\n"
f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
plt.plot(x_date, brick_abs, color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_abs, color=colors['1d_mortar'], label=f"1D Mortar")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel('C')
plt.figure()
plt.title(f"Temperature - Relative Difference\n"
f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
plt.plot(x_date, brick_rel, color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_rel, color=colors['1d_mortar'], label=f"1D Mortar")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel('%')
local_df = pd.DataFrame(columns=[f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}",
f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}"],
index=pd.DatetimeIndex(start=datetime.datetime(2020, 1, 1),
freq='h', periods=len(brick_rel)),
data=np.vstack([brick_rel, brick_abs, mortar_rel, mortar_abs]).T)
local_df.columns = pd.MultiIndex.from_arrays([local_df.columns, ['brick', 'brick', 'mortar', 'mortar'],
['relative', 'absolute', 'relative', 'absolute']],
names=['location', 'material', 'type'])
return local_df
def differences_weighted(i, plots=False):
        # weighted average of the bottom/mid/top 2D cells at this x-location;
        # the weights are assumed to reflect the relative extents of the cells
        avg_2d = np.average(a=[sim_2d[3*i]['temperature'],
                               sim_2d[3*i + 1]['temperature'],
                               sim_2d[3*i + 2]['temperature']],
                            axis=0,
                            weights=[56, 24., 56])
brick_abs = abs_diff(brick_1d[i]['temperature'], avg_2d)
mortar_abs = abs_diff(mortar_1d[i]['temperature'], avg_2d)
brick_rel = rel_diff(brick_1d[i]['temperature'], avg_2d)
mortar_rel = rel_diff(mortar_1d[i]['temperature'], avg_2d)
if plots:
# Plot
plt.figure()
plt.title(f"Temperature - Weighted Absolute Difference\n"
f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
plt.plot(x_date, brick_abs, color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_abs, color=colors['1d_mortar'], label=f"1D Mortar")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel('%')
plt.figure()
plt.title(f"Temperature - Weighted Relative Difference\n"
f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
plt.plot(x_date, brick_rel, color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_rel, color=colors['1d_mortar'], label=f"1D Mortar")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel('%')
local_df = pd.DataFrame(columns=[f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}",
f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}"],
index=pd.DatetimeIndex(start=datetime.datetime(2020, 1, 1),
freq='h', periods=len(brick_rel)),
data=np.vstack([brick_rel, brick_abs, mortar_rel, mortar_abs]).T)
local_df.columns = pd.MultiIndex.from_arrays([local_df.columns, ['brick', 'brick', 'mortar', 'mortar'],
['relative', 'absolute', 'relative', 'absolute']],
names=['location', 'material', 'type'])
return local_df
dataframes = []
weighted_dataframes = []
for index in range(len(brick_1d)):
dataframes.append(differences(index))
weighted_dataframes.append(differences_weighted(index))
#plt.show()
result_dataframe = pd.concat(dataframes, axis=1)
w_result_dataframe = pd.concat(weighted_dataframes, axis=1)
absolute_df = result_dataframe.loc[:, pd.IndexSlice[:, :, 'absolute']]
absolute_df.columns = absolute_df.columns.droplevel(level=2)
relative_df = result_dataframe.loc[:, pd.IndexSlice[:, :, 'relative']]
relative_df.columns = relative_df.columns.droplevel(level=2)
w_absolute_df = w_result_dataframe.loc[:, pd.IndexSlice[:, :, 'absolute']]
w_absolute_df.columns = w_absolute_df.columns.droplevel(level=2)
w_relative_df = w_result_dataframe.loc[:, pd.IndexSlice[:, :, 'relative']]
w_relative_df.columns = w_relative_df.columns.droplevel(level=2)
plt.figure()
ax = absolute_df.boxplot()
ax.set_ylim(-20, 20)
ax.set_ylabel('Temperature - C')
ax.set_title('Absolute Differences')
#plt.show()
out_folder = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\2d_1d\processed_data'
def excel():
writer = pd.ExcelWriter(out_folder + '/temperature.xlsx')
relative_df.describe().to_excel(writer, 'relative')
absolute_df.describe().to_excel(writer, 'absolute')
writer.save()
#excel()
def save_relative():
hdf_file = out_folder + '/relative_temperature.h5'
w_relative_df.to_hdf(hdf_file, project_, append=True)
save_relative()
for project_key in project_dict.keys():
print(f'Processing {project_key}')
    main(project_key)
| mit |
BiaDarkia/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 33 | 4174 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples. Note you can
increase this to label more than 30 by changing `max_iterations`. Labeling
more than 30 can be useful to get a sense for the speed of convergence of
this active learning technique.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
max_iterations = 5
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(max_iterations):
if len(unlabeled_indices) == 0:
print("No unlabeled items left to label.")
break
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print("Iteration %i %s" % (i, 70 * "_"))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points,
n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select up to 5 digit examples that the classifier is most uncertain about
uncertainty_index = np.argsort(pred_entropies)[::-1]
uncertainty_index = uncertainty_index[
np.in1d(uncertainty_index, unlabeled_indices)][:5]
# keep track of indices that we get labels for
delete_indices = np.array([])
# for more than 5 iterations, visualize the gain only on the first 5
if i < 5:
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" %
((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
# for more than 5 iterations, visualize the gain only on the first 5
if i < 5:
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r, interpolation='none')
sub.set_title("predict: %i\ntrue: %i" % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
        # mark this image's index for removal from the unlabeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += len(uncertainty_index)
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.", y=1.15)
plt.subplots_adjust(left=0.2, bottom=0.03, right=0.9, top=0.9, wspace=0.2,
hspace=0.85)
plt.show()
| bsd-3-clause |
stharrold/ARCHIVED_bench_fastq | bench_fastq/utils.py | 2 | 15946 | #!/usr/bin/env python
"""Utils to parse the terminal output from bench_compress.sh
"""
from __future__ import print_function, division, absolute_import
import os
import sys
import json
import datetime as dt
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def parse_elapsed(elapsed):
"""Parse string of elapsed time from output of Unix 'time' command into
`datetime.timedelta`.
Parameters
----------
elapsed : string
Elapsed time field output from Unix 'time' command.
Format: [HH:]MM:SS[.SSS]
Returns
-------
elapsed_dt : datetime.timedelta
Elapsed time as ``datetime.timedelta``.
"""
elapsed_arr = elapsed.split(':')
if len(elapsed_arr) == 2:
hours = '0'
[minutes, seconds] = elapsed_arr
elif len(elapsed_arr) == 3:
[hours, minutes, seconds] = elapsed_arr
else:
raise AssertionError(("Program error. Elapsed time does not have 2 or 3 fields:\n" +
"{ea}").format(ea=elapsed_arr))
hours_int = int(float(hours))
minutes_int = int(float(minutes))
seconds_int = int(float(seconds))
milliseconds_int = int((float(seconds) - seconds_int) / 0.001)
elapsed_dt = dt.timedelta(hours=hours_int,
minutes=minutes_int,
seconds=seconds_int,
milliseconds=milliseconds_int)
return elapsed_dt
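# Sketch of the expected mapping (hypothetical values):
#     parse_elapsed("1:30:05")  -> datetime.timedelta(hours=1, minutes=30, seconds=5)
#     parse_elapsed("2:03")     -> datetime.timedelta(minutes=2, seconds=3)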
def recursive_timedelta_to_totsec(dobj):
"""Recursively convert ``datetime.timedelta`` elements to total seconds
in a ``dict``.
Call this function before writing the ``dict`` to JSON.
Parameters
----------
dobj : dict
``dict`` that may contain ``datetime.timedelta`` elements. ``dict`` may
be nested.
Returns
-------
dobj_converted : dict
``dict`` with ``datetime.timedelta`` elements converted to
total seconds.
"""
dobj_converted = {}
for key in dobj:
if isinstance(dobj[key], dt.timedelta):
dobj_converted[key] = dobj[key].total_seconds()
elif isinstance(dobj[key], dict):
dobj_converted[key] = recursive_timedelta_to_totsec(dobj=dobj[key])
else:
dobj_converted[key] = dobj[key]
return dobj_converted
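# Sketch of the conversion (hypothetical nested dict):
#     recursive_timedelta_to_totsec({'a': dt.timedelta(minutes=1),
#                                    'b': {'c': dt.timedelta(seconds=30)}})
#     -> {'a': 60.0, 'b': {'c': 30.0}}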
def parse_compress(fin, fout=None):
"""Parse terminal output from bench_compress.sh
    Extract filename, file size, compression method, compression ratio, and compression/decompression speed.
Note: This function is rigidly dependent upon bench_compress.sh.
Parameters
----------
fin : string
Path to text file with terminal output.
fout : {None}, string, optional
Path to output .json file of parsed terminal output.
Returns
-------
parsed : dict
``dict`` of parsed terminal output.
"""
# Check input.
fpath = os.path.abspath(fin)
if not os.path.isfile(fpath):
raise IOError("File does not exist:\n{fpath}".format(fpath=fpath))
if fout is not None:
if not os.path.splitext(fout)[1] == '.json':
raise IOError(("File extension is not '.json':\n" +
"{fout}").format(fout=fout))
# Parse text file into dict.
parsed = {}
skip_lines = None
catch_initial_size = None
catch_comp_cmd = None
catch_comp_time = None
catch_comp_size = None
catch_decomp_cmd = None
catch_decomp_time = None
catch_decomp_size = None
    with open(fpath, 'r') as fobj:
for line in fobj:
line = line.rstrip()
if line.startswith('Begin processing:'):
line_arr = line.split(':')
fname = os.path.splitext(os.path.basename(line_arr[1]))[0]
parsed[fname] = {}
continue
# Note: Typo in original script "Intial". Do not correct.
elif line.startswith('Intial .fastq size:'):
catch_initial_size = True
skip_lines = 1
continue
elif catch_initial_size and skip_lines >= 0:
if skip_lines > 0:
skip_lines -= 1
continue
elif skip_lines == 0:
line_arr = line.split()
parsed[fname]['size_bytes'] = int(line_arr[0])
assert os.path.basename(line_arr[1]) == fname
catch_initial_size = False
skip_lines = None
continue
elif line.startswith('Iteration:'):
line_arr = line.split(':')
iteration = int(line_arr[1])
parsed[fname][iteration] = {}
continue
elif line.startswith('Testing'):
line_arr = line.rstrip(':').split()
method = line_arr[1]
parsed[fname][iteration][method] = {}
catch_comp_cmd = True
continue
elif catch_comp_cmd and line.startswith('+ sudo time'):
parsed[fname][iteration][method]['compress'] = {}
parsed[fname][iteration][method]['compress']['command'] = line
catch_comp_cmd = False
catch_comp_time = True
continue
elif catch_comp_time and ('elapsed' in line) and ('CPU' in line):
line_arr = line.split()
elapsed = parse_elapsed(elapsed=line_arr[2].strip('elapsed'))
parsed[fname][iteration][method]['compress']['elapsed_time'] = elapsed
pct_cpu = line_arr[3].strip('%CPU')
if pct_cpu == '?':
pct_cpu = np.NaN
else:
pct_cpu = float(pct_cpu)
parsed[fname][iteration][method]['compress']['CPU_percent'] = pct_cpu
catch_comp_time = False
catch_comp_size = True
continue
elif catch_comp_size:
if line.startswith('+ du --bytes'):
skip_lines = 0
continue
elif skip_lines == 0:
line_arr = line.split()
parsed[fname][iteration][method]['compress']['size_bytes'] = int(line_arr[0])
catch_comp_size = False
skip_lines = None
catch_decomp_cmd = True
continue
elif catch_decomp_cmd and line.startswith('+ sudo time'):
parsed[fname][iteration][method]['decompress'] = {}
parsed[fname][iteration][method]['decompress']['command'] = line
catch_decomp_cmd = False
catch_decomp_time = True
continue
elif catch_decomp_time and ('elapsed' in line) and ('CPU' in line):
line_arr = line.split()
elapsed = parse_elapsed(elapsed=line_arr[2].strip('elapsed'))
parsed[fname][iteration][method]['decompress']['elapsed_time'] = elapsed
pct_cpu = line_arr[3].strip('%CPU')
if pct_cpu == '?':
pct_cpu = np.NaN
else:
pct_cpu = float(pct_cpu)
parsed[fname][iteration][method]['decompress']['CPU_percent'] = pct_cpu
catch_decomp_time = False
catch_decomp_size = True
continue
elif catch_decomp_size:
if line.startswith('+ du --bytes'):
skip_lines = 0
continue
elif skip_lines == 0:
line_arr = line.split()
parsed[fname][iteration][method]['decompress']['size_bytes'] = int(line_arr[0])
if parsed[fname]['size_bytes'] != parsed[fname][iteration][method]['decompress']['size_bytes']:
# noinspection PyPep8
print(("WARNING: File size before and after compression test do not match.\n" +
"file name = {fname}\n" +
"method = {method}\n" +
"initial size (bytes) = {init_size}\n" +
"final size (bytes) = {finl_size}").format(fname=fname, method=method,
init_size=parsed[fname]['size_bytes'],
finl_size=parsed[fname][iteration][method]['decompress']['size_bytes']),
file=sys.stderr)
catch_decomp_size = False
skip_lines = None
continue
# Write out dict as JSON.
if fout is not None:
parsed_converted = recursive_timedelta_to_totsec(dobj=parsed)
print("Writing parsed text to: {fout}".format(fout=fout))
        with open(fout, "w") as fobj:
json.dump(parsed_converted, fobj, indent=4, sort_keys=True)
return parsed
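# Typical call (hypothetical paths):
#     parsed = parse_compress(fin='bench_output.txt', fout='bench_output.json')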
def parsed_dict_to_df(parsed_dict):
"""Convert ``dict`` from parse_compress to ``pandas.dataframe``.
Parameters
----------
parsed_dict : dict
``dict`` of parsed terminal output.
Returns
-------
parsed_df : pandas.dataframe
        ``pandas.dataframe`` with hierarchical index by filename, iteration,
method, quantity.
"""
# TODO: make recursive method, e.g. http://stackoverflow.com/questions/9538875/recursive-depth-of-python-dictionary
filename_df_dict = {}
for filename in parsed_dict:
iteration_df_dict = {}
for iteration in parsed_dict[filename]:
method_df_dict = {}
# Skip size_bytes for file since not a nested dict.
if isinstance(parsed_dict[filename][iteration], dict):
for method in parsed_dict[filename][iteration]:
method_df_dict[method] = pd.DataFrame.from_dict(parsed_dict[filename][iteration][method],
orient='columns')
iteration_df_dict[iteration] = pd.concat(method_df_dict, axis=1)
filename_df_dict[filename] = pd.concat(iteration_df_dict, axis=1)
parsed_df = pd.concat(filename_df_dict, axis=1)
parsed_df.index.names = ['quantity']
parsed_df.columns.names = ['filename', 'iteration', 'method', 'process']
return parsed_df
def condense_parsed_df(parsed_df, parsed_dict):
"""Condense ``pandas.dataframe`` from parsed terminal output.
Calculate compression/decompression rate in GB per minute and compression ratio, averaging over iterations and
taking median of results.
Parameters
----------
parsed_df : pandas.DataFrame
``pandas.DataFrame`` from `parsed_dict_to_df`.
Index name: quantity
        Hierarchical column names: filename, iteration, method, process
parsed_dict : dict
Nested ``dict`` from parse_compress.
Returns
-------
condensed_df : pandas.DataFrame
        Hierarchical index names: method, process, quantity
Column name: quantity
See Also
--------
parsed_dict_to_df, parse_compress, reduce_condensed_df
"""
# Calculate compression/decompression rate in GB per minute and compression ratio.
    # Keep only 'GB_per_minute' and 'compression_ratio'; drop test files and incomplete tests.
    # (Averaging over iterations and the median over files are done in `reduce_condensed_df`.)
condensed_df = parsed_df.stack(['filename', 'method', 'process', 'iteration']).unstack('quantity').copy()
condensed_df['elapsed_seconds'] = condensed_df['elapsed_time'].apply(
lambda x: x.total_seconds() if isinstance(x, dt.timedelta) else x)
condensed_df['elapsed_seconds'] = condensed_df['elapsed_seconds'].apply(lambda x: np.NaN if x == 0.0 else x)
condensed_df['GB_per_minute'] = np.NaN
condensed_df['compression_ratio'] = np.NaN
# TODO: Use .values to vectorize
for fname in condensed_df.index.levels[0].values:
# TODO: remove SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame
condensed_df.loc[fname, 'GB_per_minute'].update(
(parsed_dict[fname]['size_bytes'] / condensed_df.loc[fname, 'elapsed_seconds']).multiply(60.0 / 1.0E9))
condensed_df.loc[fname, 'compression_ratio'].update(
condensed_df.loc[fname, 'size_bytes'].div(parsed_dict[fname]['size_bytes']))
return condensed_df
def reduce_condensed_df(condensed_df):
"""Reduce ``pandas.DataFrame`` from `condense_parsed_df` by averaging over iterations and taking the median over
file names.
Parameters
----------
condensed_df : pandas.DataFrame
        Hierarchical index names: method, process, quantity
Column name: quantity
Returns
-------
    reduced_ser : pandas.Series
        Reduced ``pandas.Series`` of median results.
        Hierarchical index names: method, process, quantity
See Also
--------
condense_parsed_df, plot_rate, plot_ratio
"""
reduced_ser = condensed_df.stack().unstack(['filename', 'method', 'process', 'quantity']).mean()
reduced_ser = reduced_ser.unstack(['method', 'process', 'quantity']).median()
return reduced_ser
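# The helpers chain into a small analysis pipeline (hypothetical file name):
#     parsed = parse_compress('bench_output.txt')
#     condensed = condense_parsed_df(parsed_dict_to_df(parsed), parsed)
#     reduced = reduce_condensed_df(condensed)
#     plot_rate(reduced)
#     plot_ratio(reduced)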
def plot_rate(reduced_ser, fout=None):
"""Plot processing rate vs compression method.
Parameters
----------
reduced_ser : pandas.Series
``pandas.Series`` from `reduce_condensed_df`.
        Hierarchical index names: method, process, quantity
fout : {None}, string, optional
Path to save plot as image. Extension must be supported by ``matplotlib.pyplot.savefig()``
Returns
-------
None
See Also
--------
reduce_condensed_df, plot_ratio
"""
plt.figure()
pd.DataFrame.plot(reduced_ser.unstack(['quantity'])['GB_per_minute'].unstack(['process']),
title="Processing rate vs compression method\nmedian results over all files",
sort_columns=True, kind='bar')
legend = plt.legend(loc='best', title="Process")
legend.get_texts()[0].set_text('Compress')
legend.get_texts()[1].set_text('Decompress')
xtick_labels = ('(bzip2, --fast)', '(fqz_comp, default)', '(gzip, --fast)', '(quip, default)')
    plt.xticks(range(len(xtick_labels)), xtick_labels, rotation=45)
plt.xlabel("Compression method with options")
plt.ylabel("Processing rate (GB per minute)")
if fout is not None:
print("Writing plot to: {fout}".format(fout=fout))
plt.savefig(fout, bbox_inches='tight')
plt.show()
return None
def plot_ratio(reduced_ser, fout=None):
"""Plot compression ratio vs compression method.
Parameters
----------
reduced_ser : pandas.Series
``pandas.Series`` from `reduce_condensed_df`.
        Hierarchical index names: method, process, quantity
fout : {None}, string, optional
Path to save plot as image. Extension must be supported by ``matplotlib.pyplot.savefig()``
Returns
-------
None
See Also
--------
reduce_condensed_df, plot_rate
"""
plt.figure()
pd.Series.plot(reduced_ser.unstack(['quantity'])['compression_ratio'].unstack(['process'])['compress'],
title="Compression size ratio vs compression method\nmedian results over all files",
sort_columns=True, kind='bar')
xtick_labels = ('(bzip2, --fast)', '(fqz_comp, default)', '(gzip, --fast)', '(quip, default)')
    plt.xticks(range(len(xtick_labels)), xtick_labels, rotation=45)
plt.xlabel("Compression method with options")
plt.ylabel("Compression size ratio\n(compressed size / decompressed size)")
if fout is not None:
print("Writing plot to: {fout}".format(fout=fout))
plt.savefig(fout, bbox_inches='tight')
plt.show()
return None
| mit |
ESMG/ESMG-configs | CCS1/plot_vort.py | 1 | 2295 | import numpy as np
import netCDF4
import os
import sys
import subprocess
import pyroms
from pyroms_toolbox import jday2date
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from datetime import datetime
# draw line around map projection limb.
# color background of map projection region.
# missing values over land will show up this color.
# plot sst, then ice with pcolor
# add a title.
#year = int(sys.argv[1])
#lst_year = [year]
lst_file = []
#for year in lst_year:
# year = np.str(year)
#lst = subprocess.getoutput('ls clima/*.nc')
lst = subprocess.getoutput('ls 19800110.ocean_daily.nc')
lst = lst.split()
lst_file = lst_file + lst
#grd = pyroms_toolbox.BGrid_GFDL.get_nc_BGrid_GFDL('prog.nc')
grd = netCDF4.Dataset('sea_ice_geometry.nc', "r")
clat = grd.variables["geolatb"][:]
clon = grd.variables["geolonb"][:]
m = Basemap(llcrnrlon=-121., llcrnrlat=17., urcrnrlon=-125.0, urcrnrlat=53.0,\
rsphere=(6378137.00,6356752.3142),\
resolution='h', projection='lcc',\
lat_0=30., lat_1=40.0, lon_0=-78.)
x, y = m(clon, clat)
levels = np.arange(-.6, 0.6, 0.01)
cmap = plt.cm.get_cmap("seismic")
for file in lst_file:
print("Plotting "+file)
nc = netCDF4.Dataset(file, "r")
time = nc.variables["time"][:]
ntim = len(time)
# for it in range(10):
for it in range(0,ntim,30):
fig = plt.figure(figsize=(4,9))
ax = fig.add_subplot(111)
ax.set_aspect('equal')
# ax.axis(xmin=-300,xmax=300)
# m.drawmapboundary(fill_color='0.3')
m.drawcoastlines()
        rv = nc.variables["RV"][it,0,:-1,:-1]  # surface relative vorticity
        rv *= 1.e4  # scale to units of 1e-4 s^-1 for plotting
        time = nc.variables["time"][it]
        cs = m.contourf(x, y, rv, levels=levels, cmap=cmap, extend='both')
#        csa = m.contour(x, y, rv, levels=levels, linewidths=(0.5,))
#        cs = plt.contourf(clon, clat, rv, levels=levels, cmap=cmap, extend='both')
        plt.title('Surface RV')
#        csa = plt.contour(clon, clat, rv, levels=levels, linewidths=(0.5,))
cbaxes = fig.add_axes([0.1, 0.05, 0.8, 0.02])
plt.colorbar(orientation='horizontal', cax=cbaxes)
print('printing frame:', it)
fig.savefig('movie/vort_%(number)04d.png'%{'number': it})
plt.close()
nc.close()
| gpl-3.0 |
mrcslws/htmresearch | projects/thing_classification/thing_convergence.py | 3 | 13625 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file is used to run Thing experiments using simulated sensations.
"""
import random
import os
from math import ceil
import numpy as np
import pprint
import matplotlib.pyplot as plt
from sklearn import manifold, random_projection
from htmresearch.frameworks.layers.l2_l4_inference import (
L4L2Experiment, rerunExperimentFromLogfile)
from htmresearch.frameworks.layers.object_machine_factory import (
createObjectMachine
)
def getL4Params():
"""
Returns a good default set of parameters to use in the L4 region.
"""
return {
"columnCount": 256,
"cellsPerColumn": 16,
"learn": True,
"learnOnOneCell": False,
"initialPermanence": 0.51,
"connectedPermanence": 0.6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.01,
"minThreshold": 19,
"predictedSegmentDecrement": 0.0,
"activationThreshold": 19,
"sampleSize": 20,
"implementation": "etm",
}
def getL2Params():
"""
  Returns a good default set of parameters to use in the L2 region.
"""
return {
"inputWidth": 256 * 16,
"cellCount": 4096,
"sdrSize": 40,
"synPermProximalInc": 0.5,
"synPermProximalDec": 0.0,
"initialProximalPermanence": 0.6,
"minThresholdProximal": 9,
"sampleSizeProximal": 10,
"connectedPermanenceProximal": 0.5,
"synPermDistalInc": 0.1,
"synPermDistalDec": 0.001,
"initialDistalPermanence": 0.41,
"activationThresholdDistal": 13,
"sampleSizeDistal": 30,
"connectedPermanenceDistal": 0.5,
"distalSegmentInhibitionFactor": 1.001,
"learningMode": True,
}
def locateConvergencePoint(stats, minOverlap, maxOverlap):
"""
Walk backwards through stats until you locate the first point that diverges
from target overlap values. We need this to handle cases where it might get
to target values, diverge, and then get back again. We want the last
convergence point.
"""
for i,v in enumerate(stats[::-1]):
if not (v >= minOverlap and v <= maxOverlap):
return len(stats)-i + 1
# Never differs - converged in one iteration
return 1
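# Sketch: with minOverlap=5 and maxOverlap=10, the trace [0, 2, 6, 9, 8] stays
# inside [5, 10] from index 2 onward, so the convergence point is 3
# (a 1-based iteration count).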
def averageConvergencePoint(inferenceStats, prefix, minOverlap, maxOverlap,
settlingTime):
"""
inferenceStats contains activity traces while the system visits each object.
Given the i'th object, inferenceStats[i] contains activity statistics for
each column for each region for the entire sequence of sensations.
For each object, compute the convergence time - the first point when all
L2 columns have converged.
Return the average convergence time across all objects.
Given inference statistics for a bunch of runs, locate all traces with the
given prefix. For each trace locate the iteration where it finally settles
on targetValue. Return the average settling iteration across all runs.
"""
convergenceSum = 0.0
# For each object
for stats in inferenceStats:
# For each L2 column locate convergence time
convergencePoint = 0.0
for key in stats.iterkeys():
if prefix in key:
columnConvergence = locateConvergencePoint(
stats[key], minOverlap, maxOverlap)
# Ensure this column has converged by the last iteration
# assert(columnConvergence <= len(stats[key]))
convergencePoint = max(convergencePoint, columnConvergence)
convergenceSum += ceil(float(convergencePoint)/settlingTime)
return convergenceSum/len(inferenceStats)
def loadThingObjects(numCorticalColumns=1, objDataPath='./data/'):
"""
Load simulated sensation data on a number of different objects
There is one file per object, each row contains one feature, location pairs
The format is as follows
[(-33.6705, 75.5003, 2.4207)/10] => [[list of active bits of location],
[list of active bits of feature]]
The content before "=>" is the true 3D location / sensation
The number of active bits in the location and feature is listed after "=>".
@return A simple object machine
"""
# create empty simple object machine
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=1024,
externalInputSize=1024,
numCorticalColumns=numCorticalColumns,
numFeatures=0,
numLocations=0,
)
for _ in range(numCorticalColumns):
objects.locations.append([])
objects.features.append([])
objFiles = []
for f in os.listdir(objDataPath):
if os.path.isfile(os.path.join(objDataPath, f)):
if '.log' in f:
objFiles.append(f)
idx = 0
OnBitsList = []
for f in objFiles:
objName = f.split('.')[0]
objName = objName[4:]
objFile = open('{}/{}'.format(objDataPath, f))
sensationList = []
for line in objFile.readlines():
# parse thing data file and extract feature/location vectors
sense = line.split('=>')[1].strip(' ').strip('\n')
OnBitsList.append(float(line.split('] =>')[0].split('/')[1]))
location = sense.split('],[')[0].strip('[')
feature = sense.split('],[')[1].strip(']')
location = np.fromstring(location, sep=',', dtype=np.uint8)
feature = np.fromstring(feature, sep=',', dtype=np.uint8)
# add the current sensation to object Machine
sensationList.append((idx, idx))
for c in range(numCorticalColumns):
objects.locations[c].append(set(location.tolist()))
objects.features[c].append(set(feature.tolist()))
idx += 1
objects.addObject(sensationList, objName)
print "load object file: {} object name: {} sensation # {}".format(
f, objName, len(sensationList))
OnBitsList = np.array(OnBitsList)
plt.figure()
plt.hist(OnBitsList)
return objects, OnBitsList
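# Typical call (the './data' layout is an assumption of this script):
#     objects, onBitsList = loadThingObjects(numCorticalColumns=1,
#                                            objDataPath='./data')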
def trainNetwork(objects, numColumns, l4Params, l2Params, verbose=False):
print " Training sensorimotor network ..."
objectNames = objects.objects.keys()
numObjects = len(objectNames)
exp = L4L2Experiment("shared_features",
L2Overrides=l2Params,
L4Overrides=l4Params,
numCorticalColumns=numColumns)
exp.learnObjects(objects.provideObjectsToLearn())
settlingTime = 1
L2Representations = exp.objectL2Representations
# if verbose:
# print "Learned object representations:"
# pprint.pprint(L2Representations, width=400)
# print "=========================="
# For inference, we will check and plot convergence for each object. For each
# object, we create a sequence of random sensations for each column. We will
# present each sensation for settlingTime time steps to let it settle and
# ensure it converges.
maxSensationNumber = 30
overlapMat = np.zeros((numObjects, numObjects, maxSensationNumber))
numL2ActiveCells = np.zeros((numObjects, maxSensationNumber))
for objectIdx in range(numObjects):
objectId = objectNames[objectIdx]
obj = objects[objectId]
# Create sequence of sensations for this object for one column. The total
# number of sensations is equal to the number of points on the object. No
# point should be visited more than once.
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
exp.sendReset()
for sensationNumber in range(maxSensationNumber):
objectSensations = {}
for c in range(numColumns):
objectSensations[c] = []
if sensationNumber >= len(objectCopy):
pair = objectCopy[-1]
else:
pair = objectCopy[sensationNumber]
if numColumns > 1:
raise NotImplementedError
else:
# stay multiple steps on each sensation
for _ in xrange(settlingTime):
objectSensations[0].append(pair)
inferConfig = {
"object": objectId,
"numSteps": len(objectSensations[0]),
"pairs": objectSensations,
"includeRandomLocation": False,
}
inferenceSDRs = objects.provideObjectToInfer(inferConfig)
exp.infer(inferenceSDRs, objectName=objectId, reset=False)
for i in range(numObjects):
overlapMat[objectIdx, i, sensationNumber] = len(
exp.getL2Representations()[0] &
L2Representations[objects.objects.keys()[i]][0])
# if verbose:
# print "Intersection with {}:{}".format(
# objectNames[i], overlapMat[objectIdx, i])
for c in range(numColumns):
numL2ActiveCells[objectIdx, sensationNumber] += len(
exp.getL2Representations()[c])
print "{} # L2 active cells {}: ".format(sensationNumber,
numL2ActiveCells[
objectIdx, sensationNumber])
if verbose:
print "Output for {}: {}".format(objectId, exp.getL2Representations())
print "Final L2 active cells {}: ".format(
numL2ActiveCells[objectIdx, sensationNumber])
print
exp.sendReset()
expResult = {'overlapMat': overlapMat,
'numL2ActiveCells': numL2ActiveCells}
return expResult
def computeAccuracy(expResult, objects):
objectNames = objects.objects.keys()
overlapMat = expResult['overlapMat'][:, :, -1]
numL2ActiveCells = expResult['numL2ActiveCells'][:, -1]
numCorrect = 0
numObjects = overlapMat.shape[0]
numFound = 0
percentOverlap = np.zeros(overlapMat.shape)
for i in range(numObjects):
for j in range(i, numObjects):
percentOverlap[i, j] = overlapMat[i, j] # / np.min([numL2ActiveCells[i], numL2ActiveCells[j]])
objectNames = np.array(objectNames)
for i in range(numObjects):
# idx = np.where(overlapMat[i, :]>confuseThresh)[0]
idx = np.where(percentOverlap[i, :] == np.max(percentOverlap[i, :]))[0]
print " {}, # sensations {}, best match is {}".format(
objectNames[i], len(objects[objectNames[i]]), objectNames[idx])
found = len(np.where(idx == i)[0]) > 0
numFound += found
if not found:
print "<=========== {} was not detected ! ===========>".format(objectNames[i])
if len(idx) > 1:
continue
if idx[0] == i:
numCorrect += 1
accuracy = float(numCorrect)/numObjects
numPerfect = len(np.where(numL2ActiveCells<=40)[0])
print "accuracy: {} ({}/{}) ".format(accuracy, numCorrect, numObjects)
print "perfect retrival ratio: {} ({}/{}) ".format(
float(numPerfect)/numObjects, numPerfect, numObjects)
print "Object detection ratio {}/{} ".format(numFound, numObjects)
return accuracy
def runExperimentAccuracyVsL4Thresh():
accuracyVsThresh = []
threshList = np.arange(13, 20)
for thresh in threshList:
numColumns = 1
l2Params = getL2Params()
l4Params = getL4Params()
l4Params['minThreshold'] = thresh
l4Params['activationThreshold'] = thresh
    objects, _ = loadThingObjects(1, './data')  # returns (objects, OnBitsList)
expResult = trainNetwork(objects, numColumns, l4Params, l2Params, True)
accuracy = computeAccuracy(expResult, objects)
accuracyVsThresh.append(accuracy)
plt.figure()
plt.plot(threshList, accuracyVsThresh, '-o')
plt.xlabel('L4 distal Threshold')
plt.ylabel('Classification Accuracy')
plt.savefig('accuracyVsL4Thresh.pdf')
return threshList, accuracyVsThresh
if __name__ == "__main__":
# uncomment to plot accuracy as a function of L4 threshold
# threshList, accuracyVsThresh = runExperimentAccuracyVsL4Thresh()
numColumns = 1
l2Params = getL2Params()
l4Params = getL4Params()
verbose = 1
objects, OnBitsList = loadThingObjects(numColumns, './data')
expResult = trainNetwork(objects, numColumns, l4Params, l2Params, True)
accuracy = computeAccuracy(expResult, objects)
objectNames = objects.objects.keys()
numObjects = len(objectNames)
overlapMat = expResult['overlapMat']
numL2ActiveCells = expResult['numL2ActiveCells']
objectNames = objects.objects.keys()
numObjects = len(objectNames)
plt.figure()
for sensationNumber in range(10):
plt.imshow(overlapMat[:, :, sensationNumber])
plt.xticks(range(numObjects), objectNames, rotation='vertical', fontsize=4)
plt.yticks(range(numObjects), objectNames, fontsize=4)
plt.title('pairwise overlap at step {}'.format(sensationNumber))
plt.xlabel('target representation')
plt.ylabel('inferred representation')
plt.tight_layout()
plt.savefig('plots/overlap_matrix_step_{}.png'.format(sensationNumber))
# plot number of active cells for each object
plt.figure()
objectNamesSort = []
idx = np.argsort(expResult['numL2ActiveCells'][:, -1])
for i in idx:
objectNamesSort.append(objectNames[i])
plt.plot(numL2ActiveCells[idx, -1])
plt.xticks(range(numObjects), objectNamesSort, rotation='vertical', fontsize=5)
plt.tight_layout()
plt.ylabel('Number of active L2 cells')
plt.savefig('plots/number_of_active_l2_cells.pdf')
#
| agpl-3.0 |
anderson1008/NOCulator | hring/src/Script/my_print.py | 1 | 8437 | #!/usr/bin/python
import sys
import os
import re
import fnmatch
import string
import matplotlib.pyplot as plt
def print_period(stat):
# use to profile the application running solo.
# stat is an iterator or array
i = 0
for item in stat:
plt.plot(item, label=str(i))
plt.legend()
i = i + 1
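# Sketch of intended use (hypothetical per-application traces):
#     print_period([[1, 2, 3], [2, 4, 6]])  # one labeled line per trace
#     plt.show()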
def print_double_array (x):
for x_i in x:
sys.stdout.write(str("%.2f" % x_i) + ' ')
print "\n"
sys.stdout.flush()
def print_int_array (x):
for x_i in x:
sys.stdout.write(str(x_i) + ' ')
print "\n"
sys.stdout.flush()
def print_stat_dict (my_stat):
for key, value in iter(sorted(my_stat.iteritems())):
if type(value) is not list:
print key.ljust(20), value.ljust(20)
# else:
# for element in value:
# print element
def print_power (stat):
output_str = '\n\n############# Power Distribution ################\n\n'
output_str = output_str + ''.ljust(15) + 'Static'.ljust(20) + 'Dynamic'.ljust(20) + 'Overall'.ljust(20) + '\n'
## print BLESS
static_percent = "{:.2f}".format(stat[0]/stat[2]*100)
dynamic_percent = "{:.2f}".format(stat[1]/stat[2]*100)
output_str = output_str + 'BLESS'.ljust(15) + ('%s (%s%%)'%("{:.2f}".format(stat[0]),static_percent)).ljust(20) + ('%s (%s%%)'%("{:.2f}".format(stat[1]),dynamic_percent)).ljust(20) + str(stat[2]).ljust(20) + '\n'
# print MBNoC
static_percent = "{:.2f}".format(stat[3]/stat[5]*100)
dynamic_percent = "{:.2f}".format(stat[4]/stat[5]*100)
output_str = output_str + 'MBNoC'.ljust(15) + ('%s (%s%%)'%("{:.2f}".format(stat[3]),static_percent)).ljust(20) + ('%s (%s%%)'%("{:.2f}".format(stat[4]),dynamic_percent)).ljust(20) + str(stat[5]).ljust(20)
output_str = output_str + '\n'
print output_str
def print_power_breakdown (stat):
output_str = '\n\n############# Power Breakdown ################\n\n'
output_str = output_str + ''.ljust(15) + 'Static'.ljust(20) + 'Dynamic'.ljust(20) + 'Overall'.ljust(20) + '\n'
output_str = output_str + 'Component'.ljust(15) + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + '\n'
print_order = ['DFF', 'portAlloc', 'RC', 'Xbar', 'Local', 'permNet', 'link']
for component in range (0, 7):
output_str = output_str + print_order[component].ljust(15)
for metric in stat:
output_str = output_str + str(metric[component+1]).ljust(10)
output_str = output_str + '\n'
print output_str
def print_final_stat (stat):
output_str = '\n\n############# Overall ################\n\n'
	output_str = output_str + ''.ljust(20) + 'weighted_speedup'.ljust(20) + 'Energy'.ljust(20) + 'Throughput'.ljust(20) + 'Deflection Rate'.ljust(20) + '\n'
output_str = output_str + 'Load'.ljust(10) + 'Count'.ljust(10)
for i in range (0, 4):
output_str = output_str + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10)
output_str = output_str + '\n' + 'Low'.ljust(10)
for metric in stat[0]:
output_str = output_str + str(metric).ljust(10)
output_str = output_str + '\n'
output_str = output_str + 'Medium'.ljust(10)
for metric in stat[1]:
output_str = output_str + str(metric).ljust(10)
output_str = output_str + '\n'
output_str = output_str + 'High'.ljust(10)
for metric in stat[2]:
output_str = output_str + str(metric).ljust(10)
output_str = output_str + '\n'
output_str = output_str + 'Average'.ljust(10)
for metric in stat[3]:
output_str = output_str + str(metric).ljust(10)
output_str = output_str + '\n'
print output_str
return output_str
def print_for_plot (stat):
output_str = '\n\n############# Print for plot ################\n\n'
	output_str = output_str + 'Baseline of each metric of interest is 1.\nEach metric is normalized to BLESS with the same network size.\n\n'
	output_str = output_str + 'Load'.ljust(8) + 'Count'.ljust(8) + 'ws'.ljust(8) + '4x4'.ljust(8) + '8x8'.ljust(8) + '16x16'.ljust(8) + 'engy'.ljust(8) + '4x4'.ljust(8) + '8x8'.ljust(8) + '16x16'.ljust(8) + 'th'.ljust(8) + '4x4'.ljust(8) + '8x8'.ljust(8) + '16x16'.ljust(8) + 'defl'.ljust(8) + '4x4'.ljust(8) + '8x8'.ljust(8) + '16x16'.ljust(8) + '\n'
groups = ['Low','Medium','High','Average']
i = 0
for element in stat:
output_str = output_str + groups[i].ljust(8)
for metric in element:
output_str = output_str + str(metric).ljust(8)
i = i + 1
output_str = output_str + '\n'
print output_str
return output_str
def print_synth (stat, design):
traffic = str(stat.pop(0))
network = str(stat.pop(0))
#output_str = '\n\n############# ' + "Traffic = " + traffic.ljust(20) + "Network = " + network.ljust(20) + ' ################\n\n'
#output_str = output_str + 'Inject_rate'.ljust(20) + 'Energy'.ljust(20) + 'Latency'.ljust(20) + 'Deflect_rate'.ljust(20) + 'Throughput'.ljust(20) + '\n\n'
#output_str = output_str + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + '\n'
output_str = '\n\n############# ' + 'Traffic = ' + traffic.ljust(20) + 'Network = ' + network.ljust(20) + ' ################\n\n'
type_stat = len(stat) / len(design)
#for i in range (0, type_stat):
space = (len(design)+1)*10
output_str = output_str + 'Energy'.ljust(space) + 'Latency'.ljust(space) + 'Throughput'.ljust(space) + 'Deflect_rate'.ljust(space) + '\n\n'
for i in range (1, 80, 1):
load = "{:.2f}".format(float(i)/100)
for j in range (0, len(stat)):
			if j % len(design) == 0:
output_str = output_str + load.ljust(10)
if load in stat[j]:
output_str = output_str + str(stat[j][load]).ljust(10)
else:
output_str = output_str + '-'.ljust(10)
output_str = output_str + '\n'
#for i in range (0, len(stat[0])):
# for j in range (0, len(stat)):
# output_str = output_str + str(stat[j][i]).ljust(10)
# output_str = output_str + '\n'
output_str = output_str + '********* Based on %u data points ************' % len(stat[0])
print output_str
def print_synth_wrt_load (stat, design):
traffic = str(stat.pop(0))
network = str(stat.pop(0))
output_str = '\n\n############# ' + 'Traffic = ' + traffic.ljust(20) + 'Network = ' + network.ljust(20) + ' ################\n\n'
type_stat = len(stat) / len(design)
#for i in range (0, type_stat):
space = (len(design)+1)*10
output_str = output_str + 'Latency'.ljust(space) + 'Throughput'.ljust(space) + 'Deflect_rate'.ljust(space) + '\n\n'
for i in range (0, type_stat):
output_str = output_str + 'InjRate'.ljust(10)
for element in design:
output_str = output_str + element.ljust(10)
output_str = output_str + '\n'
for i in range (1, 80, 1):
load = "{:.2f}".format(float(i)/100)
for j in range (0, len(stat)):
			if j % len(design) == 0:
output_str = output_str + load.ljust(10)
if load in stat[j]:
output_str = output_str + str(stat[j][load]).ljust(10)
else:
output_str = output_str + '-'.ljust(10)
output_str = output_str + '\n'
output_str = output_str + '********* Based on %u data points ************' % len(stat[0])
print output_str
def print_synth_avg_reduction (stat, design):
output_str = ''
for element in design:
output_str = output_str + element.ljust(10)
baseline = stat[0]
output_str = output_str + '\n' + '1'.ljust(10)
stat.pop(0)
for element in stat:
reduction = ''
if baseline > 0: reduction = "{:.2f}".format((baseline - element) / baseline)
output_str = output_str + reduction.ljust(10)
output_str = output_str + '\n'
print output_str
def print_synth_avg_gain (stat, design):
output_str = ''
for element in design:
output_str = output_str + element.ljust(10)
baseline = stat[0]
output_str = output_str + '\n' + '1'.ljust(10)
stat.pop(0)
for element in stat:
reduction = ''
if baseline > 0: reduction = "{:.2f}".format((element - baseline) / baseline)
output_str = output_str + reduction.ljust(10)
output_str = output_str + '\n'
print output_str
def print_final (stat, design):
output_str = ''
for element in design:
output_str = output_str + element.ljust(10)
output_str = output_str + '\n'
for element in stat:
output_str = output_str + "{:.2f}".format(float(element)).ljust(10)
output_str = output_str + '\n'
print output_str
| mit |
MechCoder/scikit-learn | examples/neighbors/plot_kde_1d.py | 60 | 5120 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', density=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', density=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
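# format_func labels the x axis in units of the bandwidth h, e.g.
#     format_func(1, None) -> 'h', format_func(-2, None) -> '-2h'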
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
istellartech/OpenGoddard | examples/11_Polar_TSTO_Taiki.py | 1 | 19117 | # -*- coding: utf-8 -*-
# Copyright 2017 Interstellar Technologies Inc. All Rights Reserved.
from __future__ import print_function
import numpy as np
from scipy import interpolate
import matplotlib.pyplot as plt
from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics
class Rocket:
# Atmosphere Parameter
# Use US Standard Atmosphere 1976
stdAtmo = np.loadtxt("./11_Polar_TSTO_Taiki/US_standard_atmosphere.csv",delimiter=",",skiprows=2)
    stdAltitude = stdAtmo[:,0] * 1000.0  # convert km -> m
stdPressure= stdAtmo[:,2] # [Pa]
stdDensity= stdAtmo[:,3] # [kg/m3]
stdSoundSpeed = stdAtmo[:,4] # [m/s]
    # Interpolation helpers (linear).
    # Outside the altitude range (< 0 km or > 86 km), fill_value extrapolates.
airPressure = interpolate.interp1d(stdAltitude, stdPressure, bounds_error = False, fill_value = (stdPressure[0], 0.0))
airDensity = interpolate.interp1d(stdAltitude, stdDensity, bounds_error = False, fill_value = (stdDensity[0], 0.0))
airSound = interpolate.interp1d(stdAltitude, stdSoundSpeed, bounds_error = False, fill_value = (stdSoundSpeed[0], stdSoundSpeed[-1]))
# Drag Coefficient
CdLog = np.loadtxt("./11_Polar_TSTO_Taiki/Cd.csv", delimiter=",", skiprows=1)
Cd = interpolate.interp1d(CdLog[:,0], CdLog[:,1],fill_value="extrapolate")
def __init__(self):
# Earth Parameter
self.GMe = 3.986004418 * 10**14 # Earth gravitational constant [m^3/s^2]
self.Re = 6371.0 * 1000 # Earth Radius [m]
self.g0 = 9.80665 # Gravitational acceleration on Earth surface [m/s^2]
# Target Parameter
self.Htarget = 561.0 * 1000 # Altitude [m]
self.Rtarget = self.Re + self.Htarget # Orbit Radius [m]
self.Vtarget = np.sqrt(self.GMe / self.Rtarget) # [m/s]
# Launch Site Parameter
self.lat_taiki = 42.506167 # [deg]
self.Vt_equator = 1674.36 # [km/h]
self.Vt_taiki = self.Vt_equator * np.cos(self.lat_taiki * np.pi / 180.0) * 1000.0 / 3600.0 # Radial Velocity of Earth Surface [m/s]
self.inclination = 96.7 # [deg]
self.V0 = self.Vt_taiki * np.cos(-self.inclination * np.pi / 180.0) # [m/s]
self.H0 = 10.0 # Initial Altitude [m]
# Structure Parameter
        # Mdry is the design parameter here.
self.Mdry = [1300.0, 220.0] # Dry Mass [kg], [1st stage, 2nd stage]
        self.beta = [10.0, 15.0] # Structure Efficiency [%], [1st stage, 2nd stage]
self.Mpayload = 100.0 # Payload Mass [kg]
self.M0 = [self.Mdry[0] / self.beta[0] * 100.0, self.Mdry[1] / self.beta[1] * 100.0] # Initial Stage Mass [kg], [1st stage, 2nd stage]
self.Mp = [self.M0[0] - self.Mdry[0], self.M0[1] - self.Mdry[1]] # Propellant Mass [kg], [1st stage, 2nd stage]
self.M0[1] = self.M0[1] + self.Mpayload
self.Minit = self.M0[0] + self.M0[1] # Initial Total Mass [kg]
self.d = [1.8, 1.8] # Diameter [m], [1st stage, 2nd stage]
self.A = [0.25 * self.d[0] ** 2 * np.pi, 0.25 * self.d[1] ** 2 * np.pi] # Projected Area [m^2], [1st stage, 2nd stage]
# Engine Parameter
self.Cluster = 9
self.Isp = [261.0 + 0.0, 322.0 + 0.0] # Specific Impulse [s], [1st stage at SL, 2nd stage at vac]
self.dth = [53.9, 53.9] # Throat Diameter [mm], [1st stage, 2nd stage]
self.Ath = [0.25 * (self.dth[0] / 1000.0) ** 2 * np.pi, 0.25 * (self.dth[1] / 1000.0) ** 2 * np.pi] # Throat Area [m^2], [1st stage, 2nd stage]
self.AR = [20.0, 140.0] # Area Ratio, [1st stage, 2nd stage]
self.Ae = [self.Ath[0] * self.AR[0] * self.Cluster, self.Ath[1] * self.AR[1]] # Exit Area [m^2], [1st stage, 2nd stage]
# =======
self.ThrustMax = [33.3, 4.2] # Maximum Thrust [ton], [1st stage at SL, 2nd stage at vac]
self.ThrustMax = [self.ThrustMax[0] * self.g0 * 1000.0, self.ThrustMax[1] * self.g0 * 1000.0] # [N]
# self.ThrustLevel = 1.8 # [G] M0[0] * n G
# self.ThrustMax = [self.M0[0] * self.ThrustLevel * self.g0, self.M0[0] * self.ThrustLevel / self.Cluster * self.g0 + self.airPressure(self.Htarget) * self.Ae[1]] # Maximum Thrust [N], [1st stage at SL, 2nd stage at vac]
# =======
        self.refMdot = [self.ThrustMax[0] / (self.Isp[0] * self.g0), self.ThrustMax[1] / (self.Isp[1] * self.g0)] # reference mass flow rates for the Isp correction
self.MaxQ = 500000.0 # Pa
self.MaxG = 20.0 # G
def dynamics(prob, obj, section):
R = prob.states(0, section) # Orbit Radius [m]
theta = prob.states(1, section) #
Vr = prob.states(2, section)
Vt = prob.states(3, section)
m = prob.states(4, section)
Tr = prob.controls(0, section)
Tt = prob.controls(1, section)
g0 = obj.g0
g = obj.g0 * (obj.Re / R)**2 # [m/s2]
rho = obj.airDensity(R - obj.Re)
Mach = np.sqrt(Vr**2 + Vt**2) / obj.airSound(R - obj.Re)
Cd = obj.Cd(Mach)
dThrust = [(obj.airPressure(obj.H0) - obj.airPressure(R - obj.Re)) * obj.Ae[0], obj.airPressure(R - obj.Re) * obj.Ae[1]]
Isp = obj.Isp[section] + dThrust[section] / (obj.refMdot[section] * g0)
    # In the US Standard Atmosphere, rho = 0 above 86 km, so drag = 0 there.
Dr = 0.5 * rho * Vr * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[section] # [N]
Dt = 0.5 * rho * Vt * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[section] # [N]
dx = Dynamics(prob, section)
dx[0] = Vr
dx[1] = Vt / R
dx[2] = Tr / m - Dr / m - g + Vt**2 / R
dx[3] = Tt / m - Dt / m - (Vr * Vt) / R
dx[4] = - np.sqrt(Tr**2 + Tt**2) / (Isp * g0)
return dx()
def equality(prob, obj):
R = prob.states_all_section(0)
theta = prob.states_all_section(1)
Vr = prob.states_all_section(2)
Vt = prob.states_all_section(3)
m = prob.states_all_section(4)
Tr = prob.controls_all_section(0)
Tt = prob.controls_all_section(1)
tf = prob.time_final(-1)
R0 = prob.states(0, 0)
R1 = prob.states(0, 1)
theta0 = prob.states(1, 0)
theta1 = prob.states(1, 1)
Vr0 = prob.states(2, 0)
Vr1 = prob.states(2, 1)
Vt0 = prob.states(3, 0)
Vt1 = prob.states(3, 1)
m0 = prob.states(4, 0)
m1 = prob.states(4, 1)
Tr0 = prob.controls(0, 0)
Tr1 = prob.controls(0, 1)
Tt0 = prob.controls(1, 0)
Tt1 = prob.controls(1, 1)
unit_R = prob.unit_states[0][0]
unit_V = prob.unit_states[0][2]
unit_m = prob.unit_states[0][4]
result = Condition()
# event condition
    result.equal(R0[0], obj.Re + obj.H0, unit=unit_R) # start at the launch-site surface
result.equal(theta0[0], 0.0)
result.equal(Vr0[0], 0.0, unit=unit_V)
result.equal(Vt0[0], obj.V0 , unit=unit_V)
result.equal(m0[0], obj.Minit, unit=unit_m) # (1st stage and 2nd stage and Payload) initial
# knotting condition
result.equal(m1[0], obj.M0[1], unit=unit_m) # (2nd stage + Payload) initial
result.equal(R1[0], R0[-1], unit=unit_R)
result.equal(theta1[0], theta0[-1])
result.equal(Vr1[0], Vr0[-1], unit=unit_V)
result.equal(Vt1[0], Vt0[-1], unit=unit_V)
# Target Condition
result.equal(R1[-1], obj.Rtarget, unit=unit_R) # Radius
result.equal(Vr[-1], 0.0, unit=unit_V) # Radius Velocity
result.equal(Vt[-1], obj.Vtarget, unit=unit_V)
return result()
def inequality(prob, obj):
R = prob.states_all_section(0)
theta = prob.states_all_section(1)
Vr = prob.states_all_section(2)
Vt = prob.states_all_section(3)
m = prob.states_all_section(4)
Tr = prob.controls_all_section(0)
Tt = prob.controls_all_section(1)
tf = prob.time_final(-1)
R0 = prob.states(0, 0)
R1 = prob.states(0, 1)
theta0 = prob.states(1, 0)
theta1 = prob.states(1, 1)
Vr0 = prob.states(2, 0)
Vr1 = prob.states(2, 1)
Vt0 = prob.states(3, 0)
Vt1 = prob.states(3, 1)
m0 = prob.states(4, 0)
m1 = prob.states(4, 1)
Tr0 = prob.controls(0, 0)
Tr1 = prob.controls(0, 1)
Tt0 = prob.controls(1, 0)
Tt1 = prob.controls(1, 1)
rho = obj.airDensity(R - obj.Re)
Mach = np.sqrt(Vr**2 + Vt**2) / obj.airSound(R - obj.Re)
Cd = obj.Cd(Mach)
Dr0 = 0.5 * rho * Vr * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[0] # [N]
Dt0 = 0.5 * rho * Vt * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[0] # [N]
Dr1 = 0.5 * rho * Vr * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[1] # [N]
Dt1 = 0.5 * rho * Vt * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[1] # [N]
g = obj.g0 * (obj.Re / R)**2 # [m/s2]
# dynamic pressure
q = 0.5 * rho * (Vr**2 + Vt**2) # [Pa]
    # acceleration
a_r0 = (Tr - Dr0) / m
a_t0 = (Tt - Dt0) / m
a_mag0 = np.sqrt(a_r0**2 + a_t0**2) # [m/s2]
a_r1 = (Tr - Dr1) / m
a_t1 = (Tt - Dt1) / m
a_mag1 = np.sqrt(a_r1**2 + a_t1**2) # [m/s2]
# Thrust
T0 = np.sqrt(Tr0**2 + Tt0**2)
T1 = np.sqrt(Tr1**2 + Tt1**2)
dThrust0 = (obj.airPressure(obj.H0) - obj.airPressure(R0 - obj.Re)) * obj.Ae[0]
dThrust1 = obj.airPressure(R1 - obj.Re) * obj.Ae[1]
result = Condition()
# lower bounds
    result.lower_bound(R, obj.Re, unit=prob.unit_states[0][0]) # no flight below the surface
    result.lower_bound(m0, obj.Mdry[0] + obj.M0[1], unit=prob.unit_states[0][4]) # not below dry mass
result.lower_bound(m1, obj.Mdry[1], unit=prob.unit_states[0][4])
result.lower_bound(Tr, -obj.ThrustMax[1], unit=prob.unit_controls[0][0])
result.lower_bound(Tt, -obj.ThrustMax[1], unit=prob.unit_controls[0][0])
# upper bounds
    result.upper_bound(m0, obj.Minit, unit=prob.unit_states[0][4]) # not above initial mass
result.upper_bound(m1, obj.M0[1], unit=prob.unit_states[0][4])
result.upper_bound(Tr0, obj.ThrustMax[0] + dThrust0, unit=prob.unit_controls[0][0])
result.upper_bound(Tt0, obj.ThrustMax[0] + dThrust0, unit=prob.unit_controls[0][0])
result.upper_bound(T0, obj.ThrustMax[0] + dThrust0, unit=prob.unit_controls[0][0])
result.upper_bound(Tr1, obj.ThrustMax[1] + dThrust1, unit=prob.unit_controls[0][0])
result.upper_bound(Tt1, obj.ThrustMax[1] + dThrust1, unit=prob.unit_controls[0][0])
result.upper_bound(T1, obj.ThrustMax[1] + dThrust1, unit=prob.unit_controls[0][0])
result.upper_bound(q, obj.MaxQ, unit = prob.unit_states[0][0])
result.upper_bound(a_mag0, obj.MaxG * obj.g0)
result.upper_bound(a_mag1, obj.MaxG * obj.g0)
return result()
def cost(prob, obj):
m1 = prob.states(4, 1)
return -m1[-1] / prob.unit_states[1][4]
# ========================
# Program Starting Point
time_init = [0.0, 200, 800]
n = [20, 30]
num_states = [5, 5]
num_controls = [2, 2]
max_iteration = 90
flag_savefig = True
savefig_file = "./11_Polar_TSTO_Taiki/TSTO_"
# ------------------------
# set OpenGoddard class for algorithm determination
prob = Problem(time_init, n, num_states, num_controls, max_iteration)
# ------------------------
# create instance of operating object
obj = Rocket()
unit_R = obj.Re
unit_theta = 1
unit_V = np.sqrt(obj.GMe / obj.Re)
unit_m = obj.M0[0]
unit_t = unit_R / unit_V
unit_T = unit_m * unit_R / unit_t ** 2
prob.set_unit_states_all_section(0, unit_R)
prob.set_unit_states_all_section(1, unit_theta)
prob.set_unit_states_all_section(2, unit_V)
prob.set_unit_states_all_section(3, unit_V)
prob.set_unit_states_all_section(4, unit_m)
prob.set_unit_controls_all_section(0, unit_T)
prob.set_unit_controls_all_section(1, unit_T)
prob.set_unit_time(unit_t)
# ========================
# Initial parameter guess
# altitude profile
R_init = Guess.cubic(prob.time_all_section, obj.Re, 0.0, obj.Rtarget, 0.0)
# Guess.plot(prob.time_all_section, R_init, "Altitude", "time", "Altitude")
# if(flag_savefig):plt.savefig(savefig_file + "guess_alt" + ".png")
# theta
theta_init = Guess.cubic(prob.time_all_section, 0.0, 0.0, np.deg2rad(25.0), 0.0)
# velocity
Vr_init = Guess.linear(prob.time_all_section, 0.0, 0.0)
Vt_init = Guess.linear(prob.time_all_section, obj.V0, obj.Vtarget)
# Guess.plot(prob.time_all_section, V_init, "Velocity", "time", "Velocity")
# mass profile
M_init0 = Guess.cubic(prob.time_all_section, obj.Minit, 0.0, obj.Mdry[0] + obj.M0[1], 0.0)
M_init1 = Guess.cubic(prob.time_all_section, obj.M0[1], 0.0, obj.Mdry[1], 0.0)
M_init = np.hstack((M_init0, M_init1))
# Guess.plot(prob.time_all_section, M_init, "Mass", "time", "Mass")
# if(flag_savefig):plt.savefig(savefig_file + "guess_mass" + ".png")
# thrust profile
# T_init = Guess.zeros(prob.time_all_section)
Tr_init0 = Guess.cubic(prob.time[0], obj.ThrustMax[0]*9/10, 0.0, 0.0, 0.0)
Tr_init1 = Guess.cubic(prob.time[1], obj.ThrustMax[1]*9/10, 0.0, 0.0, 0.0)
Tr_init = np.hstack((Tr_init0, Tr_init1))
# Tt_init = Guess.cubic(prob.time_all_section, 0.0, 0.0, 0.0, 0.0)
Tt_init0 = Guess.cubic(prob.time[0], obj.ThrustMax[0]/10, 0.0, 0.0, 0.0)
Tt_init1 = Guess.cubic(prob.time[1], obj.ThrustMax[1]/10, 0.0, 0.0, 0.0)
Tt_init = np.hstack((Tt_init0, Tt_init1))
# Guess.plot(prob.time_all_section, T_init, "Thrust Guess", "time", "Thrust")
# if(flag_savefig):plt.savefig(savefig_file + "guess_thrust" + ".png")
# plt.show()
# ========================
# Substitution initial value to parameter vector to be optimized
# non dimensional values (Divide by scale factor)
prob.set_states_all_section(0, R_init)
prob.set_states_all_section(1, theta_init)
prob.set_states_all_section(2, Vr_init)
prob.set_states_all_section(3, Vt_init)
prob.set_states_all_section(4, M_init)
prob.set_controls_all_section(0, Tr_init)
prob.set_controls_all_section(1, Tt_init)
# ========================
# Main Process
# Assign problem to SQP solver
prob.dynamics = [dynamics, dynamics]
prob.knot_states_smooth = [False]
prob.cost = cost
# prob.cost_derivative = cost_derivative
prob.equality = equality
prob.inequality = inequality
def display_func():
R = prob.states_all_section(0)
theta = prob.states_all_section(1)
m = prob.states_all_section(4)
ts = prob.time_knots()
tf = prob.time_final(-1)
print("m0 : {0:.5f}".format(m[0]))
print("mf : {0:.5f}".format(m[-1]))
print("mdry : {0:.5f}".format(obj.Mdry[0]))
print("payload : {0:.5f}".format(m[-1] - obj.Mdry[1]))
print("max altitude: {0:.5f}".format(R[-1] - obj.Re))
print("MECO time : {0:.3f}".format(ts[1]))
print("final time : {0:.3f}".format(tf))
prob.solve(obj, display_func, ftol=1e-8)
# ========================
# Post Process
# ------------------------
# Convert parameter vector to variable
R = prob.states_all_section(0)
theta = prob.states_all_section(1)
Vr = prob.states_all_section(2)
Vt = prob.states_all_section(3)
m = prob.states_all_section(4)
Tr = prob.controls_all_section(0)
Tt = prob.controls_all_section(1)
time = prob.time_update()
R0 = prob.states(0, 0)
R1 = prob.states(0, 1)
Tr0 = prob.controls(0, 0)
Tr1 = prob.controls(0, 1)
Tt0 = prob.controls(1, 0)
Tt1 = prob.controls(1, 1)
# ------------------------
# Calculate necessary variables
rho = obj.airDensity(R - obj.Re)
Mach = np.sqrt(Vr**2 + Vt**2) / obj.airSound(R - obj.Re)
Cd = obj.Cd(Mach)
Dr = 0.5 * rho * Vr * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[0] # [N]
Dt = 0.5 * rho * Vt * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[0] # [N]
g = obj.g0 * (obj.Re / R)**2 # [m/s2]
# dynamic pressure
q = 0.5 * rho * (Vr**2 + Vt**2) # [Pa]
# acceleration
a_r = (Tr - Dr) / m / obj.g0
a_t = (Tt - Dt) / m / obj.g0
a_mag = np.sqrt(a_r**2 + a_t**2) / obj.g0 # [G]
# Thrust
T = np.sqrt(Tr**2 + Tt**2)
dThrust0 = (obj.airPressure(obj.H0) - obj.airPressure(R0 - obj.Re)) * obj.Ae[0]
dThrust1 = obj.airPressure(R1 - obj.Re) * obj.Ae[1]
Isp0 = obj.Isp[0] + dThrust0 / (obj.refMdot[0] * obj.g0)
Isp1 = obj.Isp[1] + dThrust1 / (obj.refMdot[1] * obj.g0)
Thrust_SL = T - np.append(dThrust0, dThrust1)
np.savetxt(savefig_file + "Thrust_Log" + ".csv", np.hstack((time, Thrust_SL, T, Tr, Tt)), delimiter=',')
# ------------------------
# Visualization
plt.close("all")
plt.figure()
plt.title("Altitude profile")
plt.plot(time, (R - obj.Re) / 1000, marker="o", label="Altitude")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Altitude [km]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "altitude" + ".png")
np.savetxt(savefig_file + "Altitude_Log" + ".csv", np.hstack((time, (R - obj.Re))), delimiter=',')
plt.figure()
plt.title("Velocity")
plt.plot(time, Vr, marker="o", label="Vr")
plt.plot(time, Vt, marker="o", label="Vt")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Velocity [m/s]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "velocity" + ".png")
np.savetxt(savefig_file + "Velocity_Log" + ".csv", np.hstack((time, Vr, Vt)), delimiter=',')
plt.figure()
plt.title("Mass")
plt.plot(time, m, marker="o", label="Mass")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Mass [kg]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "mass" + ".png")
np.savetxt(savefig_file + "Mass_Log" + ".csv", np.hstack((time, m)), delimiter=',')
plt.figure()
plt.title("Acceleration")
plt.plot(time, a_r, marker="o", label="Acc r")
plt.plot(time, a_t, marker="o", label="Acc t")
plt.plot(time, a_mag, marker="o", label="Acc")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Acceleration [G]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "acceleration" + ".png")
plt.figure()
plt.title("Thrust profile")
plt.plot(time, Tr / 1000, marker="o", label="Tr")
plt.plot(time, Tt / 1000, marker="o", label="Tt")
plt.plot(time, T / 1000, marker="o", label="Thrust")
plt.plot(time, Dr / 1000, marker="o", label="Dr")
plt.plot(time, Dt / 1000, marker="o", label="Dt")
plt.plot(time, m * g / 1000, marker="o", label="Gravity")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Thrust [kN]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "force" + ".png")
plt.figure()
plt.title("Flight trajectory")
plt.plot(theta * obj.Re / 1000, (R - obj.Re) / 1000, marker="o", label="trajectory")
plt.grid()
plt.xlabel("Downrange [km]")
plt.ylabel("Altitude [km]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "trajectory" + ".png")
plt.figure()
plt.title("DeltaThrust profile")
plt.plot(time, np.append(dThrust0, dThrust1), marker="o", label="dThrust")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("dThrust [N]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "dforce" + ".png")
plt.figure()
plt.title("Isp profile")
plt.plot(time, np.append(Isp0, Isp1), marker="o", label="Isp")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Isp [s]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "Isp" + ".png") | mit |
jorik041/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
# plt.hold was deprecated and later removed from matplotlib; overplotting is now the default
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
kai5263499/networkx | examples/graph/unix_email.py | 62 | 2683 | #!/usr/bin/env python
"""
Create a directed graph, allowing multiple edges and self loops, from
a unix mailbox. The nodes are email addresses with links
that point from the sender to the receivers. The edge data
is a Python email.Message object which contains all of
the email message data.
This example shows the power of MultiDiGraph to hold edge data
of arbitrary Python objects (in this case a list of email messages).
By default, load the sample unix email mailbox called "unix_email.mbox".
You can load your own mailbox by naming it on the command line, eg
python unix_email.py /var/spool/mail/username
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2005 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import email
from email.utils import getaddresses,parseaddr
import mailbox
import sys
# unix mailbox recipe
# see http://www.python.org/doc/current/lib/module-mailbox.html
def msgfactory(fp):
try:
return email.message_from_file(fp)
    except email.errors.MessageParseError:
# Don't return None since that will stop the mailbox iterator
return ''
if __name__ == '__main__':
import networkx as nx
try:
import matplotlib.pyplot as plt
    except ImportError:
pass
if len(sys.argv)==1:
filePath = "unix_email.mbox"
else:
filePath = sys.argv[1]
mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox
G=nx.MultiDiGraph() # create empty graph
# parse each messages and build graph
for msg in mbox: # msg is python email.Message.Message object
(source_name,source_addr) = parseaddr(msg['From']) # sender
# get all recipients
# see http://www.python.org/doc/current/lib/module-email.Utils.html
tos = msg.get_all('to', [])
ccs = msg.get_all('cc', [])
resent_tos = msg.get_all('resent-to', [])
resent_ccs = msg.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
# now add the edges for this mail message
for (target_name,target_addr) in all_recipients:
G.add_edge(source_addr,target_addr,message=msg)
# print edges with message subject
for (u,v,d) in G.edges_iter(data=True):
print("From: %s To: %s Subject: %s"%(u,v,d['message']["Subject"]))
try: # draw
pos=nx.spring_layout(G,iterations=10)
nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)
plt.savefig("unix_email.png")
plt.show()
    except Exception: # matplotlib not available
pass
| bsd-3-clause |
CallaJun/hackprince | indico/matplotlib/gridspec.py | 10 | 15668 | """
:mod:`~matplotlib.gridspec` is a module which specifies the location
of the subplot in the figure.
``GridSpec``
specifies the geometry of the grid that a subplot will be
placed. The number of rows and number of columns of the grid
need to be set. Optionally, the subplot layout parameters
(e.g., left, right, etc.) can be tuned.
``SubplotSpec``
specifies the location of the subplot in the given *GridSpec*.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.transforms as mtransforms
import numpy as np
import warnings
class GridSpecBase(object):
"""
    A base class of GridSpec that specifies the geometry of the grid
    in which a subplot will be placed.
"""
def __init__(self, nrows, ncols,
height_ratios=None, width_ratios=None):
"""
The number of rows and number of columns of the grid need to
be set. Optionally, the ratio of heights and widths of rows and
columns can be specified.
"""
#self.figure = figure
self._nrows , self._ncols = nrows, ncols
self.set_height_ratios(height_ratios)
self.set_width_ratios(width_ratios)
def get_geometry(self):
'get the geometry of the grid, e.g., 2,3'
return self._nrows, self._ncols
def get_subplot_params(self, fig=None):
pass
def new_subplotspec(self, loc, rowspan=1, colspan=1):
"""
        Create and return a SubplotSpec instance.
"""
loc1, loc2 = loc
subplotspec = self[loc1:loc1+rowspan, loc2:loc2+colspan]
return subplotspec
def set_width_ratios(self, width_ratios):
self._col_width_ratios = width_ratios
def get_width_ratios(self):
return self._col_width_ratios
def set_height_ratios(self, height_ratios):
self._row_height_ratios = height_ratios
def get_height_ratios(self):
return self._row_height_ratios
def get_grid_positions(self, fig):
"""
return lists of bottom and top position of rows, left and
right positions of columns.
"""
nrows, ncols = self.get_geometry()
subplot_params = self.get_subplot_params(fig)
left = subplot_params.left
right = subplot_params.right
bottom = subplot_params.bottom
top = subplot_params.top
wspace = subplot_params.wspace
hspace = subplot_params.hspace
totWidth = right-left
totHeight = top-bottom
        # calculate accumulated heights of rows
cellH = totHeight/(nrows + hspace*(nrows-1))
sepH = hspace*cellH
if self._row_height_ratios is not None:
netHeight = cellH * nrows
tr = float(sum(self._row_height_ratios))
cellHeights = [netHeight*r/tr for r in self._row_height_ratios]
else:
cellHeights = [cellH] * nrows
sepHeights = [0] + ([sepH] * (nrows-1))
cellHs = np.add.accumulate(np.ravel(list(zip(sepHeights, cellHeights))))
        # calculate accumulated widths of columns
cellW = totWidth/(ncols + wspace*(ncols-1))
sepW = wspace*cellW
if self._col_width_ratios is not None:
netWidth = cellW * ncols
tr = float(sum(self._col_width_ratios))
cellWidths = [netWidth*r/tr for r in self._col_width_ratios]
else:
cellWidths = [cellW] * ncols
sepWidths = [0] + ([sepW] * (ncols-1))
cellWs = np.add.accumulate(np.ravel(list(zip(sepWidths, cellWidths))))
figTops = [top - cellHs[2*rowNum] for rowNum in range(nrows)]
figBottoms = [top - cellHs[2*rowNum+1] for rowNum in range(nrows)]
figLefts = [left + cellWs[2*colNum] for colNum in range(ncols)]
figRights = [left + cellWs[2*colNum+1] for colNum in range(ncols)]
return figBottoms, figTops, figLefts, figRights
def __getitem__(self, key):
"""
        Create and return a SubplotSpec instance.
"""
nrows, ncols = self.get_geometry()
total = nrows*ncols
if isinstance(key, tuple):
try:
k1, k2 = key
except ValueError:
raise ValueError("unrecognized subplot spec")
if isinstance(k1, slice):
row1, row2, _ = k1.indices(nrows)
else:
if k1 < 0:
k1 += nrows
if k1 >= nrows or k1 < 0 :
raise IndexError("index out of range")
row1, row2 = k1, k1+1
if isinstance(k2, slice):
col1, col2, _ = k2.indices(ncols)
else:
if k2 < 0:
k2 += ncols
if k2 >= ncols or k2 < 0 :
raise IndexError("index out of range")
col1, col2 = k2, k2+1
num1 = row1*ncols + col1
num2 = (row2-1)*ncols + (col2-1)
# single key
else:
if isinstance(key, slice):
num1, num2, _ = key.indices(total)
num2 -= 1
else:
if key < 0:
key += total
if key >= total or key < 0 :
raise IndexError("index out of range")
num1, num2 = key, None
return SubplotSpec(self, num1, num2)
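# Indexing sketch: for a 2x3 grid, gs[0, :] returns a SubplotSpec spanning the
# whole first row (num1=0, num2=2), while gs[4] addresses the single cell with
# flat index 4, i.e. row 1, column 1.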
class GridSpec(GridSpecBase):
"""
    A class that specifies the geometry of the grid in which a subplot
    will be placed. The location of the grid is determined in a similar
    way to SubplotParams.
"""
def __init__(self, nrows, ncols,
left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None,
width_ratios=None, height_ratios=None):
"""
The number of rows and number of columns of the
grid need to be set. Optionally, the subplot layout parameters
(e.g., left, right, etc.) can be tuned.
"""
#self.figure = figure
self.left=left
self.bottom=bottom
self.right=right
self.top=top
self.wspace=wspace
self.hspace=hspace
GridSpecBase.__init__(self, nrows, ncols,
width_ratios=width_ratios,
height_ratios=height_ratios)
#self.set_width_ratios(width_ratios)
#self.set_height_ratios(height_ratios)
_AllowedKeys = ["left", "bottom", "right", "top", "wspace", "hspace"]
def update(self, **kwargs):
"""
Update the current values. If any kwarg is None, default to
the current value, if set, otherwise to rc.
"""
for k, v in six.iteritems(kwargs):
if k in self._AllowedKeys:
setattr(self, k, v)
else:
raise AttributeError("%s is unknown keyword" % (k,))
from matplotlib import _pylab_helpers
from matplotlib.axes import SubplotBase
for figmanager in six.itervalues(_pylab_helpers.Gcf.figs):
for ax in figmanager.canvas.figure.axes:
# copied from Figure.subplots_adjust
if not isinstance(ax, SubplotBase):
# Check if sharing a subplots axis
if ax._sharex is not None and isinstance(ax._sharex, SubplotBase):
if ax._sharex.get_subplotspec().get_gridspec() == self:
ax._sharex.update_params()
ax.set_position(ax._sharex.figbox)
elif ax._sharey is not None and isinstance(ax._sharey,SubplotBase):
if ax._sharey.get_subplotspec().get_gridspec() == self:
ax._sharey.update_params()
ax.set_position(ax._sharey.figbox)
else:
ss = ax.get_subplotspec().get_topmost_subplotspec()
if ss.get_gridspec() == self:
ax.update_params()
ax.set_position(ax.figbox)
def get_subplot_params(self, fig=None):
"""
        return a SubplotParams object of subplot layout parameters. The default
        parameters are from rcParams unless a figure attribute is set.
"""
from matplotlib.figure import SubplotParams
import copy
if fig is None:
kw = dict([(k, rcParams["figure.subplot."+k]) \
for k in self._AllowedKeys])
subplotpars = SubplotParams(**kw)
else:
subplotpars = copy.copy(fig.subplotpars)
update_kw = dict([(k, getattr(self, k)) for k in self._AllowedKeys])
subplotpars.update(**update_kw)
return subplotpars
def locally_modified_subplot_params(self):
return [k for k in self._AllowedKeys if getattr(self, k)]
def tight_layout(self, fig, renderer=None, pad=1.08, h_pad=None, w_pad=None, rect=None):
"""
Adjust subplot parameters to give specified padding.
Parameters:
pad : float
padding between the figure edge and the edges of subplots, as a fraction of the font-size.
h_pad, w_pad : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
rect : if rect is given, it is interpreted as a rectangle
(left, bottom, right, top) in the normalized figure
coordinate that the whole subplots area (including
labels) will fit into. Default is (0, 0, 1, 1).
"""
from .tight_layout import (get_subplotspec_list,
get_tight_layout_figure,
get_renderer)
subplotspec_list = get_subplotspec_list(fig.axes, grid_spec=self)
if None in subplotspec_list:
warnings.warn("This figure includes Axes that are not "
"compatible with tight_layout, so its "
"results might be incorrect.")
if renderer is None:
renderer = get_renderer(fig)
kwargs = get_tight_layout_figure(fig, fig.axes, subplotspec_list,
renderer,
pad=pad, h_pad=h_pad, w_pad=w_pad,
rect=rect,
)
self.update(**kwargs)
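    # Usage sketch (assuming a figure `fig` whose axes were created from this
    # GridSpec):
    #
    #     gs = GridSpec(2, 2)
    #     ax = fig.add_subplot(gs[0, 0])
    #     gs.tight_layout(fig, pad=0.5)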
class GridSpecFromSubplotSpec(GridSpecBase):
"""
GridSpec whose subplot layout parameters are inherited from the
location specified by a given SubplotSpec.
"""
def __init__(self, nrows, ncols,
subplot_spec,
wspace=None, hspace=None,
height_ratios=None, width_ratios=None):
"""
        The number of rows and number of columns of the grid need to
        be set. An instance of SubplotSpec from which the layout
        parameters will be inherited must also be given. The wspace
        and hspace of the layout can be optionally specified, otherwise
        the default values (from the figure or rcParams) will be used.
"""
self._wspace=wspace
self._hspace=hspace
self._subplot_spec = subplot_spec
GridSpecBase.__init__(self, nrows, ncols,
width_ratios=width_ratios,
height_ratios=height_ratios)
def get_subplot_params(self, fig=None):
"""
        return a SubplotParams object of subplot layout parameters.
"""
if fig is None:
hspace = rcParams["figure.subplot.hspace"]
wspace = rcParams["figure.subplot.wspace"]
else:
hspace = fig.subplotpars.hspace
wspace = fig.subplotpars.wspace
if self._hspace is not None:
hspace = self._hspace
if self._wspace is not None:
wspace = self._wspace
figbox = self._subplot_spec.get_position(fig, return_all=False)
left, bottom, right, top = figbox.extents
from matplotlib.figure import SubplotParams
sp = SubplotParams(left=left,
right=right,
bottom=bottom,
top=top,
wspace=wspace,
hspace=hspace)
return sp
def get_topmost_subplotspec(self):
'get the topmost SubplotSpec instance associated with the subplot'
return self._subplot_spec.get_topmost_subplotspec()
class SubplotSpec(object):
"""
specifies the location of the subplot in the given *GridSpec*.
"""
def __init__(self, gridspec, num1, num2=None):
"""
        The subplot will occupy the num1-th cell of the given
        gridspec. If num2 is provided, the subplot will span between
        the num1-th cell and the num2-th cell, inclusive.
        The index starts from 0.
"""
rows, cols = gridspec.get_geometry()
total = rows*cols
self._gridspec = gridspec
self.num1 = num1
self.num2 = num2
def get_gridspec(self):
return self._gridspec
def get_geometry(self):
"""
        get the subplot geometry, e.g., 2,2,3. Unlike SubplotParams,
        the index is 0-based
"""
rows, cols = self.get_gridspec().get_geometry()
return rows, cols, self.num1, self.num2
def get_position(self, fig, return_all=False):
"""
update the subplot position from fig.subplotpars
"""
gridspec = self.get_gridspec()
nrows, ncols = gridspec.get_geometry()
figBottoms, figTops, figLefts, figRights = \
gridspec.get_grid_positions(fig)
rowNum, colNum = divmod(self.num1, ncols)
figBottom = figBottoms[rowNum]
figTop = figTops[rowNum]
figLeft = figLefts[colNum]
figRight = figRights[colNum]
if self.num2 is not None:
rowNum2, colNum2 = divmod(self.num2, ncols)
figBottom2 = figBottoms[rowNum2]
figTop2 = figTops[rowNum2]
figLeft2 = figLefts[colNum2]
figRight2 = figRights[colNum2]
figBottom = min(figBottom, figBottom2)
figLeft = min(figLeft, figLeft2)
figTop = max(figTop, figTop2)
figRight = max(figRight, figRight2)
figbox = mtransforms.Bbox.from_extents(figLeft, figBottom,
figRight, figTop)
if return_all:
return figbox, rowNum, colNum, nrows, ncols
else:
return figbox
def get_topmost_subplotspec(self):
'get the topmost SubplotSpec instance associated with the subplot'
gridspec = self.get_gridspec()
if hasattr(gridspec, "get_topmost_subplotspec"):
return gridspec.get_topmost_subplotspec()
else:
return self
def __eq__(self, other):
# check to make sure other has the attributes
# we need to do the comparison
if not (hasattr(other, '_gridspec') and
hasattr(other, 'num1') and
hasattr(other, 'num2')):
return False
return all((self._gridspec == other._gridspec,
self.num1 == other.num1,
self.num2 == other.num2))
def __hash__(self):
return (hash(self._gridspec) ^
hash(self.num1) ^
hash(self.num2))
| lgpl-3.0 |
jayantk/pnp | experiments/dipart/scripts/visualize/generate_heatmap.py | 1 | 1411 | #!/usr/bin/python
# Generate heatmap of points
import numpy as np
import seaborn as sns
sns.set()
import matplotlib.pyplot as plt
from heatmap_data import *
# image_name=
# im = plt.imread(image_name);
# implot = plt.imshow(im);
# Load the example flights dataset and convert to long-form
# flights_long = sns.load_dataset("flights")
# flights = flights_long.pivot("month", "year", "passengers")
def sample_kde_data(data):
u = np.exp(data)
z = np.sum(u)
p = (u / z) * 1000
xs = []
ys = []
for yind in xrange(len(p)):
for xind in xrange(len(p[yind])):
c = int(p[yind][xind])
xs += [xind] * c
ys += [NUM_POINTS - yind] * c
return (np.array(xs), np.array(ys))
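# Worked example of the sampling above: data = [[0.0, log(3)]] gives
# u = [[1.0, 3.0]], z = 4.0 and p = [[250.0, 750.0]], so the point at
# xind=1 is emitted three times as often as the one at xind=0.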
NUM_POINTS=25
def plot_kde(data, cmap):
(xs, ys) = sample_kde_data(data)
print len(xs)
sns.kdeplot(xs, ys, cmap=cmap, shade=True, shade_lowest=False, clip=[[0,NUM_POINTS], [0, NUM_POINTS]], alpha=0.5)
# img = plt.imread("data/dqa_parts_v1/fighter-jet/fighter-jet_0000.png")
img = plt.imread("data/dqa_parts_v1/antelope/antelope_0000.png")
fig, ax = plt.subplots()
ax.imshow(img, extent=[0, NUM_POINTS, 0, NUM_POINTS])
plot_kde(neck_data3, "Blues")
# plot_kde(leg_data2, "Reds")
# plot_kde(tail_data2, "Greens")
plt.axis('off')
plt.show()
# Draw a heatmap with the numeric values in each cell
# sns.heatmap(data, cbar=False, cmap="coolwarm")
# plt.show()
| apache-2.0 |
treycausey/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
vorwerkc/pymatgen | pymatgen/analysis/chemenv/coordination_environments/voronoi.py | 5 | 44209 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains the object used to describe the possible bonded atoms based on a Voronoi analysis.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
import logging
import time
import numpy as np
from monty.json import MSONable
from scipy.spatial import Voronoi
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import (
get_lower_and_upper_f,
my_solid_angle,
rectangle_surface_intersection,
)
from pymatgen.analysis.chemenv.utils.defs_utils import AdditionalConditions
from pymatgen.analysis.chemenv.utils.math_utils import normal_cdf_step
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
def from_bson_voronoi_list2(bson_nb_voro_list2, structure):
"""
Returns the voronoi_list needed for the VoronoiContainer object from a bson-encoded voronoi_list.
Args:
bson_nb_voro_list2: List of periodic sites involved in the Voronoi.
structure: Structure object.
Returns:
The voronoi_list needed for the VoronoiContainer (with PeriodicSites as keys of the dictionary - not
allowed in the BSON format).
"""
voronoi_list = [None] * len(bson_nb_voro_list2)
for isite, voro in enumerate(bson_nb_voro_list2):
if voro is None or voro == "None":
continue
voronoi_list[isite] = []
for psd, dd in voro:
struct_site = structure[dd["index"]]
periodic_site = PeriodicSite(
struct_site._species,
struct_site.frac_coords + psd[1],
struct_site._lattice,
properties=struct_site.properties,
)
dd["site"] = periodic_site
voronoi_list[isite].append(dd)
return voronoi_list
class DetailedVoronoiContainer(MSONable):
"""
Class used to store the full Voronoi of a given structure.
"""
AC = AdditionalConditions()
default_voronoi_cutoff = 10.0
default_normalized_distance_tolerance = 1e-5
default_normalized_angle_tolerance = 1e-3
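    # Typical usage sketch (`my_structure` is a hypothetical pymatgen Structure):
    #
    #     dvc = DetailedVoronoiContainer(structure=my_structure)
    #     nbs = dvc.neighbors(isite=0, distfactor=1.4, angfactor=0.3)
    #
    # which returns the Voronoi neighbors of site 0 passing the given
    # normalized distance and angle cutoffs.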
def __init__(
self,
structure=None,
voronoi_list2=None,
voronoi_cutoff=default_voronoi_cutoff,
isites=None,
normalized_distance_tolerance=default_normalized_distance_tolerance,
normalized_angle_tolerance=default_normalized_angle_tolerance,
additional_conditions=None,
valences=None,
maximum_distance_factor=None,
minimum_angle_factor=None,
):
"""
Constructor for the VoronoiContainer object. Either a structure is given, in which case the Voronoi is
computed, or the different components of the VoronoiContainer are given (used in the from_dict method).
Args:
structure: Structure for which the Voronoi is computed.
voronoi_list2: List of voronoi polyhedrons for each site.
voronoi_cutoff: cutoff used for the voronoi.
isites: indices of sites for which the Voronoi has to be computed.
normalized_distance_tolerance: Tolerance for two normalized distances to be considered equal.
normalized_angle_tolerance:Tolerance for two normalized angles to be considered equal.
additional_conditions: Additional conditions to be used.
valences: Valences of all the sites in the structure (used when additional conditions require it).
maximum_distance_factor: The maximum distance factor to be considered.
minimum_angle_factor: The minimum angle factor to be considered.
Raises:
RuntimeError if the Voronoi cannot be constructed.
"""
self.normalized_distance_tolerance = normalized_distance_tolerance
self.normalized_angle_tolerance = normalized_angle_tolerance
if additional_conditions is None:
self.additional_conditions = [self.AC.NONE, self.AC.ONLY_ACB]
else:
self.additional_conditions = additional_conditions
self.valences = valences
self.maximum_distance_factor = maximum_distance_factor
self.minimum_angle_factor = minimum_angle_factor
if isites is None:
indices = list(range(len(structure)))
else:
indices = isites
self.structure = structure
logging.debug("Setting Voronoi list")
if voronoi_list2 is not None:
self.voronoi_list2 = voronoi_list2
else:
self.setup_voronoi_list(indices=indices, voronoi_cutoff=voronoi_cutoff)
logging.debug("Setting neighbors distances and angles")
t1 = time.process_time()
self.setup_neighbors_distances_and_angles(indices=indices)
t2 = time.process_time()
logging.debug("Neighbors distances and angles set up in {:.2f} seconds".format(t2 - t1))
def setup_voronoi_list(self, indices, voronoi_cutoff):
"""
        Set up the Voronoi list of neighbours by calling qhull.
Args:
indices: indices of the sites for which the Voronoi is needed.
voronoi_cutoff: Voronoi cutoff for the search of neighbours.
Raises:
RuntimeError: If an infinite vertex is found in the voronoi construction.
"""
self.voronoi_list2 = [None] * len(self.structure)
self.voronoi_list_coords = [None] * len(self.structure)
logging.debug("Getting all neighbors in structure")
struct_neighbors = self.structure.get_all_neighbors(voronoi_cutoff, include_index=True)
t1 = time.process_time()
logging.debug("Setting up Voronoi list :")
for jj, isite in enumerate(indices):
logging.debug(" - Voronoi analysis for site #{:d} ({:d}/{:d})".format(isite, jj + 1, len(indices)))
site = self.structure[isite]
neighbors1 = [(site, 0.0, isite)]
neighbors1.extend(struct_neighbors[isite])
distances = [i[1] for i in sorted(neighbors1, key=lambda s: s[1])]
neighbors = [i[0] for i in sorted(neighbors1, key=lambda s: s[1])]
qvoronoi_input = [s.coords for s in neighbors]
voro = Voronoi(points=qvoronoi_input, qhull_options="o Fv")
all_vertices = voro.vertices
results2 = []
maxangle = 0.0
mindist = 10000.0
for iridge, ridge_points in enumerate(voro.ridge_points):
if 0 in ridge_points:
ridge_vertices_indices = voro.ridge_vertices[iridge]
if -1 in ridge_vertices_indices:
raise RuntimeError(
"This structure is pathological," " infinite vertex in the voronoi " "construction"
)
ridge_point2 = max(ridge_points)
facets = [all_vertices[i] for i in ridge_vertices_indices]
sa = my_solid_angle(site.coords, facets)
maxangle = max([sa, maxangle])
mindist = min([mindist, distances[ridge_point2]])
for iii, sss in enumerate(self.structure):
if neighbors[ridge_point2].is_periodic_image(sss, tolerance=1.0e-6):
myindex = iii
break
results2.append(
{
"site": neighbors[ridge_point2],
"angle": sa,
"distance": distances[ridge_point2],
"index": myindex,
}
)
for dd in results2:
dd["normalized_angle"] = dd["angle"] / maxangle
dd["normalized_distance"] = dd["distance"] / mindist
self.voronoi_list2[isite] = results2
self.voronoi_list_coords[isite] = np.array([dd["site"].coords for dd in results2])
t2 = time.process_time()
logging.debug("Voronoi list set up in {:.2f} seconds".format(t2 - t1))
def setup_neighbors_distances_and_angles(self, indices):
"""
Initializes the angle and distance separations.
Args:
indices: Indices of the sites for which the Voronoi is needed.
"""
self.neighbors_distances = [None] * len(self.structure)
self.neighbors_normalized_distances = [None] * len(self.structure)
self.neighbors_angles = [None] * len(self.structure)
self.neighbors_normalized_angles = [None] * len(self.structure)
for isite in indices:
results = self.voronoi_list2[isite]
if results is None:
continue
# Initializes neighbors distances and normalized distances groups
self.neighbors_distances[isite] = []
self.neighbors_normalized_distances[isite] = []
normalized_distances = [nb_dict["normalized_distance"] for nb_dict in results]
isorted_distances = np.argsort(normalized_distances)
self.neighbors_normalized_distances[isite].append(
{
"min": normalized_distances[isorted_distances[0]],
"max": normalized_distances[isorted_distances[0]],
}
)
self.neighbors_distances[isite].append(
{
"min": results[isorted_distances[0]]["distance"],
"max": results[isorted_distances[0]]["distance"],
}
)
icurrent = 0
nb_indices = {int(isorted_distances[0])}
dnb_indices = {int(isorted_distances[0])}
for idist in iter(isorted_distances):
wd = normalized_distances[idist]
if self.maximum_distance_factor is not None:
if wd > self.maximum_distance_factor:
self.neighbors_normalized_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
break
if np.isclose(
wd,
self.neighbors_normalized_distances[isite][icurrent]["max"],
rtol=0.0,
atol=self.normalized_distance_tolerance,
):
self.neighbors_normalized_distances[isite][icurrent]["max"] = wd
self.neighbors_distances[isite][icurrent]["max"] = results[idist]["distance"]
dnb_indices.add(int(idist))
else:
self.neighbors_normalized_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
dnb_indices = {int(idist)}
self.neighbors_normalized_distances[isite].append({"min": wd, "max": wd})
self.neighbors_distances[isite].append(
{
"min": results[idist]["distance"],
"max": results[idist]["distance"],
}
)
icurrent += 1
nb_indices.add(int(idist))
else:
self.neighbors_normalized_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
for idist in range(len(self.neighbors_distances[isite]) - 1):
dist_dict = self.neighbors_distances[isite][idist]
dist_dict_next = self.neighbors_distances[isite][idist + 1]
dist_dict["next"] = dist_dict_next["min"]
ndist_dict = self.neighbors_normalized_distances[isite][idist]
ndist_dict_next = self.neighbors_normalized_distances[isite][idist + 1]
ndist_dict["next"] = ndist_dict_next["min"]
if self.maximum_distance_factor is not None:
dfact = self.maximum_distance_factor
else:
dfact = self.default_voronoi_cutoff / self.neighbors_distances[isite][0]["min"]
self.neighbors_normalized_distances[isite][-1]["next"] = dfact
self.neighbors_distances[isite][-1]["next"] = dfact * self.neighbors_distances[isite][0]["min"]
# Initializes neighbors angles and normalized angles groups
self.neighbors_angles[isite] = []
self.neighbors_normalized_angles[isite] = []
normalized_angles = [nb_dict["normalized_angle"] for nb_dict in results]
isorted_angles = np.argsort(normalized_angles)[::-1]
self.neighbors_normalized_angles[isite].append(
{
"max": normalized_angles[isorted_angles[0]],
"min": normalized_angles[isorted_angles[0]],
}
)
self.neighbors_angles[isite].append(
{
"max": results[isorted_angles[0]]["angle"],
"min": results[isorted_angles[0]]["angle"],
}
)
icurrent = 0
nb_indices = {int(isorted_angles[0])}
dnb_indices = {int(isorted_angles[0])}
for iang in iter(isorted_angles):
wa = normalized_angles[iang]
if self.minimum_angle_factor is not None:
if wa < self.minimum_angle_factor:
self.neighbors_normalized_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
break
if np.isclose(
wa,
self.neighbors_normalized_angles[isite][icurrent]["min"],
rtol=0.0,
atol=self.normalized_angle_tolerance,
):
self.neighbors_normalized_angles[isite][icurrent]["min"] = wa
self.neighbors_angles[isite][icurrent]["min"] = results[iang]["angle"]
dnb_indices.add(int(iang))
else:
self.neighbors_normalized_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
dnb_indices = {int(iang)}
self.neighbors_normalized_angles[isite].append({"max": wa, "min": wa})
self.neighbors_angles[isite].append({"max": results[iang]["angle"], "min": results[iang]["angle"]})
icurrent += 1
nb_indices.add(int(iang))
else:
self.neighbors_normalized_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
for iang in range(len(self.neighbors_angles[isite]) - 1):
ang_dict = self.neighbors_angles[isite][iang]
ang_dict_next = self.neighbors_angles[isite][iang + 1]
ang_dict["next"] = ang_dict_next["max"]
nang_dict = self.neighbors_normalized_angles[isite][iang]
nang_dict_next = self.neighbors_normalized_angles[isite][iang + 1]
nang_dict["next"] = nang_dict_next["max"]
if self.minimum_angle_factor is not None:
afact = self.minimum_angle_factor
else:
afact = 0.0
self.neighbors_normalized_angles[isite][-1]["next"] = afact
self.neighbors_angles[isite][-1]["next"] = afact * self.neighbors_angles[isite][0]["max"]
def _precompute_additional_conditions(self, ivoronoi, voronoi, valences):
additional_conditions = {ac: [] for ac in self.additional_conditions}
for ips, (ps, vals) in enumerate(voronoi):
for ac in self.additional_conditions:
additional_conditions[ac].append(
self.AC.check_condition(
condition=ac,
structure=self.structure,
parameters={
"valences": valences,
"neighbor_index": vals["index"],
"site_index": ivoronoi,
},
)
)
return additional_conditions
def _precompute_distance_conditions(self, ivoronoi, voronoi):
distance_conditions = []
for idp, dp_dict in enumerate(self.neighbors_normalized_distances[ivoronoi]):
distance_conditions.append([])
dp = dp_dict["max"]
for ips, (ps, vals) in enumerate(voronoi):
distance_conditions[idp].append(
vals["normalized_distance"] <= dp
or np.isclose(
vals["normalized_distance"],
dp,
rtol=0.0,
atol=self.normalized_distance_tolerance / 2.0,
)
)
return distance_conditions
def _precompute_angle_conditions(self, ivoronoi, voronoi):
angle_conditions = []
for iap, ap_dict in enumerate(self.neighbors_normalized_angles[ivoronoi]):
angle_conditions.append([])
ap = ap_dict["max"]
for ips, (ps, vals) in enumerate(voronoi):
angle_conditions[iap].append(
vals["normalized_angle"] >= ap
or np.isclose(
vals["normalized_angle"],
ap,
rtol=0.0,
atol=self.normalized_angle_tolerance / 2.0,
)
)
return angle_conditions
# def neighbors_map(self, isite, distfactor, angfactor, additional_condition):
# if self.neighbors_normalized_distances[isite] is None:
# return None
# dist_where = np.argwhere(
# np.array([wd['min'] for wd in self.neighbors_normalized_distances[isite]]) <= distfactor)
# if len(dist_where) == 0:
# return None
# idist = dist_where[-1][0]
# ang_where = np.argwhere(np.array([wa['max'] for wa in self.neighbors_normalized_angles[isite]]) >= angfactor)
# if len(ang_where) == 0:
# return None
# iang = ang_where[0][0]
# if self.additional_conditions.count(additional_condition) != 1:
# return None
# i_additional_condition = self.additional_conditions.index(additional_condition)
# return {'i_distfactor': idist, 'i_angfactor': iang, 'i_additional_condition': i_additional_condition}
def neighbors_surfaces(self, isite, surface_calculation_type=None, max_dist=2.0):
"""
Get the different surfaces corresponding to the different distance-angle cutoffs for a given site.
Args:
isite: Index of the site
surface_calculation_type: How to compute the surface.
max_dist: The maximum distance factor to be considered.
Returns:
Surfaces for each distance-angle cutoff.
"""
if self.voronoi_list2[isite] is None:
return None
bounds_and_limits = self.voronoi_parameters_bounds_and_limits(isite, surface_calculation_type, max_dist)
distance_bounds = bounds_and_limits["distance_bounds"]
angle_bounds = bounds_and_limits["angle_bounds"]
surfaces = np.zeros((len(distance_bounds), len(angle_bounds)), np.float_)
for idp in range(len(distance_bounds) - 1):
this_dist_plateau = distance_bounds[idp + 1] - distance_bounds[idp]
for iap in range(len(angle_bounds) - 1):
this_ang_plateau = angle_bounds[iap + 1] - angle_bounds[iap]
surfaces[idp][iap] = np.absolute(this_dist_plateau * this_ang_plateau)
return surfaces
def neighbors_surfaces_bounded(self, isite, surface_calculation_options=None):
"""
Get the different surfaces (using boundaries) corresponding to the different distance-angle cutoffs
for a given site.
Args:
isite: Index of the site.
surface_calculation_options: Options for the boundaries.
Returns:
Surfaces for each distance-angle cutoff.
"""
if self.voronoi_list2[isite] is None:
return None
if surface_calculation_options is None:
surface_calculation_options = {
"type": "standard_elliptic",
"distance_bounds": {"lower": 1.2, "upper": 1.8},
"angle_bounds": {"lower": 0.1, "upper": 0.8},
}
if surface_calculation_options["type"] in [
"standard_elliptic",
"standard_diamond",
"standard_spline",
]:
plot_type = {
"distance_parameter": ("initial_normalized", None),
"angle_parameter": ("initial_normalized", None),
}
else:
raise ValueError(
'Type "{}" for the surface calculation in DetailedVoronoiContainer '
"is invalid".format(surface_calculation_options["type"])
)
max_dist = surface_calculation_options["distance_bounds"]["upper"] + 0.1
bounds_and_limits = self.voronoi_parameters_bounds_and_limits(
isite=isite, plot_type=plot_type, max_dist=max_dist
)
distance_bounds = bounds_and_limits["distance_bounds"]
angle_bounds = bounds_and_limits["angle_bounds"]
lower_and_upper_functions = get_lower_and_upper_f(surface_calculation_options=surface_calculation_options)
mindist = surface_calculation_options["distance_bounds"]["lower"]
maxdist = surface_calculation_options["distance_bounds"]["upper"]
minang = surface_calculation_options["angle_bounds"]["lower"]
maxang = surface_calculation_options["angle_bounds"]["upper"]
f_lower = lower_and_upper_functions["lower"]
f_upper = lower_and_upper_functions["upper"]
surfaces = np.zeros((len(distance_bounds), len(angle_bounds)), np.float_)
for idp in range(len(distance_bounds) - 1):
dp1 = distance_bounds[idp]
dp2 = distance_bounds[idp + 1]
if dp2 < mindist or dp1 > maxdist:
continue
if dp1 < mindist:
d1 = mindist
else:
d1 = dp1
if dp2 > maxdist:
d2 = maxdist
else:
d2 = dp2
for iap in range(len(angle_bounds) - 1):
ap1 = angle_bounds[iap]
ap2 = angle_bounds[iap + 1]
if ap1 > ap2:
ap1 = angle_bounds[iap + 1]
ap2 = angle_bounds[iap]
if ap2 < minang or ap1 > maxang:
continue
intersection, interror = rectangle_surface_intersection(
rectangle=((d1, d2), (ap1, ap2)),
f_lower=f_lower,
f_upper=f_upper,
bounds_lower=[mindist, maxdist],
bounds_upper=[mindist, maxdist],
check=False,
)
surfaces[idp][iap] = intersection
return surfaces
@staticmethod
def _get_vertices_dist_ang_indices(parameter_indices_list):
pp0 = [pp[0] for pp in parameter_indices_list]
pp1 = [pp[1] for pp in parameter_indices_list]
min_idist = min(pp0)
min_iang = min(pp1)
max_idist = max(pp0)
max_iang = max(pp1)
i_min_angs = np.argwhere(np.array(pp1) == min_iang)
i_max_dists = np.argwhere(np.array(pp0) == max_idist)
pp0_at_min_iang = [pp0[ii[0]] for ii in i_min_angs]
pp1_at_max_idist = [pp1[ii[0]] for ii in i_max_dists]
max_idist_at_min_iang = max(pp0_at_min_iang)
min_iang_at_max_idist = min(pp1_at_max_idist)
p1 = (min_idist, min_iang)
p2 = (max_idist_at_min_iang, min_iang)
p3 = (max_idist_at_min_iang, min_iang_at_max_idist)
p4 = (max_idist, min_iang_at_max_idist)
p5 = (max_idist, max_iang)
p6 = (min_idist, max_iang)
return [p1, p2, p3, p4, p5, p6]
def maps_and_surfaces(
self,
isite,
surface_calculation_type=None,
max_dist=2.0,
additional_conditions=None,
):
"""
Get the different surfaces and their cn_map corresponding to the different distance-angle cutoffs
for a given site.
Args:
isite: Index of the site
surface_calculation_type: How to compute the surface.
max_dist: The maximum distance factor to be considered.
additional_conditions: If additional conditions have to be considered.
Returns:
Surfaces and cn_map's for each distance-angle cutoff.
"""
if self.voronoi_list2[isite] is None:
return None
if additional_conditions is None:
additional_conditions = [self.AC.ONLY_ACB]
surfaces = self.neighbors_surfaces(
isite=isite,
surface_calculation_type=surface_calculation_type,
max_dist=max_dist,
)
maps_and_surfaces = []
for cn, value in self._unique_coordinated_neighbors_parameters_indices[isite].items(): # pylint: disable=E1101
for imap, list_parameters_indices in enumerate(value):
thissurf = 0.0
for (idp, iap, iacb) in list_parameters_indices:
if iacb in additional_conditions:
thissurf += surfaces[idp, iap]
maps_and_surfaces.append(
{
"map": (cn, imap),
"surface": thissurf,
"parameters_indices": list_parameters_indices,
}
)
return maps_and_surfaces
def maps_and_surfaces_bounded(self, isite, surface_calculation_options=None, additional_conditions=None):
"""
Get the different surfaces (using boundaries) and their cn_map corresponding to the different
distance-angle cutoffs for a given site.
Args:
isite: Index of the site
surface_calculation_options: Options for the boundaries.
additional_conditions: If additional conditions have to be considered.
Returns:
Surfaces and cn_map's for each distance-angle cutoff.
"""
if self.voronoi_list2[isite] is None:
return None
if additional_conditions is None:
additional_conditions = [self.AC.ONLY_ACB]
surfaces = self.neighbors_surfaces_bounded(isite=isite, surface_calculation_options=surface_calculation_options)
maps_and_surfaces = []
for cn, value in self._unique_coordinated_neighbors_parameters_indices[isite].items(): # pylint: disable=E1101
for imap, list_parameters_indices in enumerate(value):
thissurf = 0.0
for (idp, iap, iacb) in list_parameters_indices:
if iacb in additional_conditions:
thissurf += surfaces[idp, iap]
maps_and_surfaces.append(
{
"map": (cn, imap),
"surface": thissurf,
"parameters_indices": list_parameters_indices,
}
)
return maps_and_surfaces
def neighbors(self, isite, distfactor, angfactor, additional_condition=None):
"""
Get the neighbors of a given site corresponding to a given distance and angle factor.
Args:
isite: Index of the site.
distfactor: Distance factor.
angfactor: Angle factor.
additional_condition: Additional condition to be used (currently not implemented).
Returns:
List of neighbors of the given site for the given distance and angle factors.
"""
idist = None
dfact = None
for iwd, wd in enumerate(self.neighbors_normalized_distances[isite]):
if distfactor >= wd["min"]:
idist = iwd
dfact = wd["max"]
else:
break
iang = None
afact = None
for iwa, wa in enumerate(self.neighbors_normalized_angles[isite]):
if angfactor <= wa["max"]:
iang = iwa
afact = wa["min"]
else:
break
if idist is None or iang is None:
raise ValueError("Distance or angle parameter not found ...")
return [
nb
for nb in self.voronoi_list2[isite]
if nb["normalized_distance"] <= dfact and nb["normalized_angle"] >= afact
]
def voronoi_parameters_bounds_and_limits(self, isite, plot_type, max_dist):
"""
Get the different boundaries and limits of the distance and angle factors for the given site.
Args:
isite: Index of the site.
plot_type: Types of distance/angle parameters to get.
max_dist: Maximum distance factor.
Returns:
Distance and angle bounds and limits.
"""
# Initializes the distance and angle parameters
if self.voronoi_list2[isite] is None:
return None
if plot_type is None:
plot_type = {
"distance_parameter": ("initial_inverse_opposite", None),
"angle_parameter": ("initial_opposite", None),
}
dd = [dist["min"] for dist in self.neighbors_normalized_distances[isite]]
dd[0] = 1.0
if plot_type["distance_parameter"][0] == "initial_normalized":
dd.append(max_dist)
distance_bounds = np.array(dd)
dist_limits = [1.0, max_dist]
elif plot_type["distance_parameter"][0] == "initial_inverse_opposite":
ddinv = [1.0 / dist for dist in dd]
ddinv.append(0.0)
distance_bounds = np.array([1.0 - invdist for invdist in ddinv])
dist_limits = [0.0, 1.0]
elif plot_type["distance_parameter"][0] == "initial_inverse3_opposite":
ddinv = [1.0 / dist ** 3.0 for dist in dd]
ddinv.append(0.0)
distance_bounds = np.array([1.0 - invdist for invdist in ddinv])
dist_limits = [0.0, 1.0]
else:
raise NotImplementedError(
'Plotting type "{}" ' "for the distance is not implemented".format(plot_type["distance_parameter"])
)
if plot_type["angle_parameter"][0] == "initial_normalized":
aa = [0.0]
aa.extend([ang["max"] for ang in self.neighbors_normalized_angles[isite]])
angle_bounds = np.array(aa)
elif plot_type["angle_parameter"][0] == "initial_opposite":
aa = [0.0]
aa.extend([ang["max"] for ang in self.neighbors_normalized_angles[isite]])
aa = [1.0 - ang for ang in aa]
angle_bounds = np.array(aa)
else:
raise NotImplementedError(
'Plotting type "{}" ' "for the angle is not implemented".format(plot_type["angle_parameter"])
)
ang_limits = [0.0, 1.0]
return {
"distance_bounds": distance_bounds,
"distance_limits": dist_limits,
"angle_bounds": angle_bounds,
"angle_limits": ang_limits,
}
def is_close_to(self, other, rtol=0.0, atol=1e-8):
"""
Whether two DetailedVoronoiContainer objects are close to each other.
Args:
other: Another DetailedVoronoiContainer to be compared with.
rtol: Relative tolerance to compare values.
atol: Absolute tolerance to compare values.
Returns:
True if the two DetailedVoronoiContainer are close to each other.
"""
isclose = (
np.isclose(
self.normalized_angle_tolerance,
other.normalized_angle_tolerance,
rtol=rtol,
atol=atol,
)
and np.isclose(
self.normalized_distance_tolerance,
other.normalized_distance_tolerance,
rtol=rtol,
atol=atol,
)
and self.additional_conditions == other.additional_conditions
and self.valences == other.valences
)
if not isclose:
return isclose
for isite, site_voronoi in enumerate(self.voronoi_list2):
self_to_other_nbs = {}
for inb, nb in enumerate(site_voronoi):
if nb is None:
if other.voronoi_list2[isite] is None:
continue
return False
if other.voronoi_list2[isite] is None:
return False
nb_other = None
for inb2, nb2 in enumerate(other.voronoi_list2[isite]):
if nb["site"] == nb2["site"]:
self_to_other_nbs[inb] = inb2
nb_other = nb2
break
if nb_other is None:
return False
if not np.isclose(nb["distance"], nb_other["distance"], rtol=rtol, atol=atol):
return False
if not np.isclose(nb["angle"], nb_other["angle"], rtol=rtol, atol=atol):
return False
if not np.isclose(
nb["normalized_distance"],
nb_other["normalized_distance"],
rtol=rtol,
atol=atol,
):
return False
if not np.isclose(
nb["normalized_angle"],
nb_other["normalized_angle"],
rtol=rtol,
atol=atol,
):
return False
if nb["index"] != nb_other["index"]:
return False
if nb["site"] != nb_other["site"]:
return False
return True
def get_rdf_figure(self, isite, normalized=True, figsize=None, step_function=None):
"""
Get the Radial Distribution Figure for a given site.
Args:
isite: Index of the site.
normalized: Whether to normalize distances.
figsize: Size of the figure.
step_function: Type of step function to be used for the RDF.
Returns:
Matplotlib figure.
"""
def dp_func(dp):
return 1.0 - 1.0 / np.power(dp, 3.0)
import matplotlib.pyplot as plt
if step_function is None:
step_function = {"type": "normal_cdf", "scale": 0.0001}
# Initializes the figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
subplot = fig.add_subplot(111)
if normalized:
dists = self.neighbors_normalized_distances[isite]
else:
dists = self.neighbors_distances[isite]
if step_function["type"] == "step_function":
isorted = np.argsort([dd["min"] for dd in dists])
sorted_dists = [dists[ii]["min"] for ii in isorted]
dnb_dists = [len(dists[ii]["dnb_indices"]) for ii in isorted]
xx = [0.0]
yy = [0.0]
for idist, dist in enumerate(sorted_dists):
xx.append(dist)
xx.append(dist)
yy.append(yy[-1])
yy.append(yy[-1] + dnb_dists[idist])
xx.append(1.1 * xx[-1])
yy.append(yy[-1])
elif step_function["type"] == "normal_cdf":
scale = step_function["scale"]
mydists = [dp_func(dd["min"]) for dd in dists]
mydcns = [len(dd["dnb_indices"]) for dd in dists]
xx = np.linspace(0.0, 1.1 * max(mydists), num=500)
yy = np.zeros_like(xx)
for idist, dist in enumerate(mydists):
yy += mydcns[idist] * normal_cdf_step(xx, mean=dist, scale=scale)
else:
raise ValueError('Step function of type "{}" is not allowed'.format(step_function["type"]))
subplot.plot(xx, yy)
return fig
def get_sadf_figure(self, isite, normalized=True, figsize=None, step_function=None):
"""
Get the Solid Angle Distribution Figure for a given site.
Args:
isite: Index of the site.
normalized: Whether to normalize angles.
figsize: Size of the figure.
step_function: Type of step function to be used for the SADF.
Returns:
Matplotlib figure.
"""
def ap_func(ap):
return np.power(ap, -0.1)
import matplotlib.pyplot as plt
if step_function is None:
step_function = {"type": "step_function", "scale": 0.0001}
# Initializes the figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
subplot = fig.add_subplot(111)
if normalized:
angs = self.neighbors_normalized_angles[isite]
else:
angs = self.neighbors_angles[isite]
if step_function["type"] == "step_function":
isorted = np.argsort([ap_func(aa["min"]) for aa in angs])
sorted_angs = [ap_func(angs[ii]["min"]) for ii in isorted]
dnb_angs = [len(angs[ii]["dnb_indices"]) for ii in isorted]
xx = [0.0]
yy = [0.0]
for iang, ang in enumerate(sorted_angs):
xx.append(ang)
xx.append(ang)
yy.append(yy[-1])
yy.append(yy[-1] + dnb_angs[iang])
xx.append(1.1 * xx[-1])
yy.append(yy[-1])
elif step_function["type"] == "normal_cdf":
scale = step_function["scale"]
myangs = [ap_func(aa["min"]) for aa in angs]
mydcns = [len(dd["dnb_indices"]) for dd in angs]
xx = np.linspace(0.0, 1.1 * max(myangs), num=500)
yy = np.zeros_like(xx)
for iang, ang in enumerate(myangs):
yy += mydcns[iang] * normal_cdf_step(xx, mean=ang, scale=scale)
else:
raise ValueError('Step function of type "{}" is not allowed'.format(step_function["type"]))
subplot.plot(xx, yy)
return fig
def __eq__(self, other):
return (
self.normalized_angle_tolerance == other.normalized_angle_tolerance
and self.normalized_distance_tolerance == other.normalized_distance_tolerance
and self.additional_conditions == other.additional_conditions
and self.valences == other.valences
and self.voronoi_list2 == other.voronoi_list2
and self.structure == other.structure
)
def __ne__(self, other):
return not self == other
def to_bson_voronoi_list2(self):
"""
Transforms the voronoi_list into a vlist + bson_nb_voro_list, that are BSON-encodable.
Returns:
[vlist, bson_nb_voro_list], to be used in the as_dict method.
"""
bson_nb_voro_list2 = [None] * len(self.voronoi_list2)
for ivoro, voro in enumerate(self.voronoi_list2):
if voro is None or voro == "None":
continue
site_voro = []
# {'site': neighbors[nn[1]],
# 'angle': sa,
# 'distance': distances[nn[1]],
# 'index': myindex}
for nb_dict in voro:
site = nb_dict["site"]
site_dict = {key: val for key, val in nb_dict.items() if key not in ["site"]}
# site_voro.append([ps.as_dict(), dd]) [float(c) for c in self.frac_coords]
diff = site.frac_coords - self.structure[nb_dict["index"]].frac_coords
site_voro.append([[nb_dict["index"], [float(c) for c in diff]], site_dict])
bson_nb_voro_list2[ivoro] = site_voro
return bson_nb_voro_list2
def as_dict(self):
"""
Bson-serializable dict representation of the VoronoiContainer.
Returns:
dictionary that is BSON-encodable.
"""
bson_nb_voro_list2 = self.to_bson_voronoi_list2()
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"bson_nb_voro_list2": bson_nb_voro_list2,
# "neighbors_lists": self.neighbors_lists,
"structure": self.structure.as_dict(),
"normalized_angle_tolerance": self.normalized_angle_tolerance,
"normalized_distance_tolerance": self.normalized_distance_tolerance,
"additional_conditions": self.additional_conditions,
"valences": self.valences,
"maximum_distance_factor": self.maximum_distance_factor,
"minimum_angle_factor": self.minimum_angle_factor,
}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the VoronoiContainer object from a dict representation of the VoronoiContainer created using
the as_dict method.
Args:
d: dict representation of the VoronoiContainer object.
Returns:
VoronoiContainer object.
"""
structure = Structure.from_dict(d["structure"])
voronoi_list2 = from_bson_voronoi_list2(d["bson_nb_voro_list2"], structure)
maximum_distance_factor = d["maximum_distance_factor"] if "maximum_distance_factor" in d else None
minimum_angle_factor = d["minimum_angle_factor"] if "minimum_angle_factor" in d else None
return cls(
structure=structure,
voronoi_list2=voronoi_list2,
# neighbors_lists=neighbors_lists,
normalized_angle_tolerance=d["normalized_angle_tolerance"],
normalized_distance_tolerance=d["normalized_distance_tolerance"],
additional_conditions=d["additional_conditions"],
valences=d["valences"],
maximum_distance_factor=maximum_distance_factor,
minimum_angle_factor=minimum_angle_factor,
)
| mit |
adit-chandra/tensorflow | tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/compare_1k.py | 11 | 5011 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugging script for checking calculation values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import struct
import matplotlib.pyplot as plt
import numpy as np
# import soundfile as sf
def new_data_to_array(fn, datatype='int16'):
"""Converts file information to an in-memory array."""
vals = []
with open(fn) as f:
for n, line in enumerate(f):
if n != 0:
vals.extend([int(v, 16) for v in line.split()])
  b = bytes(bytearray(vals))  # bytes buffer for struct.unpack (Python 2/3 compatible)
if datatype == 'int8':
typestr = 'b'
arraylen = int(len(b))
elif datatype == 'int16':
typestr = 'h'
arraylen = int(len(b) // 2)
elif datatype == 'int32':
typestr = 'i'
arraylen = int(len(b) // 4)
if datatype == 'uint8':
typestr = 'B'
arraylen = int(len(b))
elif datatype == 'uint16':
typestr = 'H'
arraylen = int(len(b) // 2)
elif datatype == 'uint32':
typestr = 'I'
arraylen = int(len(b) // 4)
y = np.array(struct.unpack('<' + typestr * arraylen, b))
return y
# x is the fixed-point input in Qm.n format
def to_float(x, n):
return x.astype(float) * 2**(-n)
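# For example, in Q1.15 format (n=15) the int16 value 16384 maps to
# 16384 * 2**-15 = 0.5.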
micro_windowed_input = new_data_to_array(
'micro_windowed_input.txt', datatype='int32')
cmsis_windowed_input = new_data_to_array(
'cmsis_windowed_input.txt', datatype='int16')
micro_dft = new_data_to_array('micro_dft.txt', datatype='int32')
cmsis_dft = new_data_to_array('cmsis_dft.txt', datatype='int16')
py_dft = np.fft.rfft(to_float(cmsis_windowed_input, 15), n=512)
py_result = np.empty((2 * py_dft.size), dtype=np.float)
py_result[0::2] = np.real(py_dft)
py_result[1::2] = np.imag(py_dft)
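# Interleave real/imag parts so the NumPy reference has the same layout as the
# fixed-point DFT buffers, which store [re0, im0, re1, im1, ...].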
micro_power = new_data_to_array('micro_power.txt', datatype='int32')
cmsis_power = new_data_to_array('cmsis_power.txt', datatype='int16')
py_power = np.square(np.abs(py_dft))
micro_power_avg = new_data_to_array('micro_power_avg.txt', datatype='uint8')
cmsis_power_avg = new_data_to_array('cmsis_power_avg.txt', datatype='uint8')
plt.figure(1)
plt.subplot(311)
plt.plot(micro_windowed_input, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_windowed_input, label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_windowed_input, 30), label='Micro to float')
plt.plot(to_float(cmsis_windowed_input, 15), label='CMSIS to float')
plt.legend()
plt.figure(2)
plt.subplot(311)
plt.plot(micro_dft, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_dft, label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_dft, 22), label='Micro to float')
# CMSIS result has 6 fractional bits (not 7) due to documentation error (see
# README.md)
plt.plot(to_float(cmsis_dft, 6), label='CMSIS to float')
plt.plot(py_result, label='Python result')
plt.legend()
plt.figure(3)
plt.subplot(311)
plt.plot(micro_power, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_power[0:256], label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_power, 22), label='Micro to float')
plt.plot(to_float(cmsis_power[0:256], 6), label='CMSIS to float')
plt.plot(py_power, label='Python result')
plt.legend()
plt.figure(4)
plt.plot(micro_power_avg, label='Micro fixed')
plt.plot(cmsis_power_avg, label='CMSIS fixed')
plt.legend()
plt.show()
# t = np.arange(16000.*0.03)/16000.
# # Factor of 10 because micro preprocessing overflows otherwise
# sin1k = 0.1*np.sin(2*np.pi*1000*t)
#
# plt.figure(1)
# plt.subplot(511)
# plt.plot(sin1k)
# plt.title('Input sine')
#
# plt.subplot(512)
# plt.plot(to_float(micro_windowed_input, 30), label='Micro-Lite')
# plt.plot(to_float(cmsis_windowed_input, 15), label='CMSIS')
# plt.title('Windowed sine')
# plt.legend(loc='center right')
#
# plt.subplot(513)
# plt.plot(to_float(micro_dft, 22), label='Micro-Lite')
# plt.plot(to_float(cmsis_dft, 6), label='CMSIS')
# plt.title('FFT')
# plt.legend(loc='center')
#
# plt.subplot(514)
# plt.plot(to_float(micro_power, 22), label='Micro-Lite')
# plt.plot(to_float(cmsis_power[0:256], 6), label='CMSIS')
# plt.title('|FFT|^2')
# plt.legend(loc='center right')
#
# plt.subplot(515)
# plt.plot(micro_power_avg, label='Micro-Lite')
# plt.plot(cmsis_power_avg, label='CMSIS')
# plt.title('Averaged |FFT|^2')
# plt.legend(loc='center right')
#
# plt.tight_layout(pad=0, w_pad=0.2, h_pad=0.2)
#
# plt.show()
#
| apache-2.0 |
cdondrup/strands_qsr_lib | qsr_lib/dbg/dbg_template_bounding_boxes_qsrs.py | 8 | 2711 | #!/usr/bin/python
# import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
class Dbg(object):
def __init__(self):
pass
def return_bounding_box_2d(self, x, y, xsize, ysize):
"""Return the bounding box
:param x: x center
:param y: y center
:param xsize: x size
:param ysize: y size
:return: list(x1, y1, x2, y2) where (x1, y1) and (x2, y2) are the coordinates of the diagonal points of the
bounding box depending on your coordinates frame
"""
if xsize <= 0 or ysize <= 0:
print("ERROR: can't compute bounding box, xsize or height has no positive value")
return []
return [x-xsize/2, y-ysize/2, x+xsize/2, y+ysize/2]
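        # Example (illustrative): return_bounding_box_2d(2.0, 2.0, 2., 2.)
        # returns [1.0, 1.0, 3.0, 3.0].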
def compute_qsr(self, bb1, bb2):
"""Wrapper for __compute_qsr
:param bb1: diagonal points coordinates of first bounding box (x1, y1, x2, y2)
:param bb2: diagonal points coordinates of second bounding box (x1, y1, x2, y2)
:return: an RCC depending on your implementation
"""
return self.__compute_qsr(bb1, bb2)
def __compute_qsr(self, bb1, bb2):
"""Replace with your own
:param bb1: diagonal points coordinates of first bounding box (x1, y1, x2, y2)
        :param bb2: diagonal points coordinates of second bounding box (x1, y1, x2, y2)
:return: an RCC depending on your implementation
"""
raise NotImplementedError("Replace with your code")
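    # A minimal sketch of what __compute_qsr might return (illustrative only;
    # assumes axis-aligned boxes and a coarse vocabulary, not a full RCC set):
    #
    #     def __compute_qsr(self, bb1, bb2):
    #         if bb1[2] < bb2[0] or bb2[2] < bb1[0] \
    #                 or bb1[3] < bb2[1] or bb2[3] < bb1[1]:
    #             return 'dc'  # disconnected
    #         return 'o'       # boxes touch or overlap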
def plot_bbs(bb1, bb2):
plt.figure()
ax = plt.gca()
# ax.invert_yaxis()
ax.add_patch(Rectangle((bb1[0], bb1[1]), bb1[2]-bb1[0], bb1[3]-bb1[1], alpha=1, facecolor="blue"))
ax.annotate("o1", (bb1[0], bb1[1]), color='black', weight='bold', fontsize=14)
ax.add_patch(Rectangle((bb2[0], bb2[1]), bb2[2]-bb2[0], bb2[3]-bb2[1], alpha=1, facecolor="red"))
ax.annotate("o2", (bb2[0], bb2[1]), color='black', weight='bold', fontsize=14)
h = 6
l = 0
# ax.set_xlim(l, h)
# ax.set_ylim(l, h)
ax.set_xlim(l, h)
ax.set_ylim(h, l)
plt.show()
if __name__ == '__main__':
dbg = Dbg()
    # Play with these to test: (x_center, y_center, xsize, ysize)
o1 = (2.0, 2.0, 2., 2.)
o2 = (4.0, 3.0, 1., 1.)
o1 = dbg.return_bounding_box_2d(o1[0], o1[1], o1[2], o1[3])
o2 = dbg.return_bounding_box_2d(o2[0], o2[1], o2[2], o2[3])
# Bounding boxes
# print("o1:", o1)
# print("o2:", o2)
# Relations
print("o1o2:", dbg.compute_qsr(o1, o2))
print("o2o1:", dbg.compute_qsr(o2, o1))
# Plot the boxes
plot_bbs(o1, o2)
| mit |
matthew-tucker/mne-python | mne/viz/tests/test_topo.py | 7 | 4728 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import os.path as op
import warnings
from collections import namedtuple
import numpy as np
from numpy.testing import assert_raises
from mne import io, read_events, Epochs
from mne import pick_channels_evoked
from mne.channels import read_layout
from mne.time_frequency.tfr import AverageTFR
from mne.utils import run_tests_if_main
from mne.viz import (plot_topo, plot_topo_image_epochs, _get_presser,
mne_analyze_colormap)
from mne.viz.topo import _plot_update_evoked_topo
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.2, 0.2
layout = read_layout('Vectorview-all')
def _get_raw():
return io.Raw(raw_fname, preload=False)
def _get_events():
return read_events(event_name)
def _get_picks(raw):
    return [0, 1, 2, 6, 7, 8, 340, 341, 342]  # take only a few channels
def _get_epochs():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
return epochs
def _get_epochs_delayed_ssp():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
reject = dict(mag=4e-12)
epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
proj='delayed', reject=reject)
return epochs_delayed_ssp
def test_plot_topo():
"""Test plotting of ERP topography
"""
import matplotlib.pyplot as plt
# Show topography
evoked = _get_epochs().average()
plot_topo(evoked) # should auto-find layout
warnings.simplefilter('always', UserWarning)
picked_evoked = evoked.pick_channels(evoked.ch_names[:3], copy=True)
picked_evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True)
picked_evoked_eeg.pick_channels(picked_evoked_eeg.ch_names[:3])
# test scaling
with warnings.catch_warnings(record=True):
for ylim in [dict(mag=[-600, 600]), None]:
plot_topo([picked_evoked] * 2, layout, ylim=ylim)
for evo in [evoked, [evoked, picked_evoked]]:
assert_raises(ValueError, plot_topo, evo, layout, color=['y', 'b'])
evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
ch_names = evoked_delayed_ssp.ch_names[:3] # make it faster
picked_evoked_delayed_ssp = pick_channels_evoked(evoked_delayed_ssp,
ch_names)
fig = plot_topo(picked_evoked_delayed_ssp, layout, proj='interactive')
func = _get_presser(fig)
event = namedtuple('Event', 'inaxes')
func(event(inaxes=fig.axes[0]))
params = dict(evokeds=[picked_evoked_delayed_ssp],
times=picked_evoked_delayed_ssp.times,
fig=fig, projs=picked_evoked_delayed_ssp.info['projs'])
bools = [True] * len(params['projs'])
_plot_update_evoked_topo(params, bools)
# should auto-generate layout
plot_topo(picked_evoked_eeg.copy(),
fig_background=np.zeros((4, 3, 3)), proj=True)
plt.close('all')
def test_plot_topo_image_epochs():
"""Test plotting of epochs image topography
"""
import matplotlib.pyplot as plt
title = 'ERF images - MNE sample data'
epochs = _get_epochs()
cmap = mne_analyze_colormap(format='matplotlib')
plot_topo_image_epochs(epochs, sigma=0.5, vmin=-200, vmax=200,
colorbar=True, title=title, cmap=cmap)
plt.close('all')
def test_plot_tfr_topo():
"""Test plotting of TFR data
"""
epochs = _get_epochs()
n_freqs = 3
nave = 1
data = np.random.RandomState(0).randn(len(epochs.ch_names),
n_freqs, len(epochs.times))
tfr = AverageTFR(epochs.info, data, epochs.times, np.arange(n_freqs), nave)
tfr.plot_topo(baseline=(None, 0), mode='ratio', title='Average power',
vmin=0., vmax=14., show=False)
tfr.plot([4], baseline=(None, 0), mode='ratio', show=False, title='foo')
run_tests_if_main()
| bsd-3-clause |
amitsela/beam | sdks/python/apache_beam/examples/complete/juliaset/juliaset/juliaset.py | 9 | 4504 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Julia set computing workflow: https://en.wikipedia.org/wiki/Julia_set.
We use the quadratic polynomial f(z) = z*z + c, with c = -.62772 +.42193i
"""
from __future__ import absolute_import
import argparse
import apache_beam as beam
from apache_beam.io import WriteToText
def from_pixel(x, y, n):
"""Converts a NxN pixel position to a (-1..1, -1..1) complex number."""
return complex(2.0 * x / n - 1.0, 2.0 * y / n - 1.0)
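# For example (illustrative): with n = 100, from_pixel(0, 0, 100) gives
# complex(-1, -1) and from_pixel(50, 50, 100) gives complex(0, 0).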
def get_julia_set_point_color(element, c, n, max_iterations):
"""Given an pixel, convert it into a point in our julia set."""
x, y = element
z = from_pixel(x, y, n)
for i in xrange(max_iterations):
if z.real * z.real + z.imag * z.imag > 2.0:
break
z = z * z + c
return x, y, i # pylint: disable=undefined-loop-variable
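# Note: the escape test above uses z.real**2 + z.imag**2 > 2.0; a point that
# never escapes within max_iterations keeps i == max_iterations - 1 and so
# lands in the deepest color bucket of the visualization.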
def generate_julia_set_colors(pipeline, c, n, max_iterations):
"""Compute julia set coordinates for each point in our set."""
def point_set(n):
for x in range(n):
for y in range(n):
yield (x, y)
julia_set_colors = (pipeline
| 'add points' >> beam.Create(point_set(n))
| beam.Map(
get_julia_set_point_color, c, n, max_iterations))
return julia_set_colors
def generate_julia_set_visualization(data, n, max_iterations):
"""Generate the pixel matrix for rendering the julia set as an image."""
import numpy as np # pylint: disable=wrong-import-order, wrong-import-position
colors = []
for r in range(0, 256, 16):
for g in range(0, 256, 16):
for b in range(0, 256, 16):
colors.append((r, g, b))
xy = np.zeros((n, n, 3), dtype=np.uint8)
for x, y, iteration in data:
    xy[x, y] = colors[iteration * len(colors) // max_iterations]
return xy
def save_julia_set_visualization(out_file, image_array):
"""Save the fractal image of our julia set as a png."""
from matplotlib import pyplot as plt # pylint: disable=wrong-import-order, wrong-import-position
plt.imsave(out_file, image_array, format='png')
def run(argv=None): # pylint: disable=missing-docstring
parser = argparse.ArgumentParser()
parser.add_argument('--grid_size',
dest='grid_size',
default=1000,
help='Size of the NxN matrix')
parser.add_argument(
'--coordinate_output',
dest='coordinate_output',
required=True,
help='Output file to write the color coordinates of the image to.')
parser.add_argument('--image_output',
dest='image_output',
default=None,
help='Output file to write the resulting image to.')
known_args, pipeline_args = parser.parse_known_args(argv)
p = beam.Pipeline(argv=pipeline_args)
n = int(known_args.grid_size)
coordinates = generate_julia_set_colors(p, complex(-.62772, .42193), n, 100)
# Group each coordinate triplet by its x value, then write the coordinates to
# the output file with an x-coordinate grouping per line.
# pylint: disable=expression-not-assigned
(coordinates
| 'x coord key' >> beam.Map(lambda (x, y, i): (x, (x, y, i)))
| 'x coord' >> beam.GroupByKey()
| 'format' >> beam.Map(
lambda (k, coords): ' '.join('(%s, %s, %s)' % coord for coord in coords))
| WriteToText(known_args.coordinate_output))
# pylint: enable=expression-not-assigned
return p.run().wait_until_finish()
# Optionally render the image and save it to a file.
# TODO(silviuc): Add this functionality.
# if p.options.image_output is not None:
# julia_set_image = generate_julia_set_visualization(
# file_with_coordinates, n, 100)
# save_julia_set_visualization(p.options.image_output, julia_set_image)
| apache-2.0 |
ElessarWebb/dummy | src/dummy/viewer/formatting/plotformatters.py | 1 | 2788 | from dummy.viewer.formatting import ResultFormatter, Formatter
import logging
logger = logging.getLogger( __name__ )
try:
import pylab
import numpy
@Formatter.register( 'plot' )
class PlotFormatter( ResultFormatter ):
def __init__( self, *args, **kwargs ):
			super( PlotFormatter, self ).__init__( *args, **kwargs )
# create the figure
self.figure = pylab.figure( facecolor='white' )
def format_results( self, results, *metrics ):
self.setup( results )
try:
self.plot( results, metrics )
except ( ValueError, TypeError ) as e:
raise Exception(
"Non numeric metrics cannot be plotted"
)
def setup( self, results ):
# get the xlabels
x = range( 1, len( results ) + 1 )
xlabels = [ r.test.name for r in results ]
pylab.title( 'Metric values per test (commit: %s)' % results[0].commit, fontsize=22 )
pylab.xticks( rotation=90 )
pylab.grid( True, markevery='integer' )
pylab.xlabel( 'tests', fontsize=16 )
pylab.margins( 0.05 )
pylab.xticks( x, xlabels )
def plot( self, results, metrics, **opts ):
# create the plots
plots = []
for metric in metrics:
plots.append( self.plot_metric( results, metric , **opts ))
# legendary
pylab.legend([ p[0] for p in plots], metrics )
# and show it
pylab.show()
def plot_metric( self, results, metric, **opts ):
x = range( 1, len( results ) + 1 )
y = [ t.get_metric( metric ) for t in results ]
try:
plot = pylab.plot( x, y, **opts )
pylab.setp( plot,
label=metric,
linestyle='dashed',
linewidth=1.0,
marker=".",
markersize=12.0,
aa=True
)
return plot
except ( ValueError, TypeError ) as e:
raise Exception(
"The metric `%s` is not numeric and can thus not be plotted." % metric
)
@Formatter.register( 'plot.bar' )
class BarPlotFormatter( PlotFormatter ):
def plot( self, results, metrics, **opts ):
# create the plots
plots = []
x = numpy.arange( len( results ))
margin = 0.2 / len( metrics )
width = 0.8 / len( metrics )
colors = [
				( i/( 2.0 * len( metrics )), i/float( len( metrics )), 0.8 )
for i in range( 1, len( metrics ) + 1)
]
for i, metric in enumerate( metrics ):
# compute the bar heights
y = [ t.get_metric( metric ) or 0 for t in results ]
plot = self.bar(
x + 0.5 + i*width + ( i ) * margin,
y,
width=width,
color=colors[i],
)
plots.append( plot )
pylab.setp( plot,
label=metric,
aa=True
)
# legendary
pylab.legend([ p[0] for p in plots], metrics )
# and show it
pylab.show()
def bar( self, *args, **kwargs ):
return pylab.bar( *args, **kwargs )
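	# Usage sketch (illustrative; 'runtime'/'memory' are placeholder metric
	# names and `results` is whatever ResultFormatter normally receives):
	#
	#     formatter = BarPlotFormatter()
	#     formatter.format_results( results, 'runtime', 'memory' )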
except ImportError:
logger.debug( "matplotlib is not installed, PlotFormatter not available." )
| mit |
chriscrosscutler/scikit-image | doc/ext/plot_directive.py | 89 | 20530 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which plot:: file names are relative to.
    (If None or empty, file names are relative to the directory where
the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
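For example, a Sphinx ``conf.py`` might set (illustrative values; the
defaults are registered in ``setup`` below)::
    plot_include_source = True
    plot_formats = [('png', 80), ('hires.png', 200), 'pdf']
    plot_html_show_formats = True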
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
from __future__ import division, absolute_import, print_function
import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
import sphinx
if sys.version_info[0] >= 3:
    from io import StringIO
else:
    from cStringIO import StringIO
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_config_value('plot_pre_code', '', True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_directive('plot', plot_directive, True, (0, 1, False),
**plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
    return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
plot_directive_options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
}
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def run(arguments, content, options, state_machine, state, lineno):
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
options.setdefault('include-source', config.plot_include_source)
# determine input
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if arguments:
if not config.plot_basedir:
source_file_name = os.path.join(rst_dir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
code = open(source_file_name, 'r').read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = makefig(code, source_file_name, build_dir, output_base,
config)
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s: %s" % (output_base, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
opts = [':%s: %s' % (key, val) for key, val in list(options.items())
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
if j == 0:
src_link = source_link
else:
src_link = None
result = format_template(
TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
shutil.copyfile(fn, os.path.join(dest_dir,
os.path.basename(fn)))
# copy script (if necessary)
if source_file_name == rst_file:
target_name = os.path.join(dest_dir, output_base + source_ext)
f = open(target_name, 'w')
f.write(unescape_doctest(code))
f.close()
return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
import exceptions
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
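# Examples (illustrative): contains_doctest(">>> 1 + 1") is True, while
# contains_doctest("x = 1") is False because the latter compiles as plain
# Python.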
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
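# Example (illustrative):
#   split_code_at_show("a = 1\nplt.show()\nb = 2")
# returns ["a = 1\nplt.show()", "b = 2"], one piece per displayed figure.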
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None):
# Change the working directory to the directory of the example, so
# it can get at its data files, if any.
pwd = os.getcwd()
old_sys_path = list(sys.path)
if code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Redirect stdout
stdout = sys.stdout
sys.stdout = StringIO()
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
exec(setup.config.plot_pre_code, ns)
exec(code, ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
"""
    Returns True if derived is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime)
def makefig(code, code_path, output_dir, output_base, config):
"""
Run a pyplot script *code* and save the images under *output_dir*
with file names derived from *output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
formats = []
for fmt in config.plot_formats:
if isinstance(fmt, str):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in range(1000):
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# -- We didn't find the files, so build them
results = []
ns = {}
for i, code_piece in enumerate(code_pieces):
# Clear between runs
plt.close('all')
# Run code
run_code(code_piece, code_path, ns)
# Collect images
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except exceptions.BaseException as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
# Results
results.append((code_piece, images))
return results
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
try:
from os.path import relpath
except ImportError:
# Copied from Python 2.7
if 'posix' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
elif 'nt' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir, splitunc
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = splitunc(path)
unc_start, rest = splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
else:
raise RuntimeError("Unsupported platform (no relpath available!)")
| bsd-3-clause |
leogulus/pisco_pipeline | pisco_photometry_all_2019.py | 1 | 76497 | import sys, os, re, yaml, subprocess, shlex, FITS_tools
import pandas as pd
import numpy as np
import pickle
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import image
import matplotlib.cm as cm
import matplotlib.image as mpimg
from scipy.optimize import curve_fit
import scipy.integrate as integrate
from scipy import interpolate
from scipy.interpolate import interp1d
import scipy.stats
from astropy.io import fits
from astropy.table import Table, join
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=71, Om0=0.3, Tcmb0=2.725)
import extra_program as ex
from PIL import Image as Image_PIL
import ebvpy #Galactic Reddening
"""
Example:
python pisco_pipeline/pisco_photometry_all_2019.py PKS1353 psf allslr 2mass
python pisco_pipeline/pisco_photometry_all_2019.py PKS1353 psf allslr no2mass
python pisco_pipeline/pisco_photometry_all_2019.py PKS1353 psf noslr no2mass
python pisco_pipeline/pisco_photometry_all_2019.py PKS1353 auto noslr no2mass
field: name of the fields
mode: psf, auto, aper, hybrid, model
allslr:
- allslr: run everything including photometry_v4, cut_frame, SLR
- slr: run just SLR and update the color
- noslr: don't run slr, just update the color with different modes
2mass
- 2mass: run SLR with 2MASS to match
- no2mass: run SLR without 2MASS
"""
###--------------------------------------------------------------------------###
def find_seeing(field,band):
df_see=pd.read_csv('/Users/taweewat/Documents/red_sequence/total_chips_field_seeing.csv',index_col=0)
if field[0:5]=='CHIPS':
seeing = df_see[df_see.chips==field]['seeing_q25_%s'%band].values[0] #_%s'%band
return seeing
elif (field[0:5]=='Field')|(field[0:3]=='PKS')|(field[0:4]=='SDSS'):
seeing = df_see[df_see.name==field]['seeing_q25_%s'%band].values[0] #_%s'%band
return seeing
def find_seeing_new(dir,field):
myReg3=re.compile(r'(CHIPS)[^\_]*\_[^\_]*')
seeing = float(fits.open(list_file_name(dir,myReg3.search(field).group())[0])[0].header['FWHM1'])
return seeing
def find_seeing_fits(field):
home='/Users/taweewat/Documents/pisco_code/'
dirs=['ut170103/','ut170104/','ut170619/','ut170621/','ut170624/','ut171208/',\
'ut171209/','ut171212/','ut190412/','ut190413/']
myReg=re.compile(r'(%s_A).*'%field)
for di in dirs:
dir=home+di
for text in os.listdir(dir):
if myReg.search(text) != None:
seeing=float(fits.open(dir+myReg.search(text).group())[0].header['FWHM1'])
return seeing
def read_param():
with open("pisco_pipeline/params.yaml", 'r') as stream:
try:
param=yaml.load(stream, Loader=yaml.FullLoader)
return param
except yaml.YAMLError as exc:
print(exc)
def read_param_izp(mode):
if mode=='psf':
mode_izp=''
elif mode=='model':
mode_izp='' #'_model'
else:
mode_izp=''
# print "/Users/taweewat/Documents/pisco_code/pisco_pipeline/params_izeropoint%s.yaml" % mode_izp
with open("/Users/taweewat/Documents/pisco_code/pisco_pipeline/params_izeropoint%s.yaml"%mode_izp, 'r') as stream:
try:
param=yaml.load(stream, Loader=yaml.FullLoader)
return param
except yaml.YAMLError as exc:
print(exc)
def star_galaxy_bleem(field):
sg_dir = 'star_galaxy'
if not os.path.exists(sg_dir):
os.makedirs(sg_dir)
param=read_param()
# seeing=find_seeing(field,'i')
# seeing=find_seeing_fits(field)
seeing = 1.0
# seeing=1.5
# seeing=0.95
minarea=1.7
data, header = fits.getdata('final/coadd_c%s_i.fits'%field, header=True)
data2=data**2
# pxscale=0.11
pxscale=0.22
fits.writeto('final/coadd_c%s_sq_i.fits'%field, data2, header=header, overwrite=True)
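    # A squared copy of the i-band image is written out here; the difference
    # in aperture magnitude measured on the image and on its square behaves
    # differently for point sources and extended sources, which
    # pisco_cut_star uses below to separate stars from galaxies.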
cmd='sex final/coadd_c%s_i.fits -c pisco_pipeline/config.sex -PARAMETERS_NAME pisco_pipeline/%s -CATALOG_NAME %s -CATALOG_TYPE FITS_1.0 -SEEING_FWHM %s -SATUR_LEVEL %s -PHOT_APERTURES 15 -PIXEL_SCALE %s -DETECT_MINAREA %s -CHECKIMAGE_NAME checki.fits,segmenti.fits'%\
(field,'sex.param',sg_dir+'/%s_catalog.fits'%(field),str(seeing),str(param['satur_level_i_psf']),str(pxscale),str(1.1/minarea*np.pi*(seeing/pxscale)**2)); print cmd
subprocess.check_call(shlex.split(cmd))
cmd='sex final/coadd_c%s_i.fits,final/coadd_c%s_sq_i.fits -c pisco_pipeline/config.sex -PARAMETERS_NAME pisco_pipeline/%s -CATALOG_NAME %s -CATALOG_TYPE FITS_1.0 -SEEING_FWHM %s -SATUR_LEVEL %s -PHOT_APERTURES 15 -PIXEL_SCALE %s -DETECT_MINAREA %s'%\
(field,field,'sex.param',sg_dir+'/%s_sq_catalog.fits'%(field),str(seeing),str(param['satur_level_i_sq_psf']),str(pxscale),str(1.1/minarea*np.pi*(seeing/pxscale)**2)); print cmd
subprocess.check_call(shlex.split(cmd))
def pisco_photometry_v4(field):
def aperature_proj(field,band):
param=read_param()
seeing=find_seeing(field,band)
# seeing=find_seeing_fits(field)
# seeing = 1.1
# seeing=1.5
slrdir = 'slr_output'
to_be_projected = 'final/coadd_c%s_%s.fits'%(field,band)
reference_fits = 'final/coadd_c%s_i.fits'%field
im1,im2, header = FITS_tools.match_fits(to_be_projected,reference_fits,return_header=True)
outname = 'final/proj_coadd_c%s_%s.fits'%(field,band)
print 'projecting from %s band to i band the fits file '%band + outname
fits.writeto(outname, im1, header, overwrite=True)
minarea=1.7 #1.7
pxscale=0.22
# pxscale=0.11
cmd='sex final/coadd_c%s_%s.fits -c pisco_pipeline/config.sex -PARAMETERS_NAME pisco_pipeline/%s -CATALOG_NAME %s -SEEING_FWHM %s -SATUR_LEVEL %s -PHOT_APERTURES 23 -PIXEL_SCALE %s -DETECT_MINAREA %s -CHECKIMAGE_NAME check_psf_%s.fits,segment_psf_%s.fits'%\
(field,band,'sex_psf.param','psfex_output/psf_%s_%s.fits'%(field,band),str(seeing),str(param['satur_level_%s_psf'%band]),str(pxscale),str(1.1/minarea*np.pi*(seeing/pxscale)**2), band, band)
print cmd
subprocess.check_call(shlex.split(cmd))
Tf=Table(fits.open('psfex_output/psf_%s_%s.fits'%(field,band))[2].data)
# Tfcut = Tf[(Tf['CLASS_STAR'] > 0.97) & (Tf['FLAGS'] == 0)].copy() #0.97 Field292
if len(Tf[(Tf['CLASS_STAR'] > 0.95) & (Tf['FLAGS'] < 5)]) > 0:
Tfcut = Tf[(Tf['CLASS_STAR'] > 0.95) & (Tf['FLAGS'] < 5)].copy()
else:
Tfcut = Tf[(Tf['CLASS_STAR'] > 0.9) & (Tf['FLAGS'] < 5)].copy()
# Tfcut = Tf[(Tf['CLASS_STAR'] > 0.9) & (Tf['FLAGS'] < 5)].copy() #0.97 Field292
Tfcut_edge=Tfcut[(Tfcut['XWIN_IMAGE']<np.max(Tfcut['XWIN_IMAGE'])-60)&(Tfcut['XWIN_IMAGE']>np.min(Tfcut['XWIN_IMAGE'])+60)&\
(Tfcut['YWIN_IMAGE']<np.max(Tfcut['YWIN_IMAGE'])-60)&(Tfcut['YWIN_IMAGE']>np.min(Tfcut['YWIN_IMAGE'])+60)].copy()
Tfcut_more=Tfcut_edge[(np.abs(Tfcut_edge['FLUX_RADIUS']-np.mean(Tfcut_edge['FLUX_RADIUS']))<2*np.std(Tfcut_edge['FLUX_RADIUS']))]
Tfcut_more2=Tfcut_more[(np.abs(Tfcut_more['ELONGATION']-np.mean(Tfcut_more['ELONGATION']))<2*np.std(Tfcut_more['ELONGATION']))].copy()
print "length of Tf: all: {}, CS>0.97: {}, edges: {}, flux_radius: {}, elong: {}".format(len(Tf), len(Tfcut), len(Tfcut_edge), len(Tfcut_more), len(Tfcut_more2))
hdu = fits.open('psfex_output/psf_%s_%s.fits'%(field,band))
hdu[2].data = hdu[2].data[Tfcut_more2['NUMBER']-1]
# hdu[2].data = hdu[2].data[Tfcut['NUMBER']-1]
hdu.writeto('psfex_output/psf_%s_%s.fits'%(field,band), overwrite=True)
cmd='psfex %s -c pisco_pipeline/pisco.psfex' % ('psfex_output/psf_%s_%s.fits'%(field,band))
print cmd
subprocess.check_call(shlex.split(cmd))
# minarea=3.0
cmd='sex final/coadd_c%s_i.fits,final/proj_coadd_c%s_%s.fits -c pisco_pipeline/config.sex -PSF_NAME %s -PARAMETERS_NAME pisco_pipeline/%s -CATALOG_NAME %s -SEEING_FWHM %s -SATUR_LEVEL %s -PIXEL_SCALE %s -CATALOG_TYPE FITS_1.0 -PHOT_APERTURES 23 -DETECT_MINAREA %s -CHECKIMAGE_NAME check%s.fits,segment%s.fits'%\
(field, field, band, 'psfex_output/psf_%s_%s.psf' % (field, band), 'sex_after_psf.param', '%s/a_psf_%s_%s.fits' % (slrdir, field, band),
str(seeing), str(param['satur_level_%s_psf' % band]), str(pxscale), str(1.1 / minarea * np.pi * (seeing / pxscale)**2), band, band)
print cmd
subprocess.check_call(shlex.split(cmd))
table=Table.read('%s/a_psf_%s_%s.fits'%(slrdir,field,band))
for name in table.colnames[:]:
table.rename_column(name, name + '_%s' % band)
return table
slrdir = 'slr_output'
if not os.path.exists(slrdir):
os.makedirs(slrdir)
tableg=aperature_proj(field,'g')
tablei=aperature_proj(field,'i')
tabler=aperature_proj(field,'r')
tablez=aperature_proj(field,'z')
print 'len of all table', len(tableg), len(tablei), len(tabler), len(tablez)
ci=SkyCoord(ra=np.array(tablei['ALPHA_J2000_i'])*u.degree, dec=np.array(tablei['DELTA_J2000_i'])*u.degree)# print len(ci)
cg=SkyCoord(ra=np.array(tableg['ALPHA_J2000_g'])*u.degree, dec=np.array(tableg['DELTA_J2000_g'])*u.degree)# print len(cg)
cr=SkyCoord(ra=np.array(tabler['ALPHA_J2000_r'])*u.degree, dec=np.array(tabler['DELTA_J2000_r'])*u.degree)# print len(cr)
cz=SkyCoord(ra=np.array(tablez['ALPHA_J2000_z'])*u.degree, dec=np.array(tablez['DELTA_J2000_z'])*u.degree)# print len(cz)
idxn, d2dn, d3dn=cg.match_to_catalog_sky(ci)
# Table_I=tablei[idxn][['NUMBER_i','XWIN_IMAGE_i','YWIN_IMAGE_i','ALPHA_J2000_i','DELTA_J2000_i','MAG_APER_i','MAGERR_APER_i','MAG_AUTO_i','MAGERR_AUTO_i','MAG_HYBRID_i','MAGERR_HYBRID_i',\
# 'CLASS_STAR_i','FLAGS_i','MAG_PSF_i','MAGERR_PSF_i','MAG_MODEL_i','MAGERR_MODEL_i','SPREAD_MODEL_i']]
Table_I=tablei[idxn][['NUMBER_i','XWIN_IMAGE_i','YWIN_IMAGE_i','ALPHA_J2000_i','DELTA_J2000_i','MAG_APER_i','MAGERR_APER_i','MAG_AUTO_i','MAGERR_AUTO_i','MAG_SPHEROID_i','MAGERR_SPHEROID_i',\
'CLASS_STAR_i','FLAGS_i','MAG_PSF_i','MAGERR_PSF_i','MAG_MODEL_i','MAGERR_MODEL_i','SPREAD_MODEL_i','SPREADERR_MODEL_i','MAG_ISO_i','MAGERR_ISO_i']]
Table_I.rename_column('ALPHA_J2000_i','ALPHA_J2000')
Table_I.rename_column('DELTA_J2000_i','DELTA_J2000')
idxn, d2dn, d3dn=cg.match_to_catalog_sky(cr)
# Table_R=tabler[idxn][['NUMBER_r','ALPHA_J2000_r','DELTA_J2000_r','MAG_APER_r','MAGERR_APER_r','MAG_AUTO_r','MAGERR_AUTO_r','MAG_HYBRID_r','MAGERR_HYBRID_r',\
# 'CLASS_STAR_r','FLAGS_r','MAG_PSF_r','MAGERR_PSF_r','MAG_MODEL_r','MAGERR_MODEL_r','SPREAD_MODEL_r']]
Table_R=tabler[idxn][['NUMBER_r','ALPHA_J2000_r','DELTA_J2000_r','MAG_APER_r','MAGERR_APER_r','MAG_AUTO_r','MAGERR_AUTO_r','MAG_SPHEROID_r','MAGERR_SPHEROID_r',\
'CLASS_STAR_r','FLAGS_r','MAG_PSF_r','MAGERR_PSF_r','MAG_MODEL_r','MAGERR_MODEL_r','SPREAD_MODEL_r','SPREADERR_MODEL_r','MAG_ISO_r','MAGERR_ISO_r']]
Table_R.rename_column('ALPHA_J2000_r','ALPHA_J2000')
Table_R.rename_column('DELTA_J2000_r','DELTA_J2000')
idxn, d2dn, d3dn=cg.match_to_catalog_sky(cz)
# Table_Z=tablez[idxn][['NUMBER_z','ALPHA_J2000_z','DELTA_J2000_z','MAG_APER_z','MAGERR_APER_z','MAG_AUTO_z','MAGERR_AUTO_z','MAG_HYBRID_z','MAGERR_HYBRID_z',\
# 'CLASS_STAR_z','FLAGS_z','MAG_PSF_z','MAGERR_PSF_z','MAG_MODEL_z','MAGERR_MODEL_z','SPREAD_MODEL_z']]
Table_Z=tablez[idxn][['NUMBER_z','ALPHA_J2000_z','DELTA_J2000_z','MAG_APER_z','MAGERR_APER_z','MAG_AUTO_z','MAGERR_AUTO_z','MAG_SPHEROID_z','MAGERR_SPHEROID_z',\
'CLASS_STAR_z','FLAGS_z','MAG_PSF_z','MAGERR_PSF_z','MAG_MODEL_z','MAGERR_MODEL_z','SPREAD_MODEL_z','SPREADERR_MODEL_z','MAG_ISO_z','MAGERR_ISO_z']]
Table_Z.rename_column('ALPHA_J2000_z','ALPHA_J2000')
Table_Z.rename_column('DELTA_J2000_z','DELTA_J2000')
# Table_G=tableg[['NUMBER_g','ALPHA_J2000_g','DELTA_J2000_g','MAG_APER_g','MAGERR_APER_g','MAG_AUTO_g','MAGERR_AUTO_g','MAG_HYBRID_g','MAGERR_HYBRID_g',\
# 'CLASS_STAR_g','FLAGS_g','MAG_PSF_g','MAGERR_PSF_g','MAG_MODEL_g','MAGERR_MODEL_g','SPREAD_MODEL_g']]
Table_G = tableg[['NUMBER_g', 'ALPHA_J2000_g', 'DELTA_J2000_g', 'MAG_APER_g', 'MAGERR_APER_g', 'MAG_AUTO_g', 'MAGERR_AUTO_g', 'MAG_SPHEROID_g', 'MAGERR_SPHEROID_g',
'CLASS_STAR_g','FLAGS_g','MAG_PSF_g','MAGERR_PSF_g','MAG_MODEL_g','MAGERR_MODEL_g','SPREAD_MODEL_g','SPREADERR_MODEL_g','MAG_ISO_g','MAGERR_ISO_g']]
Table_G.rename_column('ALPHA_J2000_g','ALPHA_J2000')
Table_G.rename_column('DELTA_J2000_g','DELTA_J2000')
print 'len of all new table', len(Table_G), len(Table_I), len(Table_R), len(Table_Z)
total=join(join(join(Table_I,Table_G,keys=['ALPHA_J2000','DELTA_J2000']),Table_R,keys=['ALPHA_J2000','DELTA_J2000']),\
Table_Z,keys=['ALPHA_J2000','DELTA_J2000'])
# total=join(join(join(mag_ii,mag_ig,keys='NUMBER'), mag_ir,keys='NUMBER'),\
# mag_iz,keys='NUMBER')
# total2=total[['ALPHA_J2000','DELTA_J2000','NUMBER_i','NUMBER_r','NUMBER_g','XWIN_IMAGE_i','YWIN_IMAGE_i',\
# 'MAG_APER_i','MAGERR_APER_i','MAG_APER_g','MAGERR_APER_g','MAG_APER_r',\
# 'MAGERR_APER_r','MAG_APER_z','MAGERR_APER_z','MAG_AUTO_i','MAGERR_AUTO_i',\
# 'MAG_AUTO_g','MAGERR_AUTO_g','MAG_AUTO_r','MAGERR_AUTO_r','MAG_AUTO_z',\
# 'MAGERR_AUTO_z','MAG_HYBRID_i','MAGERR_HYBRID_i','MAG_HYBRID_g',\
# 'MAGERR_HYBRID_g','MAG_HYBRID_r','MAGERR_HYBRID_r','MAG_HYBRID_z',\
# 'MAGERR_HYBRID_z','CLASS_STAR_i','CLASS_STAR_g','CLASS_STAR_r',\
# 'CLASS_STAR_z','FLAGS_g','FLAGS_r','FLAGS_i','FLAGS_z','MAG_PSF_g',\
# 'MAG_PSF_r','MAG_PSF_i','MAG_PSF_z','MAGERR_PSF_g','MAGERR_PSF_r',\
# 'MAGERR_PSF_i','MAGERR_PSF_z','MAG_MODEL_g','MAG_MODEL_r',\
# 'MAG_MODEL_i','MAG_MODEL_z','MAGERR_MODEL_g','MAGERR_MODEL_r',\
# 'MAGERR_MODEL_i','MAGERR_MODEL_z','SPREAD_MODEL_g','SPREAD_MODEL_r',\
# 'SPREAD_MODEL_i','SPREAD_MODEL_z',]]
total.write(os.path.join(slrdir, 'total0_psf_%s.csv' % field), overwrite=True)
total2=total[['ALPHA_J2000','DELTA_J2000','NUMBER_i','NUMBER_r','NUMBER_g','XWIN_IMAGE_i','YWIN_IMAGE_i',\
'MAG_APER_i','MAGERR_APER_i','MAG_APER_g','MAGERR_APER_g','MAG_APER_r',\
'MAGERR_APER_r','MAG_APER_z','MAGERR_APER_z','MAG_AUTO_i','MAGERR_AUTO_i',\
'MAG_AUTO_g','MAGERR_AUTO_g','MAG_AUTO_r','MAGERR_AUTO_r','MAG_AUTO_z',\
'MAGERR_AUTO_z','MAG_ISO_g','MAGERR_ISO_g','MAG_ISO_r','MAGERR_ISO_r',\
'MAG_ISO_i','MAGERR_ISO_i','MAG_ISO_z','MAGERR_ISO_z',\
'MAG_SPHEROID_i','MAGERR_SPHEROID_i','MAG_SPHEROID_g',\
'MAGERR_SPHEROID_g','MAG_SPHEROID_r','MAGERR_SPHEROID_r','MAG_SPHEROID_z',\
'MAGERR_SPHEROID_z','CLASS_STAR_i','CLASS_STAR_g','CLASS_STAR_r',\
'CLASS_STAR_z','FLAGS_g','FLAGS_r','FLAGS_i','FLAGS_z','MAG_PSF_g',\
'MAG_PSF_r','MAG_PSF_i','MAG_PSF_z','MAGERR_PSF_g','MAGERR_PSF_r',\
'MAGERR_PSF_i','MAGERR_PSF_z','MAG_MODEL_g','MAG_MODEL_r',\
'MAG_MODEL_i','MAG_MODEL_z','MAGERR_MODEL_g','MAGERR_MODEL_r',\
'MAGERR_MODEL_i','MAGERR_MODEL_z','SPREAD_MODEL_g','SPREAD_MODEL_r',\
'SPREAD_MODEL_i','SPREAD_MODEL_z','SPREADERR_MODEL_g','SPREADERR_MODEL_r',\
'SPREADERR_MODEL_i','SPREADERR_MODEL_z']]
total2.write(os.path.join(slrdir, 'total_psf_%s.csv' % field), overwrite=True)
# total2.write(slrdir+'/all_psf_%s.fits' % field, overwrite=True)
def pisco_cut_star(field,c_a,c_b,c_d,c_delta):
seeing=find_seeing_fits(field)
true_seeing=find_seeing(field,'i')
df_i=Table(fits.open('/Users/taweewat/Documents/pisco_code/star_galaxy/%s_catalog.fits'%field)[1].data).to_pandas()
df_isq=Table(fits.open('/Users/taweewat/Documents/pisco_code/star_galaxy/%s_sq_catalog.fits'%field)[1].data).to_pandas()
    # cut objects so that the sq catalog and the psf mag list contain the same set of objects.
fname = "/Users/taweewat/Documents/pisco_code/slr_output/total_psf_%s.csv"%field
df0 = pd.read_csv(fname)
df0['NUMBER'] = np.arange(0, len(df0), 1).tolist()
cf_i=SkyCoord(ra=np.array(df_i['ALPHA_J2000'])*u.degree, dec=np.array(df_i['DELTA_J2000'])*u.degree)
cf_isq=SkyCoord(ra=np.array(df_isq['ALPHA_J2000'])*u.degree, dec=np.array(df_isq['DELTA_J2000'])*u.degree)
cf0=SkyCoord(ra=np.array(df0['ALPHA_J2000'])*u.degree, dec=np.array(df0['DELTA_J2000'])*u.degree)
df0.rename(columns={'ALPHA_J2000': 'ALPHA_J2000_i'}, inplace=True)
df0.rename(columns={'DELTA_J2000': 'DELTA_J2000_i'}, inplace=True)
idxn, d2dn, d3dn=cf0.match_to_catalog_sky(cf_i)
df_i_cut0=df_i.loc[idxn].copy()
df_i_cut0['NUMBER']=np.arange(0,len(df0),1).tolist()
df_i_cut=pd.merge(df_i_cut0,df0,on='NUMBER')
idxn, d2dn, d3dn=cf0.match_to_catalog_sky(cf_isq)
df_isq_cut0=df_isq.loc[idxn].copy()
df_isq_cut0['NUMBER']=np.arange(0,len(df0),1).tolist()
df_isq_cut=pd.merge(df_isq_cut0,df0,on='NUMBER')
fig,ax=plt.subplots(2,3,figsize=(15,10))
df_i0=df_i_cut[(df_i_cut.MAG_APER<0)&(df_isq_cut.MAG_APER<0)]
df_isq0=df_isq_cut[(df_i_cut.MAG_APER<0)&(df_isq_cut.MAG_APER<0)]# print len(df_i), len(df_isq)
# c_d=-7.5
df_i2=df_i0[(df_i0.CLASS_STAR>c_a) & (df_i0.MAG_APER<c_d)]# & (df_i0.MAG_APER>c_c)]
df_isq2=df_isq0[(df_i0.CLASS_STAR>c_a) & (df_i0.MAG_APER<c_d)]# & (df_i0.MAG_APER>c_c)];# print len(df_i2), len(df_isq2)
icut_per=np.percentile(df_i2.MAG_APER,35) #35
df_i3=df_i2[df_i2.MAG_APER>icut_per]
df_isq3=df_isq2[df_i2.MAG_APER>icut_per]
fit=np.polyfit(df_i3.MAG_APER, df_i3.MAG_APER-df_isq3.MAG_APER, 1)
f=np.poly1d(fit)
ax[0,0].plot(df_i2.MAG_APER,f(df_i2.MAG_APER),'--')
res=(df_i3.MAG_APER-df_isq3.MAG_APER)-f(df_i3.MAG_APER)
aa=np.abs(res)<1.5*np.std(res)
# outl=np.abs(res)>=1.5*np.std(res)
fit=np.polyfit(df_i3.MAG_APER[aa], df_i3.MAG_APER[aa]-df_isq3.MAG_APER[aa], 1)
f=np.poly1d(fit)
ax[0,0].axvline(icut_per,color='blue',label='35th quantile')
ax[0,0].errorbar(df_i2.MAG_APER,df_i2.MAG_APER-df_isq2.MAG_APER,yerr=np.sqrt(df_i2.MAGERR_APER**2+df_isq2.MAGERR_APER**2),fmt='o')
ax[0,0].set_title('only for star')
ax[0,0].plot(df_i2.MAG_APER,f(df_i2.MAG_APER),'--',label='no outlier')
ax[0,0].set_ylabel('MAG_APER-MAG_APER_sq')
ax[0,0].set_xlabel('MAG APER i')
#---> #0.1 default, 0.2
c_c=df_i2[f(df_i2.MAG_APER)-(df_i2.MAG_APER-df_isq2.MAG_APER)<0.1]['MAG_APER'].values\
[np.argmin(df_i2[f(df_i2.MAG_APER)-(df_i2.MAG_APER-df_isq2.MAG_APER)<0.1]['MAG_APER'].values)] #edit10/30 (previous 0.1)
#--->
ax[0,0].axvline(c_c,color='red',label='new upper cut')
ax[0,0].legend(loc='best')
# color_axis='CLASS_STAR'
color_axis='SPREAD_MODEL_i'
ax[0,1].scatter(df_i0.MAG_APER,df_i0.MAG_APER-df_isq0.MAG_APER,marker='.',c=df_i0[color_axis],vmin=0., vmax=0.005)
ax[0,1].plot(df_i3.MAG_APER,df_i3.MAG_APER-df_isq3.MAG_APER,'x')
ax[0,1].set_title('for all objects')
ax[0,1].set_ylabel('MAG_APER-MAG_APER_sq')
ax[0,1].set_xlabel('MAG APER i')
ax[0,1].axvline(c_b,ls='--')
ax[0,1].axvline(c_c,ls='--')
delta=(df_i0.MAG_APER-df_isq0.MAG_APER) - f(df_i0.MAG_APER)
ax[0,2].scatter(df_i0.MAG_APER,delta,marker='.',c=df_i0[color_axis],vmin=0., vmax=0.005)
ax[0,2].axhline(0,ls='--')
ax[0,2].axvline(c_c,ls='--')
ax[0,2].axvline(c_b,ls='--')
ax[0,2].set_ylabel('Delta')
ax[0,2].set_xlabel('MAG APER i')
ax[0,2].set_ylim(0.5,-1.2)
df_i1=df_i0[(df_i0.MAG_APER>c_c)&(df_i0.MAG_APER<c_b)].copy()
df_isq1=df_isq0[(df_i0.MAG_APER>c_c)&(df_i0.MAG_APER<c_b)].copy()
delta1=(df_i1.MAG_APER-df_isq1.MAG_APER) - f(df_i1.MAG_APER)
ax[1,0].scatter(df_i1.MAG_APER, delta1, marker='o', c=df_i1[color_axis],vmin=0., vmax=0.005)
ax[1,0].axhline(0,ls='--')
ax[1,0].axhline(c_delta, ls='--')
ax[1,0].set_ylabel('Delta')
ax[1,0].set_xlabel('MAG APER i')
ax[1,0].set_ylim(0.5,-2)
# deltag=delta1[delta1<c_delta] #galaxy 0.1, 0.2 (0.005), 0.5 ()
deltas=delta1[(delta1>=c_delta)&(delta1<3.)] #star
def gauss(x, *p):
A, mu, sigma = p
return A*np.exp(-(x-mu)**2/(2.*sigma**2))
p0 = [1., 0., 0.1]
# def gauss(x, *p):
# A, sigma = p
# return A*np.exp(-(x-0)**2/(2.*sigma**2))
# p0 = [1., 0.1]
#galaxy
# hist, bin_edges = np.histogram(deltag,bins=np.arange(-1.2,0.5,0.02))
hist, bin_edges = np.histogram(delta1,bins=np.arange(-1.2,0.5,0.02))
bin_centres = (bin_edges[:-1] + bin_edges[1:])/2
ax[1,1].plot(bin_centres, hist, label='galaxies',linestyle='steps')
#stars
hist, bin_edges = np.histogram(deltas,bins=np.arange(-1,0.5,0.02)) #(0 vs -1,0.5,0.02)
# hist, bin_edges = np.histogram(delta1, bins=np.arange(c_delta, 0.5, 0.02))
bin_centres = (bin_edges[:-1] + bin_edges[1:])/2
coeff2, var_matrix = curve_fit(gauss, bin_centres, hist, p0=p0)
ax[1,1].plot(bin_centres, hist, label='stars',linestyle='steps')
# hist, bin_edges = np.histogram(delta1,bins=np.arange(-1.2,0.5,0.02)) #added for right gaussian fitting
# bin_centres = (bin_edges[:-1] + bin_edges[1:])/2 # added for right gaussian fitting
x=np.arange(-1.25,0.5,0.02)
# hist_fit2 = gauss(x, *coeff2)
hist_fit2 = gauss(x, *coeff2)
hist_fit3 = gauss(x, *coeff2)/np.max(gauss(x, *coeff2)) #added for right gaussian fitting
ax[1,1].plot(x, hist_fit2, label='stars_fit')
ax[1,1].plot(x, hist_fit3, label='stars_fit_norm') #added for right gaussian fitting
ax[1,1].axvline(x[hist_fit3>star_cut][0],c='tab:pink',label='cut:%.3f'%x[hist_fit3>star_cut][0]) #added for right gaussian fitting
ax[1,1].legend(loc='best')
ax[1,1].set_xlabel('Delta')
ax[1,1].set_ylabel('Histogram')
ax[0,2].axhline(x[hist_fit3>star_cut][0],c='tab:pink') #added for right gaussian fitting
ax[1,0].axhline(x[hist_fit3>star_cut][0],c='tab:pink') #added for right gaussian fitting
ax[1,2].axhline(star_cut, c='tab:red') # added for right gaussian fitting
maxi=np.max(gauss(delta,*coeff2))
def prob_SG(delta,maxi,*coeff2):
if delta>0.:
return 0.
elif delta<=0.:
return 1. - (gauss(delta, *coeff2) / maxi)
vprob_SG= np.vectorize(prob_SG)
SG=1.-vprob_SG(delta1,maxi,*coeff2)
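    # SG is the star probability: prob_SG returns 0 for delta > 0 (SG = 1),
    # and for delta <= 0 SG follows the fitted Gaussian, so objects far below
    # the stellar locus (large negative delta) approach SG = 0 (galaxy-like).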
df_i1.loc[:,'SG']=SG
param_izp=read_param_izp('psf')
mag0=param_izp['i_zp_day%i'%dir_dict[find_fits_dir(field)[-9:]]]
axi = ax[1, 2].scatter(df_i1.MAG_APER + mag0, SG,
marker='.', c=df_i1[color_axis], vmin=0., vmax=0.005)
ax[1,2].axvline(aper_cut, ls='--', c='tab:blue')
ax[1,2].axhline(SG_upper, ls='--', c='tab:blue')
ax[1,2].set_ylim(-0.02,1.02)
ax[1,2].set_xlabel('MAG APER i')
ax[1,2].set_ylabel('SG (probability to be a star)')
plt.suptitle(field+' seeing vs true_seeing: '+str(seeing)+','+str(true_seeing))
fig.colorbar(axi)
plt.tight_layout(rect=[0, 0., 1, 0.98])
plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_color_plots/star_galaxy_sep_12_all%s.png' % field, dpi=120)
plt.close(fig)
return df_i_cut, df_i1
def pisco_cut_frame(field):
# df_i=Table(fits.open('/Users/taweewat/Documents/pisco_code/star_galaxy/'+
# '%s_catalog.fits'%field)[1].data).to_pandas()
"""
c_a: CLASS_STAR lower limit for stars used for the linear fit
c_b, c_c: upper and lower limit for all objects selection
c_c can be moved with the for loop to include more objects until the confusion limit
c_d: Faintest magnitude for stars used for the linear fit
c_delta: lower limit for Delta to consider stars before fitting the gaussian and find SG (Star/Galaxy) factor
"""
seeing=find_seeing_fits(field)
true_seeing=find_seeing(field,'i')
##Using SPREAD_MODEL to seperate star/galaxies
fname = "/Users/taweewat/Documents/pisco_code/slr_output/total_psf_%s.csv"%field
df0 = pd.read_csv(fname)
df0['NUMBER'] = np.arange(0, len(df0), 1).tolist()
df0.rename(columns={'ALPHA_J2000': 'ALPHA_J2000_i'}, inplace=True)
df0.rename(columns={'DELTA_J2000': 'DELTA_J2000_i'}, inplace=True)
#EXTENDED_COADD: 0 star, 1 likely star, 2 mostly galaxies, 3 galaxies
# df0['EXTENDED_COADD']=np.array(((df0['SPREAD_MODEL_i']+ 3*df0['SPREADERR_MODEL_i'])>0.005).values, dtype=int)+\
# np.array(((df0['SPREAD_MODEL_i']+df0['SPREADERR_MODEL_i'])>0.003).values, dtype=int)+\
# np.array(((df0['SPREAD_MODEL_i']-df0['SPREADERR_MODEL_i'])>0.003).values, dtype=int)
# dff=df0[df0['EXTENDED_COADD']>1]
# dff_star=df0[df0['EXTENDED_COADD']<2]
dfi=df0[df0['MAG_AUTO_i']<-16]
x=dfi['MAG_AUTO_i']
y=dfi['SPREAD_MODEL_i']
p_spread=np.poly1d(np.polyfit(x,y,1))
xs=np.arange(np.min(df0['MAG_AUTO_i']),np.max(df0['MAG_AUTO_i']),0.01)
df0['SPREAD_MODEL_i2']=df0['SPREAD_MODEL_i']-p_spread(df0['MAG_AUTO_i'])
dff=df0[(df0['SPREAD_MODEL_i']>0.005)]
# dff_star=df0[np.abs(df0['SPREAD_MODEL_i'])<0.004] #+5/3.*df0['SPREADERR_MODEL_i'] <0.002
dff_star=df0[(df0['SPREAD_MODEL_i']<0.004)]#&(df0['FLAGS_i']<4)]
fig=plt.figure(figsize=(4,4))
plt.plot(df0['MAG_AUTO_i'],df0['SPREAD_MODEL_i'],'.',c='grey',alpha=0.1)
plt.plot(dff['MAG_AUTO_i'],dff['SPREAD_MODEL_i'],'.',alpha=1,label='galaxies')
plt.plot(dff_star['MAG_AUTO_i'],dff_star['SPREAD_MODEL_i'],'.',alpha=1,label='stars')
plt.ylim(-0.08,0.08)
plt.xlim(-19,-10.5)
plt.axhline(0.005,color='tab:orange')
plt.legend(loc='best')
plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_color_plots/spread_model_real_i_fit_%s_%s.png' %
(mode, field), dpi=120)
plt.close(fig)
dff0=dff
dff0.to_csv("/Users/taweewat/Documents/pisco_code/slr_output/"+\
"galaxy_psf_total_%s.csv"%field)
# dff_star0=pd.merge(dff_star, df0, on='NUMBER') # for non-SPREAD_MODEL
dff_star0=dff_star #for SPREAD_MODEL
dff_star0.to_csv("/Users/taweewat/Documents/pisco_code/slr_output/"+\
"star_psf_total_%s.csv"%field)
def pisco_photometry_psf_v4(field, mode='psf', mode2mass='', slr=True): #mode2mass: '' vs '_no2mass'
def slr_running_psf(field, infile="None", mode="psf", mode2mass='', bigmacs="pisco_pipeline/big-macs-calibrate-master"):
"""
slr_running: running SLR script from github.com/patkel/big-macs-calibrate to get a calibrated magnitude
INPUT:
- field: object of interset e.g., 'Field026'
- bigmacs: the location for "big-macs-calibrate" directoty
OUTPUT:
- a new table with added columns with name MAG_g,...,MAGERR_g,...
"""
slrdir = 'slr_output'
pyfile = os.path.join(bigmacs, 'fit_locus.py')
# cmd = "python %s --file %s --columns %s --extension 1 --bootstrap 15 -l -r ALPHA_J2000_i -d DELTA_J2000_i -j --plot=PLOTS_%s_%s" \
# % (pyfile, infile, os.path.join(bigmacs, "coadd_mag_sex_%s%s.columns"%(mode,'')), mode, field)
if mode2mass=='':
cmd = "python %s --file %s --columns %s --extension 1 --bootstrap 15 -l -r ALPHA_J2000_i -d DELTA_J2000_i -j --plot=PLOTS_%s_%s" \
% (pyfile, infile, os.path.join(bigmacs, "coadd_mag_sex_%s%s.columns"%(mode,mode2mass)), mode, field) #'' vs '_no2mass'
elif mode2mass=='_no2mass':
cmd = "python %s --file %s --columns %s --extension 1 --bootstrap 15 -l -r ALPHA_J2000_i -d DELTA_J2000_i --plot=PLOTS_%s_%s" \
% (pyfile, infile, os.path.join(bigmacs, "coadd_mag_sex_%s%s.columns"%(mode,mode2mass)), mode, field) #'' vs '_no2mass'
print cmd
subprocess.check_call(shlex.split(cmd))
def update_color(fname, table, mode='psf'):
"""
        update_color: apply the SLR zero-point offsets to produce calibrated magnitudes
INPUT:
- fname: input file from SLR output (...offsets.list)
        - table: the table whose magnitudes we want to calibrate (adding columns MAG_g, ..., MAGERR_g, ...)
OUTPUT:
- a new table with added columns with name MAG_g,...,MAGERR_g,...
"""
print fname
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
# print content
if len(content)==8:
red_content=content[4:]
elif len(content)==10:
red_content=content[5:-1]
# if len(content)==7:
# red_content=content[4:]
# elif len(content)==9:
# red_content=content[5:-1]
band = [x.split(' ')[0][-1] for x in red_content]
corr = [float(x.split(' ')[1]) for x in red_content]
ecorr = [float(x.split(' ')[3]) for x in red_content]
print 'bands = ', band
if mode=='psf':
MODE1='PSF'
elif mode=='model':
MODE1='MODEL'
elif mode=='auto':
MODE1='AUTO'
elif mode=='aper':
MODE1='APER'
elif mode=='hybrid':
MODE1='HYBRID'
elif mode=='iso':
MODE1='ISO'
table['MAG_' + band[0]] = table['MAG_%s_'%MODE1 + band[0]] + corr[0]
table['MAG_' + band[1]] = table['MAG_%s_'%MODE1 + band[1]] + corr[1]
table['MAG_' + band[2]] = table['MAG_%s_'%MODE1 + band[2]] + corr[2]
table['MAG_' + band[3]] = table['MAG_%s_'%MODE1 + band[3]] + corr[3]
table['MAGERR_' + band[0]] = (table['MAGERR_%s_'%MODE1 + band[0]]**2)**0.5# + ecorr[0]**2)**0.5
table['MAGERR_' + band[1]] = (table['MAGERR_%s_'%MODE1 + band[1]]**2)**0.5# + ecorr[1]**2)**0.5
table['MAGERR_' + band[2]] = (table['MAGERR_%s_'%MODE1 + band[2]]**2)**0.5# + ecorr[2]**2)**0.5
table['MAGERR_' + band[3]] = (table['MAGERR_%s_'%MODE1 + band[3]]**2)**0.5# + ecorr[3]**2)**0.5
return table
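# Example (a sketch): for an offsets.list whose calibration rows look like
#   "MAG_g 0.123 +- 0.010", the code above takes the trailing letter of the
#   first token as the band and sets MAG_g = MAG_PSF_g + 0.123 (for mode='psf');
#   the zero-point error ecorr is parsed but its propagation is commented out.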
slrdir = 'slr_output'
df0=pd.read_csv("/Users/taweewat/Documents/pisco_code/slr_output/star_psf_total_%s.csv" % field,index_col=0)
# if field=='SDSS603':
# df0=df0.drop([399,258,357,157,381,310,86,81,31,66,422,232,208,19,10])
# elif field=='SDSS501':
# df0=df0.drop([265,108,196,213,160])
# elif field=='SDSS123':
# df0=df0.drop([68,5,61])
# else:
# df0=df0
total3 = Table.from_pandas(df0)
total3=total3[['NUMBER','ALPHA_J2000_i','DELTA_J2000_i','XWIN_IMAGE_i','YWIN_IMAGE_i',\
'MAG_APER_i','MAGERR_APER_i','MAG_APER_g','MAGERR_APER_g','MAG_APER_r',\
'MAGERR_APER_r','MAG_APER_z','MAGERR_APER_z','MAG_AUTO_i','MAGERR_AUTO_i',\
'MAG_AUTO_g','MAGERR_AUTO_g','MAG_AUTO_r','MAGERR_AUTO_r','MAG_AUTO_z',\
'MAGERR_AUTO_z','MAG_ISO_i','MAGERR_ISO_i','MAG_ISO_g','MAGERR_ISO_g','MAG_ISO_r',\
'MAGERR_ISO_r','MAG_ISO_z','MAGERR_ISO_z',\
'MAG_SPHEROID_i','MAGERR_SPHEROID_i','MAG_SPHEROID_g',\
'MAGERR_SPHEROID_g','MAG_SPHEROID_r','MAGERR_SPHEROID_r','MAG_SPHEROID_z',\
'MAGERR_SPHEROID_z','CLASS_STAR_i','CLASS_STAR_g','CLASS_STAR_r',\
'CLASS_STAR_z','FLAGS_g','FLAGS_r','FLAGS_i','FLAGS_z','MAG_PSF_g',\
'MAG_PSF_r','MAG_PSF_i','MAG_PSF_z','MAGERR_PSF_g','MAGERR_PSF_r',\
'MAGERR_PSF_i','MAGERR_PSF_z','MAG_MODEL_g','MAG_MODEL_r',\
'MAG_MODEL_i','MAG_MODEL_z','MAGERR_MODEL_g','MAGERR_MODEL_r',\
'MAGERR_MODEL_i','MAGERR_MODEL_z','SPREAD_MODEL_g','SPREAD_MODEL_r',\
'SPREAD_MODEL_i','SPREAD_MODEL_z','SPREADERR_MODEL_g','SPREADERR_MODEL_r',\
'SPREADERR_MODEL_i','SPREADERR_MODEL_z']]
print 'number of stars =', len(total3)
if (mode2mass==''):
starpsfmode = '_psf'
elif (mode2mass=='_no2mass'):
starpsfmode ='_no2mass'
# total3.write(slrdir+'/star_psf%s_%s_%i.fits' % ('_psf',field,0), overwrite=True) #with 2MASS stars: star_psf_psf_%s_%i.fits
total3.write(slrdir+'/star_psf%s_%s_%i.fits'%(starpsfmode,field,0),overwrite=True)
# no 2MASS star mode vs , '_psf' vs '_no2mass'
if slr:
slr_running_psf(field, infile=slrdir + '/star_psf%s_%s_%i.fits' %
(starpsfmode, field, 0), mode='psf', mode2mass=mode2mass) # '_psf' vs '_no2mass'
print 'mode=', mode, '/star_psf%s_%s_%i.fits.offsets.list' % (starpsfmode, field, 0)
total_gal=Table.from_pandas(pd.read_csv("/Users/taweewat/Documents/pisco_code/slr_output/galaxy_psf_total_%s.csv"%(field)))
ntotal_gal = update_color(slrdir+'/star_psf%s_%s_%i.fits.offsets.list' %
(starpsfmode, field, 0), total_gal, mode=mode)
ntotal_gal.write(os.path.join(
slrdir, 'galaxy_%s%s_ntotal_%s.csv'%(mode,mode2mass,field)), overwrite=True)
total_star=Table.from_pandas(pd.read_csv("/Users/taweewat/Documents/pisco_code/slr_output/star_psf_total_%s.csv"%(field)))
ntotal_star = update_color(slrdir+'/star_psf%s_%s_%i.fits.offsets.list'%
(starpsfmode, field, 0), total_star, mode=mode)
ntotal_star.write(os.path.join(
slrdir, 'star_%s%s_ntotal_%s.csv'%(mode,mode2mass,field)), overwrite=True)
def make_images(field,ax=None):
dir='/Users/taweewat/Documents/pisco_code/Chips_images/'
try:
    ax.imshow(image.imread(dir+"aplpy4_%s_img4.jpeg"%field))
except IOError:
    ax.imshow(image.imread(dir+"aplpy4_%s_img.jpeg"%field))
# ax.imshow(image.imread(dir+"aplpy4_%s_img4.jpeg"%field))
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.axis('off')
return None
# def sur_pro(r): #Mpc
# def fn(x):
# if x>=1:
# return 1.-(2/np.sqrt(x**2-1)*np.arctan(np.sqrt((x-1.)/(x+1.))))
# elif x<1:
# return 1.-(2/np.sqrt(1-x**2)*np.arctanh(np.sqrt((1.-x)/(x+1.))))
# rs=0.15/0.71 #Mpc
# if r>=(0.1/0.71):
# return 1/((r/rs)**2-1)*fn(r/rs)
# elif r<(0.1/0.71):
# return 1./(((0.1/0.71)/rs)**2-1)*fn((0.1/0.71)/rs)
# def k_NFW():
# def integrated(y):
# return 1./integrate.quad(lambda r: 2*np.pi*r*sur_pro(r),0,y)[0]
# xy=np.logspace(-3,3,num=30)
# X = np.log(xy)
# Y = np.log([integrated(np.e**(y)) for y in X])
# Z=np.polyfit(X,Y,6)
# k_NFW = np.poly1d(Z)
# return k_NFW
# def sur_pro_prob(r,rc,k_NFW): #(Mpc,Mpc) # Weighted based on the distance from the center (Rykoff+12)
# return np.e**(k_NFW(np.log(rc)))*sur_pro(r)
name=['z','dist','age','mass','Abs_g','App_g','kcorr_g','Abs_r',\
'App_r','kcorr_r','Abs_i','App_i','kcorr_i','Abs_z','App_z','kcorr_z']
df=pd.read_csv('/Users/taweewat/Documents/red_sequence/rsz/model/'+\
# 'ezmodel2_bc03_zf2.5_chab_0.016_exp_0.1.txt',
'ezmodel2_bc03_zf2.5_chab_0.02_exp_0.1.txt',
# 'ezmodel2_c09_zf3.0_chab_0.02_exp_0.1.txt',
skiprows=27,delim_whitespace=True,names=name)
df=df[(df.z>=0.1) & (df.z<1.)]
z_new=np.arange(0.1, 0.95, 0.0025)
Appi_new = interpolate.splev(z_new, interpolate.splrep(df.z, df.App_i, s=0), der=0)
Appi_f = interpolate.interp1d(df.z, df.App_i, kind='cubic')
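# Appi_f(z) gives the model apparent i-band magnitude at redshift z via cubic
# interpolation of the stellar-population grid read above, e.g. Appi_f(0.3);
# it anchors the magnitude tilt of the red-sequence color models below.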
#all extra options
extra_name= 'gnorm_zf2.5_bc03_noebv_auto_bin1.0_root15_sur0.25' #'gremove_lum_silk_zf2.5_c09_11', 'gremove_silk_zf3_c09_noebv_model_complete_no2mass'
core_radius=0.25
gremove = False # remove non-detect g objects from the list
duplicate = False # remove duplicate redshift (uncertain)
colorerr = True # add redshift with color_error taken into account
transparent = True # make transparent plot for flip book
img_flip = False # make image flip from the transparent plot
img_redshift = True # make image with redshift for each object
def linear_rmi(x0,redshift):
x=df.z[:-11] #-12
y=(df.App_r-df.App_i)[:-11] #-12
yhat = np.polyfit(x, y, 5) #5 vs 9
f_rmi = np.poly1d(yhat)
slope=-0.0222174237562*1.007
# Appi0=Appi_new[np.where(abs(z_new-redshift)<=1e-9)[0][0]]
Appi0=Appi_f(redshift)
return slope*(x0-Appi0)+f_rmi(redshift)
def linear_gmr(x0,redshift):
x=df.z[:-24] #-25
y=(df.App_g-df.App_r)[:-24] #-25
yhat = np.polyfit(x, y, 5)
f_gmr = np.poly1d(yhat)
slope=-0.0133824600874*1.646
# Appi0=Appi_new[np.where(abs(z_new-redshift)<=1e-9)[0][0]]
Appi0=Appi_f(redshift)
return slope*(x0-Appi0)+f_gmr(redshift)
def linear_gmi(x0,redshift):
x=df.z[:-9]
y=(df.App_g-df.App_i)[:-9]
yhat = np.polyfit(x, y, 5)
f_gmi = np.poly1d(yhat)
Appi0=Appi_f(redshift)
slope = -0.04589707934164738 * 1.481
return slope*(x0-Appi0)+f_gmi(redshift)
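# linear_rmi / linear_gmr / linear_gmi give the expected red-sequence color
# (r-i, g-r, g-i) for a galaxy of apparent magnitude x0 at the given redshift:
# a 5th-order polynomial fit to the model color-redshift relation, tilted in
# magnitude about Appi_f(redshift) with an empirically scaled slope.
# e.g. linear_rmi(20.0, 0.5) -> expected r-i of a member with MAG_i=20 at z=0.5.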
def find_fits_dir(field):
home = '/Users/taweewat/Documents/pisco_code/'
dirs = ['ut170103/', 'ut170104/', 'ut170619/', 'ut170621/',\
'ut170624/', 'ut171208/', 'ut171209/', 'ut171212/']
myReg = re.compile(r'(%s_A).*' % field)
for di in dirs:
diri = home + di
for text in os.listdir(diri):
if myReg.search(text) != None:
# filename = myReg.search(text).group()
allfilename = diri
return allfilename
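# e.g. find_fits_dir('Field026') returns the night directory (such as
# '.../pisco_code/ut170621/') whose listing contains a file matching
# 'Field026_A*'; note it raises a NameError if no night directory matches.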
dir_dict = dict(zip(['ut170103/','ut170104/','ut170619/',\
'ut170621/','ut170624/','ut171208/','ut171209/','ut171212/'], np.arange(1, 9)))
def find_ra_dec(field):
if field == 'PKS1353':
RA = 209.0225
DEC = -34.3530556
redshift = 0.223
elif field == 'CHIPS2249-2808': #CHIPS2227-4333
# RA = 336.99975202151825
# DEC = -43.57623068466675
RA = 336.98001
DEC = -43.56472
redshift = -1
elif field == 'CHIPS2246-2854': #'CHIPS2223-3455'
# RA = 335.7855174238757
# DEC = -34.934569299688185
RA = 335.78
DEC = -34.9275
redshift = -1
elif field[0:5] == 'Field':
base = pd.read_csv(
'/Users/taweewat/Dropbox/Documents/MIT/Observation/2017_1/all_objs.csv')
RA = base[base.name == field].ra.values[0]
DEC = base[base.name == field].dec.values[0]
redshift = base[base.name == field].redshift.values[0]
elif field[0:5] == 'CHIPS':
base = pd.read_csv(
'/Users/taweewat/Documents/red_sequence/chips_all_obj.csv', index_col=0)
RA = base[base.chips == field].ra.values[0]
DEC = base[base.chips == field].dec.values[0]
redshift = base[base.chips == field].redshift.values[0]
elif field[0:4] == 'SDSS':
base = pd.read_csv(
'/Users/taweewat/Documents/xray_project/ned-result/final_sdss_cut5.csv', index_col=0)
RA = base[base.name == field].RA.values[0]
DEC = base[base.name == field].DEC.values[0]
redshift = base[base.name == field].redshift.values[0]
return RA, DEC, redshift
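# e.g. find_ra_dec('Field026') -> (RA, DEC, redshift), looked up per target
# class (Field*/CHIPS*/SDSS* catalogs); redshift == -1 flags an unknown
# redshift, as for the two hard-coded CHIPS targets above.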
def pisco_tilt_resequence(field, mode='psf', mode2mass=''):
RA, DEC, redshift = find_ra_dec(field)
if redshift!=-1:
qso_redshift=redshift
else:
qso_redshift=0.2
print 'RA', RA
print 'DEC', DEC
ebv = ebvpy.calc_ebv(ra=[RA],dec=[DEC]); print 'ebv:', ebv[0]
# ebv_g=ebvpy.calc_color_correction('g', ebv)[0]
# ebv_r=ebvpy.calc_color_correction('r', ebv)[0]
# ebv_i=ebvpy.calc_color_correction('i', ebv)[0]
# ebv_z=0.0
ebv_g,ebv_r,ebv_i,ebv_z=0.0,0.0,0.0,0.0 #no longer use reddening correction because it is already included in SLR
print 'ebv_g:', ebv_g, 'ebv_r:', ebv_r, 'ebv_i:', ebv_i
param_izp=read_param_izp(mode) #i zero point
# fname = "/Users/taweewat/Documents/pisco_code/slr_output/galaxy_ntotal_%s.csv"%field
dir_slrout='/Users/taweewat/Documents/pisco_code/slr_output/'
fname = dir_slrout+"galaxy_%s%s_ntotal_%s.csv" % (
mode, mode2mass, field) # '' vs '_no2mass'
df0 = pd.read_csv(fname,index_col=0)
if gremove:
nog=len(df0[df0['MAG_PSF_g'] >= 50.]); print "no g detected:", nog
df0 = df0[df0['MAG_PSF_g'] < 50.].copy() # cut out not detected objects in g band
else:
nog=0
c5 = SkyCoord(ra=df0['ALPHA_J2000_i'].values*u.degree, dec=df0['DELTA_J2000_i'].values*u.degree)
c0 = SkyCoord(ra=RA*u.degree, dec=DEC*u.degree)
sep = c5.separation(c0)
df0['sep(deg)']=sep
df0['sep(Mpc)']=sep*60.*cosmo.kpc_proper_per_arcmin(qso_redshift).value/1e3
cut=df0
dfi = cut#.drop_duplicates(subset=['XWIN_WORLD', 'YWIN_WORLD'], keep='first').copy()
print 'duplicates:', len(df0), len(dfi)
# Added Galactic Reddening (6/16/18)
if mode2mass == '':
dfi['MAG_i']=dfi['MAG_i']-ebv_i
dfi['MAG_g']=dfi['MAG_g']-ebv_g
dfi['MAG_r']=dfi['MAG_r']-ebv_r
# Use i Zero Point from each day and g,r zero point from the color (6/22/18)
elif mode2mass == '_no2mass':
mag0 = param_izp['i_zp_day%i'%dir_dict[find_fits_dir(field)[-9:]]]
print 'i_zp_day', find_fits_dir(field), mag0
dfi['MAG_i']=dfi['MAG_i']-ebv_i+mag0
dfi['MAG_g']=dfi['MAG_g']-ebv_g+mag0
dfi['MAG_r']=dfi['MAG_r']-ebv_r+mag0
dfi['MAG_z']=dfi['MAG_z']-ebv_z+mag0
dfi.to_csv(dir_slrout+"galaxy_%s_final_%s.csv"%(mode,field))
fname=dir_slrout+"star_%s%s_ntotal_%s.csv" % (mode, mode2mass, field)
df0=pd.read_csv(fname,index_col=0)
dfi=df0
# Added Galactic Reddening (6/16/18)
if mode2mass == '':
dfi['MAG_i']=dfi['MAG_i']-ebv_i
dfi['MAG_g']=dfi['MAG_g']-ebv_g
dfi['MAG_r']=dfi['MAG_r']-ebv_r
# Use i Zero Point from each day and g,r zero point from the color (6/22/18)
elif mode2mass == '_no2mass':
mag0 = param_izp['i_zp_day%i'%dir_dict[find_fits_dir(field)[-9:]]]
print 'i_zp_day', find_fits_dir(field), mag0
dfi['MAG_i']=dfi['MAG_i']-ebv_i+mag0
dfi['MAG_g']=dfi['MAG_g']-ebv_g+mag0
dfi['MAG_r']=dfi['MAG_r']-ebv_r+mag0
dfi['MAG_z']=dfi['MAG_z']-ebv_z+mag0
dfi.to_csv(dir_slrout+"star_%s_final_%s.csv"%(mode,field))
return None
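# NOTE: the early return above appears to cut off the red-sequence analysis
# that follows (and `dfi` at this point holds the star, not galaxy, catalog).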
# dfi=dfi[dfi['MAG_i']<21.5].copy()
# dfi=dfi[dfi.MAGERR_g<0.5]
# dfi=dfi[(dfi.MAG_g<100)&(dfi.MAG_i<100)&(dfi.MAG_r<100)]
# dfi=dfi[(dfi.FLAGS_g<5)&(dfi.FLAGS_r<5)&(dfi.FLAGS_i<5)&(dfi.FLAGS_z<5)]
# Fit the r-i photometric-uncertainty curve vs. MAG_i to set the magnitude cut
dfi=dfi[np.sqrt(dfi['MAGERR_r']**2+dfi['MAGERR_i']**2)<0.3].copy() #0.5
x=dfi['MAG_i']
y=np.sqrt(dfi['MAGERR_r']**2+dfi['MAGERR_i']**2)
p=np.poly1d(np.polyfit(x,np.log(y),1, w=np.sqrt(y)))
Mag_cut=(p-np.log(0.067*1.5)).roots[0]
print "Mag_cut: %.2f"%(Mag_cut)
xs=np.arange(np.min(x),np.max(x),0.01)
fig,ax=plt.subplots(figsize=(5,5))
plt.plot(x,y,'.',label='r-i')
plt.plot(xs,np.exp(p(xs)),label='exp({:.1f}+{:.1f}x)'.format(p[0],p[1]))
plt.xlabel('Mag_i'); plt.ylabel('$\Delta r-i$ err')
plt.ylim(-0.05,0.35)
plt.axvline(Mag_cut,label='Mag_cut')
plt.legend(loc='best')
plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_color_plots/uncer_%s_%s.png' %
(mode, field), dpi=120)
# plt.tight_layout()
plt.close(fig)
#Magnitude cut
print field, qso_redshift, df0.shape, cut.shape, dfi.shape, dfi['sep(deg)'].max(), dfi['sep(Mpc)'].max()
norm = matplotlib.colors.Normalize(vmin=0.10,vmax=0.675)
c_m = matplotlib.cm.cool
s_m = matplotlib.cm.ScalarMappable(cmap=c_m, norm=norm)
s_m.set_array([])
I=np.arange(16,24,0.01)
dfi.loc[:,"z_gmr"] = np.nan
dfi.loc[:,"z_rmi"] = np.nan
dfi.loc[:,"w_gmr"] = np.nan
dfi.loc[:,"w_rmi"] = np.nan
dfi.loc[:,"w_col_gmr"] = np.nan
dfi.loc[:,"w_col_rmi"] = np.nan
# dfi.loc[:,"z_gmi"] = np.nan
# dfi.loc[:,"w_gmi"] = np.nan
# dfi.loc[:,"w_col_gmi"] = np.nan
# k_NFW0=k_NFW()
bin_width=0.035 #0.025
# bins_gmr_cen = np.arange(0.15815-0.0175, 0.33315-0.0175+0.01, bin_width)
# bins_gmr_edge = np.arange(0.14065-0.0175, 0.35065-0.0175+0.01, bin_width)
# bins_gmr_cen = np.arange(0.12315+0.0175, 0.33315+0.0175+0.01, bin_width)
# bins_gmr_edge = np.arange(0.10565+0.0175, 0.35065+0.0175+0.01, bin_width)
# bins_rmi_cen = np.arange(0.36815-0.0175, 0.64815-0.0175+0.01, bin_width)
# bins_rmi_edge = np.arange(0.35065-0.0175, 0.66565-0.0175+0.01, bin_width)
#new one: combine last gmr with "new" rmi
bins_gmr_cen = np.arange(0.12315, 0.33315+0.01, bin_width)
bins_gmr_edge = np.arange(0.10565, 0.35065+0.01, bin_width)
bins_rmi_cen = np.arange(0.36815-bin_width, 0.64815+0.01, bin_width)
bins_rmi_edge = np.arange(0.35065-bin_width, 0.66565+0.01, bin_width)
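# Binning scheme: redshift bins of width 0.035; the g-r sequence covers bin
# centers z ~ 0.123-0.333 and r-i covers z ~ 0.333-0.648, with one shared
# center at z ~ 0.333 so the two color probes can be stitched (see n_total).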
z_rmi,w_rmi,w_col_rmi=[],[],[]
for i, row in dfi.iterrows():
for z in bins_rmi_cen:
# if row['MAG_i'] < -18+5.*np.log10(ex.d_L(z)*1e6)-5.:
# if row['MAG_i'] < magi_cut_rmi:
# if np.sqrt(row['MAGERR_r']**2+row['MAGERR_i']**2)<0.134: #np.mean(f_rmi(x+0.07)-f_rmi(x))
# if np.sqrt(row['MAGERR_r']**2+row['MAGERR_i']**2)<0.067*1.5: #0.067*1.5
if row['MAG_i'] < Mag_cut:
rmi=row['MAG_r']-row['MAG_i']
# rmierr=np.sqrt(row['MAGERR_r']**2+row['MAGERR_i']**2)
low_edge=linear_rmi(row['MAG_i'],round(z-0.0175,4)) #0.0125
high_edge=linear_rmi(row['MAG_i'],round(z+0.0175,4)) #0.0125
if (rmi > low_edge) & (rmi <= high_edge):
# if (np.sqrt(row['MAGERR_r']**2+row['MAGERR_i']**2) < 3.5*(high_edge-low_edge)):
z_rmi.append(round(z,3))
# wrmi0=sur_pro_prob(row['sep(Mpc)'],1.,k_NFW0)
wrmi0=ex.sur_pro_prob_ang(row['sep(deg)']*60, core_radius); w_rmi.append(wrmi0) #arcmin
# w_col_rmi0=scipy.stats.norm(rmi,rmierr).cdf(high_edge)-scipy.stats.norm(rmi,rmierr).cdf(low_edge); w_col_rmi.append(w_col_rmi0)
w_col_rmi0=1.; w_col_rmi.append(w_col_rmi0)
dfi.loc[i,"z_rmi"]=z
dfi.loc[i,"w_rmi"]=wrmi0
dfi.loc[i,"w_col_rmi"]=w_col_rmi0
z_gmr,w_gmr,w_col_gmr=[],[],[]
for i, row in dfi.iterrows():
for z in bins_gmr_cen:
# if row['MAG_i'] < -18+5.*np.log10(ex.d_L(z)*1e6)-5.:
# if row['MAG_i'] < magi_cut_gmr:
# if np.sqrt(row['MAGERR_g']**2+row['MAGERR_r']**2)<0.165: #np.mean(f_gmr(x+0.07)-f_gmr(x))
# if np.sqrt(row['MAGERR_g']**2+row['MAGERR_r']**2)<0.0825*1.5: #0.0825*1.5
if row['MAG_i'] < Mag_cut:
gmr=row['MAG_g']-row['MAG_r']
# gmrerr=np.sqrt((row['MAGERR_g'])**2+row['MAGERR_r']**2) #add factor 2.2 to reduce the g error to be similar to other bands
low_edge=linear_gmr(row['MAG_i'],round(z-0.0175,4)) #0.0125
high_edge=linear_gmr(row['MAG_i'],round(z+0.0175,4)) #0.0125
if (gmr > low_edge) & (gmr <= high_edge):
# if (np.sqrt(row['MAGERR_g']**2+row['MAGERR_r']**2) < 3.5*(high_edge-low_edge)):
z_gmr.append(round(z,3))
# w_col_gmr0=scipy.stats.norm(gmr,gmrerr).cdf(high_edge)-scipy.stats.norm(gmr,gmrerr).cdf(low_edge); w_col_gmr.append(w_col_gmr0)
w_col_gmr0=1.; w_col_gmr.append(w_col_gmr0)
# wgmr0=sur_pro_prob(row['sep(Mpc)'],1.,k_NFW0); w_gmr.append(wgmr0)
wgmr0 = ex.sur_pro_prob_ang(row['sep(deg)'] * 60, core_radius); w_gmr.append(wgmr0) # arcmin
dfi.loc[i,"z_gmr"]=z
dfi.loc[i,"w_gmr"]=wgmr0
dfi.loc[i,"w_col_gmr"]=w_col_gmr0
ns1,xs1=np.histogram(z_gmr,bins=bins_gmr_edge,weights=np.array(w_gmr)*np.array(w_col_gmr)) #0.15-0.325
bin_cen1 = (xs1[:-1] + xs1[1:])/2
ns2,xs2=np.histogram(z_rmi,bins=bins_rmi_edge,weights=np.array(w_rmi)*np.array(w_col_rmi)) #0.36-0.675
bin_cen2 = (xs2[:-1] + xs2[1:])/2
# z_total=np.append(bin_cen1, bin_cen2)
# n_total=np.append(ns1,ns2)
z_total=np.append(bin_cen1, bin_cen2[1:])
n_total=np.append(np.append(ns1[:-1],np.array(ns1[-1]+ns2[0])),np.array(ns2[1:]))
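# Stitch the two histograms: the last g-r bin and the first r-i bin share a
# center, so those two counts are summed once rather than double counted.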
z_max=z_total[np.where(n_total==np.max(n_total))[0][0]]
n_median = np.median(n_total[n_total != 0])
n_mean = np.mean(n_total)
n_bkg = np.mean(sorted(n_total)[2:-2]);
z_total_added = np.insert(
np.append(z_total, z_total[-1] + bin_width), 0, z_total[0] - bin_width)
n_total_added = np.insert(np.append(n_total, 0), 0, 0) - n_bkg
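# Background level n_bkg = mean of the counts after dropping the two highest
# and two lowest bins; subtract it and pad one empty bin on each side so the
# 3-point peak fit below always has neighbors around the maximum.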
# print 'n_total_added', n_total_added
lumfn=pd.read_csv('/Users/taweewat/Documents/red_sequence/coma_cluster_luminosity_function/schecter_fn.csv',\
names=['M_r','theta(M)Mpc^-3'])
h=0.7
x=lumfn['M_r']+5*np.log10(h);
y=lumfn['theta(M)Mpc^-3']*(h**3)
f1d=interp1d(x, y,kind='cubic')
def lum_function(M):
alpha = -1.20
Nb = np.log(10) / 2.5 * 0.002 * (70 / 50.)**3
Mb_s = -21. + 5 * np.log10(70 / 50.)
return Nb * (10.**(0.4 * (alpha + 1) * (Mb_s - M))) * np.exp(-10.**(0.4 * (Mb_s - M)))
lum_fn = lambda z: integrate.quad( f1d, -23.455, ex.abs_mag(22.25, z))[0]
lum_vfn = np.vectorize(lum_fn)
dense_fn = lambda z: integrate.quad(ex.NFW_profile,0.001,cosmo.kpc_proper_per_arcmin(z).value/1e3)[0]
dense_vfn = np.vectorize(dense_fn)
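# f1d interpolates an empirical (Coma, Schechter-like) r-band luminosity
# function; lum_vfn(z) integrates it between M = -23.455 and the absolute
# magnitude reached by an apparent cut of 22.25 at z, and dense_vfn integrates
# the external ex.NFW_profile -- both serve as depth/geometry normalizers.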
n_total_adj=n_total_added #/(lum_vfn(z_total_added)*dense_vfn(z_total_added)) (adjusted the peak before picking it)
print 'n_total_added:', n_total_added
print 'n_total_adj:', n_total_adj
indi = np.where(n_total_adj == np.max(n_total_adj))[0][0]
# indi = np.where(n_total_added == np.max(n_total_added))[0][0]
z_fit = z_total_added[[indi - 1, indi, indi + 1]]; print 'z_fit', z_fit
n_fit = n_total_added[[indi - 1, indi, indi + 1]]; print 'n_fit', n_fit
def gaussian_func(x, a, mu):
sigma=0.035
return a * np.exp(-(x-mu)**2/(2*(sigma**2)))
if (n_fit[0]<0.) and (n_fit[2]<0.):
popt, pcov = curve_fit(gaussian_func, z_fit, [0,n_fit[1],0], p0=[n_fit[1],z_fit[1]])
else:
popt, pcov = curve_fit(gaussian_func, z_fit,
n_fit, p0=[n_fit[1], z_fit[1]])
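# Peak model: a Gaussian with sigma fixed to the bin width (0.035), fit only
# to the peak bin and its two neighbors; if both neighbors fall below the
# background, they are replaced by zero so the fitted amplitude stays positive.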
# signal=tuple(popt)[0]
# def v_func(z):
# return (z**2+2*z)/(z**2+2*z+2)
# signal=((np.max(n_total)-np.mean(n_total))*(v_func(z_max)*(4000))**2)/5.3e6 #normalization for r~1 at z~0.15
# signal = (
# (tuple(popt)[0]) * (cosmo.luminosity_distance(tuple(popt)[1]).value)**1.5) / 5.3e5 # normalization for r~1 at z~0.15
z_max_fit = tuple(popt)[1]
# lum_factor = integrate.quad(lum_function, -24, abs_mag(21.60, tuple(popt)[1]))[0]
# lum_factor = cosmo.luminosity_distance(tuple(popt)[1]).value**-1.5*100
lum_factor = integrate.quad( f1d, -23.455, ex.abs_mag(22.25, z_max_fit))[0]
#-23.455: min abs Mag from schecter_fn.csv, 22.25: median of Mag r
density_factor=integrate.quad(ex.NFW_profile, 0.001, core_radius*cosmo.kpc_proper_per_arcmin(z_max_fit).value/1e3)[0]
signal = tuple(popt)[0] / (lum_factor * density_factor)
print 'z_max_fit', z_max_fit
print 'lum_factor:', lum_factor
print 'density_factor', density_factor
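# Richness proxy: the fitted Gaussian amplitude normalized by lum_factor (how
# much of the luminosity function the fixed apparent cut reaches at z_max_fit)
# and by density_factor (the NFW surface density integrated out to core_radius).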
# duplicate=False ## set duplication
n_total_dup=0
## Plot the figure
cmap=matplotlib.cm.RdYlGn
if duplicate or colorerr:
fig,ax=plt.subplots(1,5,figsize=(25,5))
else:
fig,ax=plt.subplots(1,4,figsize=(20,5))
make_images(field,ax[0])
norm = matplotlib.colors.Normalize(vmin=0.01,vmax=2)
dfi_ri=dfi.loc[dfi['z_rmi'].dropna().index]
ax[1].scatter(dfi['MAG_i'],dfi['MAG_r']-dfi['MAG_i'],c='black',alpha=0.1)#dfi['w_rmi'],cmap=cmap)
ax[1].scatter(dfi_ri['MAG_i'],dfi_ri['MAG_r']-dfi_ri['MAG_i'],c=dfi_ri['w_rmi'],cmap=cmap)#,norm=norm)
ax[1].errorbar(dfi_ri['MAG_i'],dfi_ri['MAG_r']-dfi_ri['MAG_i'],xerr=dfi_ri['MAGERR_i'],yerr=np.sqrt(dfi_ri['MAGERR_r']**2+dfi_ri['MAGERR_i']**2),fmt='none',c='k',alpha=0.05)
# plt.plot(df.App_i,df.App_r-df.App_i,'.')
# ax[1].axhline(xs[:-1][(xs[:-1]<1.33) & (xs[:-1]>0.6)][0],lw=0.7,color='green')
for z in bins_rmi_cen:
ax[1].plot(I,linear_rmi(I,round(z,4)),color=s_m.to_rgba(z))
ax[1].set_ylim(0.25,1.5)
ax[1].set_xlim(16,24)
# cbar=plt.colorbar(s_m)
ax[1].set_xlabel('I')
ax[1].set_ylabel('R-I')
ax[1].set_title('z=0.35-0.675')#, icut:'+str(magi_cut_rmi))
# plt.plot([corr_f(z) for z in df.z.values[5:-12]],df.App_r[5:-12]-df.App_i[5:-12],'-')
dfi_gr=dfi.loc[dfi['z_gmr'].dropna().index]
ax[2].scatter(dfi['MAG_i'],dfi['MAG_g']-dfi['MAG_r'],c='black',alpha=0.1)#,c=dfi['w_gmr'],cmap=cmap)
ax[2].scatter(dfi_gr['MAG_i'],dfi_gr['MAG_g']-dfi_gr['MAG_r'],c=dfi_gr['w_gmr'],cmap=cmap)#,norm=norm)
ax[2].errorbar(dfi_gr['MAG_i'],dfi_gr['MAG_g']-dfi_gr['MAG_r'],xerr=dfi_gr['MAGERR_i'],yerr=np.sqrt(dfi_gr['MAGERR_g']**2+dfi_gr['MAGERR_r']**2),fmt='none',c='k',alpha=0.05)
# plt.plot(df.App_i,df.App_g-df.App_r,'.')
# ax[2].axhline(xs[:-1][(xs[:-1]<1.65) & (xs[:-1]>np.min(x2))][0],lw=0.7,color='green')
for z in bins_gmr_cen:
ax[2].plot(I,linear_gmr(I,round(z,4)),color=s_m.to_rgba(z))
ax[2].set_ylim(0.75,2)
ax[2].set_xlim(16,24)
# cbar=plt.colorbar(s_m)
ax[2].set_xlabel('I')
ax[2].set_ylabel('G-R')
ax[2].set_title('z=0.15-0.325')
# plt.plot([corr_f(z) for z in df.z.values[:-25]],df.App_g[:-25]-df.App_r[:-25],'-')
xs=np.arange(np.min(z_fit)-0.1,np.max(z_fit)+0.1,0.001)
ax[3].bar(bin_cen2, ns2, width=bin_width, color='#1f77b4', alpha=1.0)
ax[3].bar(bin_cen1, ns1, width=bin_width, color='#ff7f0e', alpha=1.0)
ax[3].bar(z_total, n_total, width=bin_width, color='grey', alpha=0.5)
ax[3].axvline(0.3525,ls='--')
ax[3].axvline(z_max,ls='--',color='purple',label='z_max:%.2f'%z_max)
ax[3].axvline(redshift,color='red',label='z:%.2f'%redshift)
ax[3].plot(z_fit,n_fit+n_bkg,'o',c='tab:purple')
ax[3].plot(xs, gaussian_func(xs, *popt)+n_bkg, c='tab:green', ls='--', label='fit: a=%.2f, mu=%.4f'% tuple(popt))
ax[3].axhline(n_median,color='tab:green',label='median:%.2f'%n_median)
ax[3].axhline(n_mean,color='tab:red',label='mean:%.2f'%n_mean)
ax[3].legend(loc='best')
ax[3].set_xlabel('z')
ax[3].set_xlim(0.1,0.7)
ax[3].set_title('ebv:%.3f,ebv_g-r:-%.3f,ebv_r-i:-%.3f'%(ebv[0],ebv_g-ebv_r,ebv_r-ebv_i))
if np.max(n_total)<30:
ax[3].set_ylim(0,30)
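# NOTE: the duplicate branch below uses *_dup quantities whose computation is
# currently commented out; keep duplicate=False unless that code path is restored.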
if duplicate:
xs = np.arange(np.min(z_fit_dup) - 0.1, np.max(z_fit_dup) + 0.1, 0.001)
ax[4].bar(bin_cen2, ns2-ns_dup2, width=bin_width, color='#1f77b4') #width = 0.025
ax[4].bar(bin_cen1, ns1-ns_dup1, width=bin_width, color='#ff7f0e') #width = 0.025
ax[4].axvline(z_max,ls='--',color='purple',label='z_max:%.2f'%z_max)
ax[4].axvline(redshift,color='red',label='z:%.2f'%redshift)
ax[4].plot(z_fit_dup,n_fit_dup+n_bkg_dup,'o',c='tab:purple')
ax[4].plot(xs, gaussian_func(xs, *popt_dup)+n_bkg_dup, c='tab:green', ls='--', label='fit: a=%.2f, mu=%.4f'% tuple(popt))
ax[4].legend(loc='best')
ax[4].set_xlabel('z')
ax[4].set_xlim(0.1,0.7)
if np.max(n_total)<30:
ax[4].set_ylim(0,30)
if colorerr:
dfi_rmi = dfi[~np.isnan(dfi['z_rmi'])]
dfi_gmr = dfi[~np.isnan(dfi['z_gmr'])]
zs_gmr = np.arange(0.11, 0.3425, 0.002)
zs_rmi = np.arange(0.3425, 0.65, 0.002)
ntot_rmi = np.repeat(0, len(zs_rmi))
ntot_gmr = np.repeat(0, len(zs_gmr))
for i, row in dfi_rmi.iterrows():
# for i, row in dfi.iterrows():
i0 = row['MAG_i']
rmi = row['MAG_r'] - row['MAG_i']
rmierr = np.sqrt((row['MAGERR_r'])**2 + row['MAGERR_i']**2)
ntot_rmi0 = scipy.stats.norm(rmi, rmierr).pdf(
linear_rmi(i0, zs_rmi))
ntot_rmi = ntot_rmi + ntot_rmi0 * row['w_rmi']
ax[4].plot(zs_rmi,ntot_rmi0*row['w_rmi'],'-',color='tab:red',alpha=0.2)
for i, row in dfi_gmr.iterrows():
# for i, row in dfi.iterrows():
i0 = row['MAG_i']
gmr = row['MAG_g'] - row['MAG_r']
gmrerr = np.sqrt((row['MAGERR_g'])**2 + row['MAGERR_r']**2)
ntot_gmr0 = scipy.stats.norm(gmr, gmrerr).pdf(
linear_gmr(i0, zs_gmr))
ntot_gmr = ntot_gmr + ntot_gmr0 * row['w_gmr']
ax[4].plot(zs_gmr,ntot_gmr0*row['w_gmr'],'-',color='tab:cyan',alpha=0.2)
ax[4].plot(zs_gmr, ntot_gmr, '.')
ax[4].plot(zs_rmi, ntot_rmi, '.')
ax[4].axvline(z_max,ls='--',color='purple',label='z_max:%.2f'%z_max)
ax[4].axvline(redshift,color='red',label='z:%.2f'%redshift)
ax[4].legend(loc='best')
ax[4].set_xlabel('z')
ax[4].set_xlim(0.1, 0.7)
if np.max(np.append(ntot_gmr,ntot_rmi)) < 200:
ax[4].set_ylim(0, 200)
n_total_cerr=np.append(ntot_gmr,ntot_rmi)
else:
n_total_cerr=0
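# colorerr mode replaces hard bin membership with a smooth density: each
# candidate contributes a Gaussian in z (its color error propagated through
# the tilted model), scaled by its NFW weight, on a dz = 0.002 grid.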
signal_final = signal_dup if duplicate else signal
plt.tight_layout(rect=[0, 0., 1, 0.98])
purge('/Users/taweewat/Documents/red_sequence/pisco_color_plots/',
'redsq_richg%s_%s_all_.*_%s_tilted.png' % ('', mode, field))
plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_color_plots/redsq_richg%s_%s_all_%.3f_%s_tilted.png' % ('',mode,signal_final,field), dpi=120)
plt.close(fig)
# fig,ax=plt.subplots(1,4,figsize=(20,5))
# make_images(field,ax[0])
# dfi_gmi=dfi[~np.isnan(dfi['z_gmi'])]
# zs_gmi=np.arange(0.115,0.69,0.002)
# ntot_gmi=np.repeat(0,len(zs_gmi))
# for i, row in dfi_gmi.iterrows():
# i0 = row['MAG_i']
# gmi = row['MAG_g'] - row['MAG_i']
# gmierr = np.sqrt((row['MAGERR_g'])**2 + row['MAGERR_i']**2)
# ntot_gmi0 = scipy.stats.norm(gmi, gmierr).pdf(
# linear_gmi(i0, zs_gmi))
# ntot_gmi = ntot_gmi + ntot_gmi0 * row['w_gmi']
# ax[3].plot(zs_gmi,ntot_gmi0*row['w_gmi'],'-',color='tab:cyan',alpha=0.2)
# ax[1].scatter(dfi['MAG_i'],dfi['MAG_g']-dfi['MAG_i'],c='black',alpha=0.1)#dfi['w_rmi'],cmap=cmap)
# ax[1].scatter(dfi_gmi['MAG_i'],dfi_gmi['MAG_g']-dfi_gmi['MAG_i'],c=dfi_gmi['w_gmi'],cmap=cmap)
# ax[1].errorbar(dfi_gmi['MAG_i'], dfi_gmi['MAG_g'] - dfi_gmi['MAG_i'], xerr=dfi_gmi['MAGERR_i'],
# yerr=np.sqrt(dfi_gmi['MAGERR_g']**2 + dfi_gmi['MAGERR_i']**2), fmt='none', c='k', alpha=0.05)
# for z in np.arange(0.15, 0.71, bin_width):
# ax[1].plot(I,linear_gmi(I,z),color=s_m.to_rgba(z))
# ax[1].set_ylim(1.0,3.5)
# ax[1].set_xlim(16,24)
# ax[1].set_xlabel('I')
# ax[1].set_ylabel('G-I')
# ax[1].set_title('z=0.15-0.675')
# ns3,xs3=np.histogram(z_gmi,bins=np.arange(0.1325,0.7,0.035),weights=np.array(w_gmi)*np.array(w_col_gmi))
# bin_cen3 = (xs3[:-1] + xs3[1:])/2
# z_max_gmi = bin_cen3[np.where(ns3 == np.max(ns3))[0][0]]
# n_bkg = np.mean(sorted(ns3)[2:-2]);
# z_total_added = np.insert(
# np.append(bin_cen3, bin_cen3[-1] + bin_width), 0, bin_cen3[0] - bin_width)
# n_total_added = np.insert(np.append(ns3, 0), 0, 0) - n_bkg
# indi = np.where(n_total_added == np.max(n_total_added))[0][0]
# z_fit = z_total_added[[indi - 1, indi, indi + 1]]; print 'z_fit', z_fit
# n_fit = n_total_added[[indi - 1, indi, indi + 1]]; print 'n_fit', n_fit
# if (n_fit[0]<0.) and (n_fit[2]<0.):
# popt_gmi, pcov_gmi = curve_fit(gaussian_func, z_fit, [0,n_fit[1],0], p0=[n_fit[1],z_fit[1]])
# else:
# popt_gmi, pcov_gmi = curve_fit(gaussian_func, z_fit,
# n_fit, p0=[n_fit[1], z_fit[1]])
# lum_factor2 = integrate.quad( f1d, -23.455, abs_mag(22.25, tuple(popt_gmi)[1]))[0]
# density_factor2=integrate.quad(NFW_profile,0.001,cosmo.kpc_proper_per_arcmin(tuple(popt_gmi)[1]).value/1e3)[0]
# signal_gmi = tuple(popt_gmi)[0] / (lum_factor2 * density_factor2)
# z_max_fit_gmi = tuple(popt_gmi)[1]
# ax[2].bar(bin_cen3, ns3, width = 0.035, color='#1f77b4')#, alpha=0.5)
# ax[2].axvline(z_max_gmi, ls='--', color='purple',
# label='z_max=%.3f'%z_max_gmi)
# ax[2].axvline(z_max_fit_gmi, ls='--', color='tab:green',
# label='z_max_fit=%.3f'%z_max_fit_gmi)
# ax[2].axvline(redshift,color='red',label='z:%.3f'%redshift)
# ax[2].plot(z_fit,n_fit+n_bkg,'o',c='tab:purple')
# xs=np.arange(np.min(z_fit)-0.1,np.max(z_fit)+0.1,0.001)
# ax[2].plot(xs, gaussian_func(xs, *popt_gmi) + n_bkg, c='tab:green',
# ls='--', label='fit: a=%.2f, mu=%.4f' % tuple(popt_gmi))
# ax[2].legend(loc='best')
# ax[2].set_xlabel('z')
# ax[2].set_xlim(0.1,0.7)
# if np.max(n_total)<30:
# ax[2].set_ylim(0,30)
# ax[3].plot(zs_gmi,ntot_gmi,'.')
# ax[3].set_xlabel('z')
# ax[3].set_xlim(0.1,0.7)
# ax[3].axvline(z_max_fit_gmi,ls='--',color='purple',label='z_max_fit:%.2f'%z_max_fit_gmi)
# ax[3].axvline(redshift,color='red',label='z:%.2f'%redshift)
# if np.max(ntot_gmi)<70:
# ax[3].set_ylim(0,70)
# ntot_gmi_max=np.max(ntot_gmi)
# zs_gmi_max=zs_gmi[np.argmax(ntot_gmi)]
# ax[3].axvline(zs_gmi_max,ls='--',color='pink',label='zs_gmi_max:%.2f'%zs_gmi_max)
# plt.tight_layout(rect=[0, 0., 1, 0.98])
# plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_color_plots/redsq_gmi_%s_all_%.3f_%s_tilted.png' %
# (mode, signal_gmi, field), dpi=120)
# plt.close(fig)
# transparent=False
if transparent:
fig,ax=plt.subplots(figsize=(7,4))
ax.bar(bin_cen2, ns2, width=0.035, color='#1f77b4') #width = 0.025
ax.bar(bin_cen1, ns1, width=0.035, color='#ff7f0e') #width = 0.025
ax.axvline(z_max,ls='--',color='purple',label='z_max:%.2f'%z_max)
ax.set_xlabel('z')
ax.set_xlim(0.1,0.7)
if np.max(n_total)<30:
ax.set_ylim(0,30)
for axp in ax.spines:
ax.spines[axp].set_color('white')
ax.xaxis.label.set_color('white')
ax.yaxis.label.set_color('white')
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
purge('/Users/taweewat/Documents/red_sequence/pisco_color_plots/',
'redsq_transparent_%.3f_%s_tilted.png' % (signal_final,field))
plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_color_plots/redsq_transparent_%.3f_%s_tilted.png' % (signal_final,field), dpi=120, transparent=True)
plt.close(fig)
# red_dir='/Users/taweewat/Documents/red_sequence/'
# rich_filename = 'all_richness_%s.csv'%extra_name
# if not os.path.isfile(red_dir + rich_filename):
# os.system("cp %s %s"%(red_dir+'all_richness_gremove_lum_silk_zf2.5.csv',red_dir+rich_filename))
# df_richness=pd.read_csv(red_dir+rich_filename)
# df_richness[['Nmax','Nbkg_mean','Nbkg_median','zmax','amp','zmax_fit','gremove','lum_factor','density_factor']]=np.nan
# df_richness.to_csv(red_dir+rich_filename)
# df_richness=pd.read_csv(red_dir+rich_filename)
# df_richness=df_richness.copy()
# df_richness.loc[df_richness['name'] == field, 'Nmax'] = np.max(n_total)
# df_richness.loc[df_richness['name'] == field, 'Nbkg_mean'] = np.mean(n_total)
# df_richness.loc[df_richness['name'] == field, 'Nbkg_median'] = np.median(n_total)
# df_richness.loc[df_richness['name'] == field, 'zmax'] = z_max
# df_richness.loc[df_richness['name'] == field, 'amp'] = signal_final
# df_richness.loc[df_richness['name'] == field, 'zmax_fit'] = z_max_fit
# df_richness.loc[df_richness['name'] == field, 'gremove'] = nog
# df_richness.loc[df_richness['name'] == field, 'lum_factor'] = lum_factor
# df_richness.loc[df_richness['name'] == field, 'density_factor'] = density_factor
# df_richness.to_csv(red_dir+rich_filename,index=0)
red_dir='/Users/taweewat/Documents/red_sequence/'
rich_filename = 'all_richness_%s.csv'%extra_name
if not os.path.isfile(red_dir + rich_filename):
df_richness=pd.DataFrame(columns=['name','Nmax','Nbkg_mean','Nbkg_median','zmax','amp','zmax_fit','gremove','lum_factor','density_factor'])
df_richness.to_csv(red_dir+rich_filename)
df_richness=pd.read_csv(red_dir+rich_filename,index_col=0)
dic={'name':field, 'Nmax':np.max(n_total), 'Nbkg_mean':np.mean(n_total), 'Nbkg_median':np.median(n_total), 'zmax':z_max,\
'amp':signal_final, 'zmax_fit':z_max_fit, 'gremove':nog, 'lum_factor':lum_factor, 'density_factor':density_factor}
if field in df_richness['name'].values:
df_richness=df_richness[df_richness['name']!=field]
df_richness=df_richness.append(pd.Series(dic),ignore_index=True).copy()
df_richness.to_csv(red_dir+rich_filename)
# get member redshift in the figure
if img_redshift:
image_redshift(field,signal,tuple(popt)[1],mode)
# get total images with red-sequence
if img_flip:
    image_flip(field,signal,tuple(popt)[1],mode)
if colorerr:
return z_total, n_total, n_total_cerr
else:
return z_total, n_total, n_total_dup
def pisco_combine_imgs(field, mode='psf', mode2mass=''):
dir1='/Users/taweewat/Documents/red_sequence/pisco_color_plots/psf_est/'
dir2='/Users/taweewat/Documents/red_sequence/pisco_color_plots/'
dir3='/Users/taweewat/Documents/red_sequence/pisco_color_plots/'
dirout='/Users/taweewat/Documents/red_sequence/pisco_all/'
myReg = re.compile(r'(redsq_richg_%s_all_.*%s.*png)' % (mode, field))
myReg2=re.compile(r'(\d{1,3}\.\d{1,3})')
names=[]
for text in os.listdir(dir3):
if myReg.search(text) != None:
names.append(myReg.search(text).group())
if names==[]:
    print 'no files', field
    return
signal=myReg2.search(names[0]).group()
img1=dir1+'psf_est3_'+field+'_i.png'
img15='/Users/taweewat/Documents/red_sequence/pisco_color_plots/uncer_%s_%s.png'%(mode,field)
# img2=dir2+'star_galaxy_sep_12_all'+field+'.png'
img2='/Users/taweewat/Documents/red_sequence/pisco_image_redshift/img_redshift_%s_%.3f_%s.png' %(mode,float(signal),field)
print img2
img3=dir3+names[0]
images_list=[img1, img2, img3, img15]
imgs=[]
try:
    imgs = [ Image_PIL.open(i) for i in images_list ]
except IOError:
    print 'no image file', field
    return
mw = imgs[2].width/2
h = imgs[0].height+imgs[1].height/1+imgs[2].height/2
result = Image_PIL.new("RGBA", (mw, h))
y,index=0,0
for i in imgs:
if index<3:
if (index==2):# or (index==1):
i=i.resize((i.width/2,i.height/2))
result.paste(i, (0, y))
y += i.size[1]
index+=1
elif index==3:
i=i.resize((i.width/2,i.height/2))
result.paste(i, (imgs[0].width,0))
result.save(dirout + 'all_combine_%s_%s_%s_%s_%s.png' %
(extra_name, mode2mass, myReg2.search(names[0]).group(), mode, field))
def purge(dir, pattern):
for f in os.listdir(dir):
if re.search(pattern, f):
print 'remove', f
os.remove(os.path.join(dir, f))
def image_redshift(field,signal,redshift,mode):
df_total=pd.read_csv('/Users/taweewat/Documents/pisco_code/slr_output/galaxy_%s_final_%s.csv'%(mode,field),index_col=0)
df_star=pd.read_csv('/Users/taweewat/Documents/pisco_code/slr_output/star_psf_total_%s.csv'%field,index_col=0)
# df_star=df_star[df_star['SG']>0.95]
hdu=fits.open('/Users/taweewat/Documents/pisco_code/final/coadd_c%s_i.fits'%field)
img=hdu[0].data.astype(float)
img -= np.median(img)
df_total['redshift_m']=df_total.apply(lambda row: ex.redshift_f(row), axis=1)
def size_f(row):
if not np.isnan(row['w_gmr']):
size=row['w_gmr']
if not np.isnan(row['w_rmi']):
size=row['w_rmi']
if np.isnan(row['w_rmi']) and np.isnan(row['w_gmr']):
size=0
return size
df_total['size_m']=df_total.apply(lambda row: size_f(row), axis=1)
df_total=df_total[df_total['redshift_m'] > 0].copy()
norm = matplotlib.colors.Normalize(vmin=0,vmax=500)
c_m = matplotlib.cm.Greys_r
s_m = matplotlib.cm.ScalarMappable(cmap=c_m, norm=norm)
s_m.set_array([])
normalize = matplotlib.colors.Normalize(vmin=0.1, vmax=0.7)
fig, (a0, a1) = plt.subplots(1,2, figsize=(30,18), gridspec_kw = {'width_ratios':[0.8, 1]})
# a0.imshow(img, cmap=c_m, norm=norm, origin='lower')
# a0.scatter(df_star['XWIN_IMAGE_i'].values,df_star['YWIN_IMAGE_i'].values,s=100, marker='*', facecolors='none', edgecolors='yellow', label='star')
# df1i=df_total[df_total['w_rmi']>0.1]
# df2i=df_total[df_total['w_rmi']<=0.1]
# # a0.scatter(df1i['XWIN_IMAGE_i'].values,df1i['YWIN_IMAGE_i'].values,s=100, facecolors='none', edgecolors='blue')
# a0.scatter(df1i['XWIN_IMAGE_i'].values, df1i['YWIN_IMAGE_i'].values, s=100, c=df1i['size_m'].values, cmap='RdYlGn')
# a0.scatter(df2i['XWIN_IMAGE_i'].values,df2i['YWIN_IMAGE_i'].values, s=100, facecolors='none', edgecolors='white')
# a0.set_xlim(0,1600)
# a0.set_ylim(0, 2250)
try:
    img2 = mpimg.imread('/Users/taweewat/Documents/pisco_code/Chips_images/aplpy4_%s_img4.jpeg' % field)
except IOError:
    img2 = mpimg.imread('/Users/taweewat/Documents/pisco_code/Chips_images/aplpy4_%s_img.jpeg' % field)
imgplot = a0.imshow(img2)
a0.axis('off')
a0.annotate('Redshift: %.3f\nRichness: %.2f' %
(redshift, signal), xy=(150, 100), color='white')
a1.imshow(img, cmap=c_m, norm=norm, origin='lower')
a1.scatter(df_star['XWIN_IMAGE_i'].values,df_star['YWIN_IMAGE_i'].values, s=300,edgecolor='orange', facecolor='none',lw=3)
#,s=100, marker='*', facecolors='none', edgecolors='yellow', label='star')
axi = a1.scatter(df_total['XWIN_IMAGE_i'].values, df_total['YWIN_IMAGE_i'].values,
s=(df_total['size_m'].values * 200)+30, c=df_total['redshift_m'].values, cmap='tab20b', norm=normalize)
plt.colorbar(axi) # df_total['size_m'].values*300
a1.set_xlim(0, 1600)
a1.set_ylim(0, 2250)
plt.tight_layout()
left, bottom, width, height = [0.05, 0.24, 0.3, 0.2]
ax2 = fig.add_axes([left, bottom, width, height])
ax2.imshow(mpimg.imread(
'/Users/taweewat/Documents/red_sequence/pisco_color_plots/redsq_transparent_%.3f_%s_tilted.png' % (signal, field)))
ax2.axes.get_xaxis().set_visible(False)
ax2.axes.get_yaxis().set_visible(False)
ax2.axis('off')
plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_image_redshift/img_redshift_%s_%.3f_%s.png' %
(mode,signal,field), dpi=50)
plt.close(fig)
def image_flip(field, signal, redshift, mode):
img = mpimg.imread(
'/Users/taweewat/Documents/pisco_code/Chips_images/aplpy4_%s_img.jpeg' % field)
fig, ax = plt.subplots(figsize=(7, 7))
imgplot = ax.imshow(img)
ax.axis('off')
ax.annotate('Redshift: %.3f\nRichness: %.2f' %
(redshift, signal), xy=(150, 100), color='white')
left, bottom, width, height = [0.2, 0.18, 0.3, 0.2]
ax2 = fig.add_axes([left, bottom, width, height])
ax2.imshow(mpimg.imread(
'/Users/taweewat/Documents/red_sequence/pisco_color_plots/redsq_transparent_%.3f_%s_tilted.png' % (signal, field)))
ax2.axes.get_xaxis().set_visible(False)
ax2.axes.get_yaxis().set_visible(False)
ax2.axis('off')
# plt.tight_layout()
plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_image_redshift/image_flip_%s_%.3f_%s.png' %
(mode, signal, field), dpi=200)
plt.close(fig)
if __name__ == "__main__":
"""
execute:
python pisco_pipeline/pisco_photometry_all.py CHIPS111 psf slr
#updated version with no2mass option for no more comparison with known 2mass stars
python pisco_pipeline/pisco_photometry_all.py CHIPS111 psf allslr no2mass
"""
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
field = str(sys.argv[1])
mode = str(sys.argv[2]) #aper, psf, auto, hybrid
all_argv=sys.argv[3:] #allslr, slr, noslr
if (all_argv[0]=='allslr') | (all_argv[0]=='slr'):
slr=str(all_argv[0])
slr_param=True
elif all_argv[0]=='noslr':
slr='no_slr'
slr_param=False
if len(all_argv) < 2 or all_argv[1]=='2mass':
    mode2mass=''
elif all_argv[1]=='no2mass':
    mode2mass='_no2mass'
home='/Users/taweewat/Documents/pisco_code/' #09, 171208
# dirs=['ut170103/','ut170104/','ut170619/','ut170621/','ut170624/','ut171208/','ut171209/','ut171212/']
dirs=['ut190412/','ut190413/']
# 'ut171208/', 'ut171209/','ut171212/', 'ut170621/', 'ut170624/'
# dirs = ['ut170621/','ut170624/']
# dirs = ['ut170619/']
# dirs = ['ut170103/']
names=[]
myReg=re.compile(r'(CHIPS\d{4}[+-]\d{4})|(Field\d{3})')
for di in dirs:
dir=home+di
for text in os.listdir(dir):
if myReg.search(text) != None:
names.append(myReg.search(text).group())
all_fields=list(set(names))
# print all_fields
infile = open('/Users/taweewat/Documents/xray_project/code_github/allremove_chips.txt', 'r')
exception = [i.strip() for i in infile.readlines()]
all_fields_cut = all_fields[:]
all_fields_cut = ['SDSS603','SDSS501','SDSS123']
print all_fields_cut
# all_fields_cut = ['CHIPS1422-2728']
notgoflag=True
z_total_all,n_total_all,n_total_dup_all=[],[],[]
for index, field in enumerate(all_fields_cut):
print field, '%i/%i' % (index, len(all_fields_cut))
# if field == 'CHIPS0122-2646':
# notgoflag = False; continue
# if notgoflag:
# continue
if field in exception:
continue
if field in ['CHIPS1933-1511']:
continue
if slr=='allslr':
print 'allslr'
pisco_photometry_v4(field)
elif slr=='slr':
# star_galaxy_bleem(field)
pisco_cut_frame(field)
pisco_photometry_psf_v4(field, mode=mode, mode2mass=mode2mass, slr=slr_param)
purge('/Users/taweewat/Documents/red_sequence/pisco_color_plots/'\
,r'(redsq_%s_all_.*%s.*png)'%(mode,field))
# pisco_tilt_resequence(field, mode='psf', mode2mass='')
z_total=pisco_tilt_resequence(field, mode=mode, mode2mass=mode2mass)
# z_total_all.append(z_total)
# n_total_all.append(n_total)
# n_total_dup_all.append(n_total_dup)
# pisco_combine_imgs(field, mode=mode, mode2mass=mode2mass)
# pickle.dump( [z_total_all,n_total_all,n_total_dup_all], open( "pickle_all_richness_%s.pickle"%extra_name, "wb" ) )
# print 'save pickle file at', "pickle_all_richness_%s.pickle" % extra_name
elif slr == 'no_slr':
pisco_cut_frame(field)
pisco_photometry_psf_v4(field, mode=mode, mode2mass=mode2mass, slr=slr_param)
purge('/Users/taweewat/Documents/red_sequence/pisco_color_plots/'\
,r'(redsq_%s_all_.*%s.*png)'%(mode,field))
z_total=pisco_tilt_resequence(field, mode=mode, mode2mass=mode2mass)
# z_total_all.append(z_total)
# n_total_all.append(n_total)
# n_total_dup_all.append(n_total_dup)
# pisco_combine_imgs(field, mode=mode, mode2mass=mode2mass)
# pickle.dump( [z_total_all,n_total_all,n_total_dup_all], open( "pickle_all_richness_%s.pickle"%extra_name, "wb" ) )
# print 'save pickle file at', "pickle_all_richness_%s.pickle" % extra_name
purge('final', "proj_coadd_c%s_.*\.fits" % field)
purge('.', "proto_psf_%s_.*\.fits" % field)
purge('.', "samp_psf_%s_.*\.fits" % field)
purge('.', "resi_psf_%s_.*\.fits" % field)
purge('.', "snap_psf_%s_.*\.fits" % field)
purge('.', "chi_psf_%s_.*\.fits" % field)
# purge('psfex_output', "psf_%s_.*\.fits" % field)
# purge('slr_output', "a_psf_%s_.*\.fits" % field)
purge('final', "coadd_c%s_sq_.*\.fits" % field)
burakbayramli/classnotes | stat/stat_065_powerlaw/powerlaw.py | mit
#The MIT License (MIT)
#
#Copyright (c) 2013 Jeff Alstott
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
# as described in https://docs.python.org/2/library/functions.html#print
from __future__ import print_function
import sys
__version__ = "1.3.4"
class Fit(object):
"""
A fit of a data set to various probability distributions, namely power
laws. For fits to power laws, the methods of Clauset et al. 2007 are used.
These methods identify the portion of the tail of the distribution that
follows a power law, beyond a value xmin. If no xmin is
provided, the optimal one is calculated and assigned at initialization.
Parameters
----------
data : list or array
discrete : boolean, optional
Whether the data is discrete (integers).
xmin : int or float, optional
The data value beyond which distributions should be fitted. If
None an optimal one will be calculated.
xmax : int or float, optional
The maximum value of the fitted distributions.
estimate_discrete : bool, optional
Whether to estimate the fit of a discrete power law using fast
analytical methods, instead of calculating the fit exactly with
slow numerical methods. Very accurate with xmin>6
sigma_threshold : float, optional
Upper limit on the standard error of the power law fit. Used after
fitting, when identifying valid xmin values.
parameter_range : dict, optional
Dictionary of valid parameter ranges for fitting. Formatted as a
dictionary of parameter names ('alpha' and/or 'sigma') and tuples
of their lower and upper limits (ex. (1.5, 2.5), (None, .1))
"""
def __init__(self, data,
discrete=False,
xmin=None, xmax=None,
fit_method='Likelihood',
estimate_discrete=True,
discrete_approximation='round',
sigma_threshold=None,
parameter_range=None,
fit_optimizer=None,
xmin_distance='D',
**kwargs):
self.data_original = data
# import logging
from numpy import asarray
self.data = asarray(self.data_original, dtype='float')
self.discrete = discrete
self.fit_method = fit_method
self.estimate_discrete = estimate_discrete
self.discrete_approximation = discrete_approximation
self.sigma_threshold = sigma_threshold
self.parameter_range = parameter_range
self.given_xmin = xmin
self.given_xmax = xmax
self.xmin = self.given_xmin
self.xmax = self.given_xmax
self.xmin_distance = xmin_distance
if (self.data<=0).any():
    print("Values less than or equal to 0 in data. Throwing out 0 or negative values", file=sys.stderr)
    self.data = self.data[self.data>0]
if self.xmax:
self.xmax = float(self.xmax)
self.fixed_xmax = True
n_above_max = sum(self.data>self.xmax)
self.data = self.data[self.data<=self.xmax]
else:
n_above_max = 0
self.fixed_xmax = False
if not all(self.data[i] <= self.data[i+1] for i in range(len(self.data)-1)):
from numpy import sort
self.data = sort(self.data)
self.fitting_cdf_bins, self.fitting_cdf = cdf(self.data, xmin=None, xmax=self.xmax)
if xmin and type(xmin)!=tuple and type(xmin)!=list:
self.fixed_xmin = True
self.xmin = float(xmin)
self.noise_flag = None
pl = Power_Law(xmin=self.xmin,
xmax=self.xmax,
discrete=self.discrete,
fit_method=self.fit_method,
estimate_discrete=self.estimate_discrete,
data=self.data,
parameter_range=self.parameter_range)
setattr(self,self.xmin_distance, getattr(pl, self.xmin_distance))
self.alpha = pl.alpha
self.sigma = pl.sigma
#self.power_law = pl
else:
self.fixed_xmin=False
print("Calculating best minimal value for power law fit", file=sys.stderr)
self.find_xmin()
self.data = self.data[self.data>=self.xmin]
self.n = float(len(self.data))
self.n_tail = self.n + n_above_max
self.supported_distributions = {'power_law': Power_Law,
'lognormal': Lognormal,
'exponential': Exponential,
'truncated_power_law': Truncated_Power_Law,
'stretched_exponential': Stretched_Exponential,
}
#'gamma': None}
def __getattr__(self, name):
if name in self.supported_distributions.keys():
#from string import capwords
#dist = capwords(name, '_')
#dist = globals()[dist] #Seems a hack. Might try import powerlaw; getattr(powerlaw, dist)
dist = self.supported_distributions[name]
if dist == Power_Law:
parameter_range = self.parameter_range
else:
parameter_range = None
setattr(self,
name,
dist(data=self.data,
xmin=self.xmin,
xmax=self.xmax,
discrete=self.discrete,
fit_method=self.fit_method,
estimate_discrete=self.estimate_discrete,
discrete_approximation=self.discrete_approximation,
parameter_range=parameter_range,
parent_Fit=self))
return getattr(self, name)
else:
raise AttributeError(name)
def find_xmin(self, xmin_distance=None):
"""
Returns the optimal xmin beyond which the scaling regime of the power
law fits best. The attribute self.xmin of the Fit object is also set.
The optimal xmin beyond which the scaling regime of the power law fits
best is identified by minimizing the Kolmogorov-Smirnov distance
between the data and the theoretical power law fit.
This is the method of Clauset et al. 2007.
"""
from numpy import unique, asarray, argmin
#Much of the rest of this function was inspired by Adam Ginsburg's plfit code,
#specifically the mapping and sigma threshold behavior:
#http://code.google.com/p/agpy/source/browse/trunk/plfit/plfit.py?spec=svn359&r=357
if not self.given_xmin:
possible_xmins = self.data
else:
possible_ind = min(self.given_xmin)<=self.data
possible_ind *= self.data<=max(self.given_xmin)
possible_xmins = self.data[possible_ind]
xmins, xmin_indices = unique(possible_xmins, return_index=True)
#Don't look at last xmin, as that's also the xmax, and we want to at least have TWO points to fit!
xmins = xmins[:-1]
xmin_indices = xmin_indices[:-1]
if xmin_distance is None:
xmin_distance = self.xmin_distance
if len(xmins)<=0:
print("Less than 2 unique data values left after xmin and xmax "
"options! Cannot fit. Returning nans.", file=sys.stderr)
from numpy import nan, array
self.xmin = nan
self.D = nan
self.V = nan
self.Asquare = nan
self.Kappa = nan
self.alpha = nan
self.sigma = nan
self.n_tail = nan
setattr(self, xmin_distance+'s', array([nan]))
self.alphas = array([nan])
self.sigmas = array([nan])
self.in_ranges = array([nan])
self.xmins = array([nan])
self.noise_flag = True
return self.xmin
def fit_function(xmin):
pl = Power_Law(xmin=xmin,
xmax=self.xmax,
discrete=self.discrete,
estimate_discrete=self.estimate_discrete,
fit_method=self.fit_method,
data=self.data,
parameter_range=self.parameter_range,
parent_Fit=self)
return getattr(pl, xmin_distance), pl.alpha, pl.sigma, pl.in_range()
fits = asarray(list(map(fit_function, xmins)))
# logging.warning(fits.shape)
setattr(self, xmin_distance+'s', fits[:,0])
self.alphas = fits[:,1]
self.sigmas = fits[:,2]
self.in_ranges = fits[:,3].astype(bool)
self.xmins = xmins
good_values = self.in_ranges
if self.sigma_threshold:
good_values = good_values * (self.sigmas < self.sigma_threshold)
if good_values.all():
min_D_index = argmin(getattr(self, xmin_distance+'s'))
self.noise_flag = False
elif not good_values.any():
min_D_index = argmin(getattr(self, xmin_distance+'s'))
self.noise_flag = True
else:
from numpy.ma import masked_array
masked_Ds = masked_array(getattr(self, xmin_distance+'s'), mask=~good_values)
min_D_index = masked_Ds.argmin()
self.noise_flag = False
if self.noise_flag:
print("No valid fits found.", file=sys.stderr)
#Set the Fit's xmin to the optimal xmin
self.xmin = xmins[min_D_index]
setattr(self, xmin_distance, getattr(self, xmin_distance+'s')[min_D_index])
self.alpha = self.alphas[min_D_index]
self.sigma = self.sigmas[min_D_index]
#Update the fitting CDF given the new xmin, in case other objects, like
#Distributions, want to use it for fitting (like if they do KS fitting)
self.fitting_cdf_bins, self.fitting_cdf = self.cdf()
return self.xmin
def nested_distribution_compare(self, dist1, dist2, nested=True, **kwargs):
"""
Returns the loglikelihood ratio, and its p-value, between the two
distribution fits, assuming the candidate distributions are nested.
Parameters
----------
dist1 : string
Name of the first candidate distribution (ex. 'power_law')
dist2 : string
Name of the second candidate distribution (ex. 'exponential')
nested : bool or None, optional
Whether to assume the candidate distributions are nested versions
of each other. None assumes not unless the name of one distribution
is a substring of the other. True by default.
Returns
-------
R : float
Loglikelihood ratio of the two distributions' fit to the data. If
greater than 0, the first distribution is preferred. If less than
0, the second distribution is preferred.
p : float
Significance of R
"""
return self.distribution_compare(dist1, dist2, nested=nested, **kwargs)
def distribution_compare(self, dist1, dist2, nested=None, **kwargs):
"""
Returns the loglikelihood ratio, and its p-value, between the two
distribution fits, assuming the candidate distributions are nested.
Parameters
----------
dist1 : string
Name of the first candidate distribution (ex. 'power_law')
dist2 : string
Name of the second candidate distribution (ex. 'exponential')
nested : bool or None, optional
Whether to assume the candidate distributions are nested versions
of each other. None assumes not unless the name of one distribution
is a substring of the other.
Returns
-------
R : float
Loglikelihood ratio of the two distributions' fit to the data. If
greater than 0, the first distribution is preferred. If less than
0, the second distribution is preferred.
p : float
Significance of R
"""
if ((dist1 in dist2) or (dist2 in dist1)) and nested is None:
print("Assuming nested distributions", file=sys.stderr)
nested = True
dist1 = getattr(self, dist1)
dist2 = getattr(self, dist2)
loglikelihoods1 = dist1.loglikelihoods(self.data)
loglikelihoods2 = dist2.loglikelihoods(self.data)
return loglikelihood_ratio(
loglikelihoods1, loglikelihoods2,
nested=nested,
**kwargs)
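# e.g. R, p = fit.distribution_compare('power_law', 'exponential')
# R > 0 (with small p) favors the power law; R < 0 favors the exponential.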
def loglikelihood_ratio(self, dist1, dist2, nested=None, **kwargs):
"""
Another name for distribution_compare.
"""
return self.distribution_compare(dist1, dist2, nested=nested, **kwargs)
def cdf(self, original_data=False, survival=False, **kwargs):
"""
Returns the cumulative distribution function of the data.
Parameters
----------
original_data : bool, optional
Whether to use all of the data initially passed to the Fit object.
If False, uses only the data used for the fit (within xmin and
xmax.)
survival : bool, optional
Whether to return the complementary cumulative distribution
function, 1-CDF, also known as the survival function.
Returns
-------
X : array
The sorted, unique values in the data.
probabilities : array
The portion of the data that is less than or equal to X.
"""
if original_data:
data = self.data_original
xmin = None
xmax = None
else:
data = self.data
xmin = self.xmin
xmax = self.xmax
return cdf(data, xmin=xmin, xmax=xmax, survival=survival,
**kwargs)
def ccdf(self, original_data=False, survival=True, **kwargs):
"""
Returns the complementary cumulative distribution function of the data.
Parameters
----------
original_data : bool, optional
Whether to use all of the data initially passed to the Fit object.
If False, uses only the data used for the fit (within xmin and
xmax.)
survival : bool, optional
Whether to return the complementary cumulative distribution
function, also known as the survival function, or the cumulative
distribution function, 1-CCDF.
Returns
-------
X : array
The sorted, unique values in the data.
probabilities : array
The portion of the data that is greater than or equal to X.
"""
if original_data:
data = self.data_original
xmin = None
xmax = None
else:
data = self.data
xmin = self.xmin
xmax = self.xmax
return cdf(data, xmin=xmin, xmax=xmax, survival=survival,
**kwargs)
def pdf(self, original_data=False, **kwargs):
"""
Returns the probability density function (normalized histogram) of the
data.
Parameters
----------
original_data : bool, optional
Whether to use all of the data initially passed to the Fit object.
If False, uses only the data used for the fit (within xmin and
xmax.)
Returns
-------
bin_edges : array
The edges of the bins of the probability density function.
probabilities : array
The portion of the data that is within the bin. Length 1 less than
bin_edges, as it corresponds to the spaces between them.
"""
if original_data:
data = self.data_original
xmin = None
xmax = None
else:
data = self.data
xmin = self.xmin
xmax = self.xmax
edges, hist = pdf(data, xmin=xmin, xmax=xmax, **kwargs)
return edges, hist
def plot_cdf(self, ax=None, original_data=False, survival=False, **kwargs):
"""
Plots the CDF to a new figure or to axis ax if provided.
Parameters
----------
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
original_data : bool, optional
Whether to use all of the data initially passed to the Fit object.
If False, uses only the data used for the fit (within xmin and
xmax.)
survival : bool, optional
Whether to plot a CDF (False) or CCDF (True). False by default.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
if original_data:
data = self.data_original
else:
data = self.data
return plot_cdf(data, ax=ax, survival=survival, **kwargs)
def plot_ccdf(self, ax=None, original_data=False, survival=True, **kwargs):
"""
Plots the CCDF to a new figure or to axis ax if provided.
Parameters
----------
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
original_data : bool, optional
Whether to use all of the data initially passed to the Fit object.
If False, uses only the data used for the fit (within xmin and
xmax.)
survival : bool, optional
Whether to plot a CDF (False) or CCDF (True). True by default.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
if original_data:
data = self.data_original
else:
data = self.data
return plot_cdf(data, ax=ax, survival=survival, **kwargs)
def plot_pdf(self, ax=None, original_data=False,
linear_bins=False, **kwargs):
"""
Plots the probability density function (PDF) or the data to a new figure
or to axis ax if provided.
Parameters
----------
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
original_data : bool, optional
Whether to use all of the data initially passed to the Fit object.
If False, uses only the data used for the fit (within xmin and
xmax.)
linear_bins : bool, optional
Whether to use linearly spaced bins (True) or logarithmically
spaced bins (False). False by default.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
if original_data:
data = self.data_original
else:
data = self.data
return plot_pdf(data, ax=ax, linear_bins=linear_bins, **kwargs)
class Distribution(object):
"""
An abstract class for theoretical probability distributions. Can be created
with particular parameter values, or fitted to a dataset. Fitting is
by maximum likelihood estimation by default.
Parameters
----------
xmin : int or float, optional
The data value beyond which distributions should be fitted. If
None an optimal one will be calculated.
xmax : int or float, optional
The maximum value of the fitted distributions.
discrete : boolean, optional
Whether the distribution is discrete (integers).
data : list or array, optional
The data to which to fit the distribution. If provided, the fit will
be created at initialization.
fit_method : "Likelihood" or "KS", optional
        Method for fitting the distribution. "Likelihood" is maximum likelihood
        estimation. "KS" is minimal distance estimation using the
        Kolmogorov-Smirnov test.
parameters : tuple or list, optional
The parameters of the distribution. Will be overridden if data is
given or the fit method is called.
parameter_range : dict, optional
        Dictionary of valid parameter ranges for fitting. Formatted as a
        dictionary of parameter names ('alpha' and/or 'sigma') and tuples
        of their lower and upper limits (ex. {'alpha': (1.5, 2.5), 'sigma': (None, .1)}).
initial_parameters : tuple or list, optional
Initial values for the parameter in the fitting search.
discrete_approximation : "round", "xmax" or int, optional
        If the discrete form of the theoretical distribution is not known,
it can be estimated. One estimation method is "round", which sums
the probability mass from x-.5 to x+.5 for each data point. The other
option is to calculate the probability for each x from 1 to N and
normalize by their sum. N can be "xmax" or an integer.
parent_Fit : Fit object, optional
A Fit object from which to use data, if it exists.
"""
def __init__(self,
xmin=1, xmax=None,
discrete=False,
fit_method='Likelihood',
data=None,
parameters=None,
parameter_range=None,
initial_parameters=None,
discrete_approximation='round',
parent_Fit=None,
**kwargs):
self.xmin = xmin
self.xmax = xmax
self.discrete = discrete
self.fit_method = fit_method
self.discrete_approximation = discrete_approximation
self.parameter1 = None
self.parameter2 = None
self.parameter3 = None
self.parameter1_name = None
self.parameter2_name = None
self.parameter3_name = None
if parent_Fit:
self.parent_Fit = parent_Fit
if parameters is not None:
self.parameters(parameters)
if parameter_range:
self.parameter_range(parameter_range)
if initial_parameters:
self._given_initial_parameters(initial_parameters)
        if (data is not None) and not (parameter_range and hasattr(self, 'parent_Fit')):
self.fit(data)
def fit(self, data=None, suppress_output=False):
"""
Fits the parameters of the distribution to the data. Uses options set
at initialization.
"""
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
if self.fit_method=='Likelihood':
def fit_function(params):
self.parameters(params)
return -sum(self.loglikelihoods(data))
elif self.fit_method=='KS':
def fit_function(params):
self.parameters(params)
self.KS(data)
return self.D
from scipy.optimize import fmin
        parameters, negative_loglikelihood, n_iter, funcalls, warnflag = \
            fmin(
                fit_function,
                self.initial_parameters(data),
                full_output=1,
                disp=False)
self.parameters(parameters)
if not self.in_range():
self.noise_flag=True
else:
self.noise_flag=False
if self.noise_flag and not suppress_output:
print("No valid fits found.", file=sys.stderr)
        self.loglikelihood = -negative_loglikelihood
self.KS(data)
def KS(self, data=None):
"""
Returns the Kolmogorov-Smirnov distance D between the distribution and
the data. Also sets the properties D+, D-, V (the Kuiper testing
statistic), and Kappa (1 + the average difference between the
theoretical and empirical distributions).
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
"""
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
if len(data)<2:
print("Not enough data. Returning nan", file=sys.stderr)
from numpy import nan
self.D = nan
self.D_plus = nan
self.D_minus = nan
self.Kappa = nan
self.V = nan
self.Asquare = nan
return self.D
if hasattr(self, 'parent_Fit'):
bins = self.parent_Fit.fitting_cdf_bins
Actual_CDF = self.parent_Fit.fitting_cdf
ind = bins>=self.xmin
bins = bins[ind]
Actual_CDF = Actual_CDF[ind]
dropped_probability = Actual_CDF[0]
Actual_CDF -= dropped_probability
Actual_CDF /= 1-dropped_probability
else:
bins, Actual_CDF = cdf(data)
Theoretical_CDF = self.cdf(bins)
CDF_diff = Theoretical_CDF - Actual_CDF
self.D_plus = CDF_diff.max()
self.D_minus = -1.0*CDF_diff.min()
from numpy import mean
self.Kappa = 1 + mean(CDF_diff)
self.V = self.D_plus + self.D_minus
self.D = max(self.D_plus, self.D_minus)
self.Asquare = sum((
(CDF_diff**2) /
(Theoretical_CDF * (1 - Theoretical_CDF))
)[1:]
)
return self.D
    def ccdf(self, data=None, survival=True):
"""
The complementary cumulative distribution function (CCDF) of the
theoretical distribution. Calculated for the values given in data
within xmin and xmax, if present.
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
survival : bool, optional
Whether to calculate a CDF (False) or CCDF (True).
True by default.
Returns
-------
        probabilities : array
            The probability that a value drawn from the theoretical
            distribution is greater than each value in data.
"""
return self.cdf(data=data, survival=survival)
    def cdf(self, data=None, survival=False):
"""
The cumulative distribution function (CDF) of the theoretical
distribution. Calculated for the values given in data within xmin and
xmax, if present.
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
survival : bool, optional
Whether to calculate a CDF (False) or CCDF (True).
False by default.
Returns
-------
        probabilities : array
            The probability that a value drawn from the theoretical
            distribution is less than or equal to each value in data.
"""
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
n = len(data)
from sys import float_info
if not self.in_range():
from numpy import tile
return tile(10**float_info.min_10_exp, n)
if self._cdf_xmin==1:
#If cdf_xmin is 1, it means we don't have the numerical accuracy to
#calculate this tail. So we make everything 1, indicating
#we're at the end of the tail. Such an xmin should be thrown
#out by the KS test.
from numpy import ones
CDF = ones(n)
return CDF
CDF = self._cdf_base_function(data) - self._cdf_xmin
norm = 1 - self._cdf_xmin
if self.xmax:
norm = norm - (1 - self._cdf_base_function(self.xmax))
CDF = CDF/norm
if survival:
CDF = 1 - CDF
possible_numerical_error = False
from numpy import isnan, min
if isnan(min(CDF)):
print("'nan' in fit cumulative distribution values.", file=sys.stderr)
possible_numerical_error = True
#if 0 in CDF or 1 in CDF:
# print("0 or 1 in fit cumulative distribution values.", file=sys.stderr)
# possible_numerical_error = True
if possible_numerical_error:
print("Likely underflow or overflow error: the optimal fit for this distribution gives values that are so extreme that we lack the numerical precision to calculate them.", file=sys.stderr)
return CDF
@property
def _cdf_xmin(self):
return self._cdf_base_function(self.xmin)
def pdf(self, data=None):
"""
Returns the probability density function (normalized histogram) of the
theoretical distribution for the values in data within xmin and xmax,
if present.
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
Returns
-------
probabilities : array
"""
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
n = len(data)
from sys import float_info
if not self.in_range():
from numpy import tile
return tile(10**float_info.min_10_exp, n)
if not self.discrete:
f = self._pdf_base_function(data)
C = self._pdf_continuous_normalizer
likelihoods = f*C
else:
if self._pdf_discrete_normalizer:
f = self._pdf_base_function(data)
C = self._pdf_discrete_normalizer
likelihoods = f*C
elif self.discrete_approximation=='round':
lower_data = data-.5
upper_data = data+.5
#Temporarily expand xmin and xmax to be able to grab the extra bit of
#probability mass beyond the (integer) values of xmin and xmax
#Note this is a design decision. One could also say this extra
#probability "off the edge" of the distribution shouldn't be included,
#and that implementation is retained below, commented out. Note, however,
#that such a cliff means values right at xmin and xmax have half the width to
#grab probability from, and thus are lower probability than they would otherwise
#be. This is particularly concerning for values at xmin, which are typically
#the most likely and greatly influence the distribution's fit.
self.xmin -= .5
if self.xmax:
self.xmax += .5
#Clean data for invalid values before handing to cdf, which will purge them
#lower_data[lower_data<self.xmin] +=.5
#if self.xmax:
# upper_data[upper_data>self.xmax] -=.5
likelihoods = self.cdf(upper_data)-self.cdf(lower_data)
self.xmin +=.5
if self.xmax:
self.xmax -= .5
else:
if self.discrete_approximation=='xmax':
upper_limit = self.xmax
else:
upper_limit = self.discrete_approximation
# from mpmath import exp
from numpy import arange
X = arange(self.xmin, upper_limit+1)
PDF = self._pdf_base_function(X)
PDF = (PDF/sum(PDF)).astype(float)
likelihoods = PDF[(data-self.xmin).astype(int)]
likelihoods[likelihoods==0] = 10**float_info.min_10_exp
return likelihoods
@property
def _pdf_continuous_normalizer(self):
C = 1 - self._cdf_xmin
if self.xmax:
C -= 1 - self._cdf_base_function(self.xmax+1)
C = 1.0/C
return C
@property
def _pdf_discrete_normalizer(self):
return False
def parameter_range(self, r, initial_parameters=None):
"""
Set the limits on the range of valid parameters to be considered while
fitting.
Parameters
----------
r : dict
A dictionary of the parameter range. Restricted parameter
            names are keys, with tuples of the form (lower_bound,
            upper_bound) as values.
initial_parameters : tuple or list, optional
Initial parameter values to start the fitting search from.
"""
from types import FunctionType
if type(r)==FunctionType:
self._in_given_parameter_range = r
else:
self._range_dict = r
if initial_parameters:
self._given_initial_parameters = initial_parameters
        if hasattr(self, 'parent_Fit') and self.parent_Fit:
self.fit(self.parent_Fit.data)
def in_range(self):
"""
Whether the current parameters of the distribution are within the range
of valid parameters.
"""
try:
r = self._range_dict
result = True
for k in r.keys():
                #For any attributes we've specified, make sure we're above the lower bound
                #and below the upper bound (if they exist). This must be true of all of them.
lower_bound, upper_bound = r[k]
if upper_bound is not None:
result *= getattr(self, k) < upper_bound
if lower_bound is not None:
result *= getattr(self, k) > lower_bound
return result
except AttributeError:
try:
in_range = self._in_given_parameter_range(self)
except AttributeError:
in_range = self._in_standard_parameter_range()
return bool(in_range)
def initial_parameters(self, data):
"""
Return previously user-provided initial parameters or, if never
provided, calculate new ones. Default initial parameter estimates are
unique to each theoretical distribution.
"""
try:
return self._given_initial_parameters
except AttributeError:
return self._initial_parameters(data)
def likelihoods(self, data):
"""
The likelihoods of the observed data from the theoretical distribution.
Another name for the probabilities or probability density function.
"""
return self.pdf(data)
def loglikelihoods(self, data):
"""
The logarithm of the likelihoods of the observed data from the
theoretical distribution.
"""
from numpy import log
return log(self.likelihoods(data))
def plot_ccdf(self, data=None, ax=None, survival=True, **kwargs):
"""
Plots the complementary cumulative distribution function (CDF) of the
theoretical distribution for the values given in data within xmin and
xmax, if present. Plots to a new figure or to axis ax if provided.
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
survival : bool, optional
Whether to plot a CDF (False) or CCDF (True). True by default.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
        return self.plot_cdf(data, ax=ax, survival=survival, **kwargs)
def plot_cdf(self, data=None, ax=None, survival=False, **kwargs):
"""
Plots the cumulative distribution function (CDF) of the
theoretical distribution for the values given in data within xmin and
xmax, if present. Plots to a new figure or to axis ax if provided.
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
survival : bool, optional
Whether to plot a CDF (False) or CCDF (True). False by default.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
from numpy import unique
bins = unique(trim_to_range(data, xmin=self.xmin, xmax=self.xmax))
CDF = self.cdf(bins, survival=survival)
if not ax:
import matplotlib.pyplot as plt
plt.plot(bins, CDF, **kwargs)
ax = plt.gca()
else:
ax.plot(bins, CDF, **kwargs)
ax.set_xscale("log")
ax.set_yscale("log")
return ax
def plot_pdf(self, data=None, ax=None, **kwargs):
"""
Plots the probability density function (PDF) of the
theoretical distribution for the values given in data within xmin and
xmax, if present. Plots to a new figure or to axis ax if provided.
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
from numpy import unique
bins = unique(trim_to_range(data, xmin=self.xmin, xmax=self.xmax))
PDF = self.pdf(bins)
from numpy import nan
PDF[PDF==0] = nan
if not ax:
import matplotlib.pyplot as plt
plt.plot(bins, PDF, **kwargs)
ax = plt.gca()
else:
ax.plot(bins, PDF, **kwargs)
ax.set_xscale("log")
ax.set_yscale("log")
return ax
def generate_random(self,n=1, estimate_discrete=None):
"""
Generates random numbers from the theoretical probability distribution.
If xmax is present, it is currently ignored.
Parameters
----------
n : int or float
The number of random numbers to generate
estimate_discrete : boolean
For discrete distributions, whether to use a faster approximation of
the random number generator. If None, attempts to inherit
the estimate_discrete behavior used for fitting from the Distribution
object or the parent Fit object, if present. Approximations only
exist for some distributions (namely the power law). If an
approximation does not exist an estimate_discrete setting of True
will not be inherited.
Returns
-------
r : array
Random numbers drawn from the distribution
"""
from numpy.random import rand
from numpy import array
r = rand(n)
if not self.discrete:
x = self._generate_random_continuous(r)
else:
if (estimate_discrete and not hasattr(self, '_generate_random_discrete_estimate') ):
raise AttributeError("This distribution does not have an "
"estimation of the discrete form for generating simulated "
"data. Try the exact form with estimate_discrete=False.")
if estimate_discrete is None:
if not hasattr(self, '_generate_random_discrete_estimate'):
estimate_discrete = False
elif hasattr(self, 'estimate_discrete'):
estimate_discrete = self.estimate_discrete
                elif hasattr(self, 'parent_Fit'):
estimate_discrete = self.parent_Fit.estimate_discrete
else:
estimate_discrete = False
if estimate_discrete:
x = self._generate_random_discrete_estimate(r)
else:
x = array([self._double_search_discrete(R) for R in r],
dtype='float')
return x
def _double_search_discrete(self, r):
#Find a range from x1 to x2 that our random probability fits between
x2 = int(self.xmin)
while self.ccdf(data=[x2]) >= (1 - r):
x1 = x2
x2 = 2*x1
#Use binary search within that range to find the exact answer, up to
#the limit of being between two integers.
x = bisect_map(x1, x2, self.ccdf, 1-r)
return x
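# Illustrative sketch (not part of the original API): the bracket-then-bisect
# strategy used by _double_search_discrete above, shown on a generic
# monotonically decreasing function over the integers. Assumes
# func(xmin) >= target.
def _example_double_search(func, xmin, target):
    #Double the bracket until the target is crossed
    x1 = x2 = int(xmin)
    while func(x2) >= target:
        x1 = x2
        x2 = 2*x1
    #Bisect down to adjacent integers: func(x1) >= target > func(x1+1)
    while x2 > x1 + 1:
        m = (x1 + x2)//2
        if func(m) >= target:
            x1 = m
        else:
            x2 = m
    return x1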
class Power_Law(Distribution):
def __init__(self, estimate_discrete=True, **kwargs):
self.estimate_discrete = estimate_discrete
Distribution.__init__(self, **kwargs)
def parameters(self, params):
self.alpha = params[0]
self.parameter1 = self.alpha
self.parameter1_name = 'alpha'
@property
def name(self):
return "power_law"
@property
def sigma(self):
        #Only calculable after self.fit has been run, when the number of data
        #points n is established
from numpy import sqrt
return (self.alpha - 1) / sqrt(self.n)
def _in_standard_parameter_range(self):
return self.alpha>1
def fit(self, data=None):
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
self.n = len(data)
from numpy import log, sum
if not self.discrete and not self.xmax:
self.alpha = 1 + (self.n / sum(log(data/self.xmin)))
if not self.in_range():
Distribution.fit(self, data, suppress_output=True)
self.KS(data)
elif self.discrete and self.estimate_discrete and not self.xmax:
self.alpha = 1 + (self.n / sum(log(data / (self.xmin - .5))))
if not self.in_range():
Distribution.fit(self, data, suppress_output=True)
self.KS(data)
else:
Distribution.fit(self, data, suppress_output=True)
if not self.in_range():
self.noise_flag=True
else:
self.noise_flag=False
def _initial_parameters(self, data):
from numpy import log, sum
return 1 + len(data)/sum(log(data / (self.xmin)))
def _cdf_base_function(self, x):
if self.discrete:
from scipy.special import zeta
CDF = 1 - zeta(self.alpha, x)
else:
#Can this be reformulated to not reference xmin? Removal of the probability
#before xmin and after xmax is handled in Distribution.cdf(), so we don't
#strictly need this element. It doesn't hurt, for the moment.
CDF = 1-(x/self.xmin)**(-self.alpha+1)
return CDF
def _pdf_base_function(self, x):
return x**-self.alpha
@property
def _pdf_continuous_normalizer(self):
return (self.alpha-1) * self.xmin**(self.alpha-1)
@property
def _pdf_discrete_normalizer(self):
C = 1.0 - self._cdf_xmin
if self.xmax:
C -= 1 - self._cdf_base_function(self.xmax+1)
C = 1.0/C
return C
def _generate_random_continuous(self, r):
return self.xmin * (1 - r) ** (-1/(self.alpha - 1))
def _generate_random_discrete_estimate(self, r):
x = (self.xmin - 0.5) * (1 - r) ** (-1/(self.alpha - 1)) + 0.5
from numpy import around
return around(x)
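# Illustrative sketch (not part of the original API): the inverse-CDF formula
# used by Power_Law._generate_random_continuous above. For a continuous power
# law, the CCDF is (x/xmin)**(1-alpha), so solving CCDF(x) = 1-r for uniform
# r in [0, 1) gives x = xmin*(1-r)**(-1/(alpha-1)).
def _example_power_law_samples(alpha=2.5, xmin=1.0, n=1000, seed=0):
    from numpy.random import RandomState
    r = RandomState(seed).rand(n)
    return xmin * (1 - r) ** (-1 / (alpha - 1))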
class Exponential(Distribution):
def parameters(self, params):
self.Lambda = params[0]
self.parameter1 = self.Lambda
self.parameter1_name = 'lambda'
@property
def name(self):
return "exponential"
def _initial_parameters(self, data):
from numpy import mean
return 1/mean(data)
def _in_standard_parameter_range(self):
return self.Lambda>0
def _cdf_base_function(self, x):
from numpy import exp
CDF = 1 - exp(-self.Lambda*x)
return CDF
def _pdf_base_function(self, x):
from numpy import exp
return exp(-self.Lambda * x)
@property
def _pdf_continuous_normalizer(self):
from numpy import exp
return self.Lambda * exp(self.Lambda * self.xmin)
@property
def _pdf_discrete_normalizer(self):
from numpy import exp
C = (1 - exp(-self.Lambda)) * exp(self.Lambda * self.xmin)
if self.xmax:
Cxmax = (1 - exp(-self.Lambda)) * exp(self.Lambda * self.xmax)
C = 1.0/C - 1.0/Cxmax
C = 1.0/C
return C
def pdf(self, data=None):
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
if not self.discrete and self.in_range() and not self.xmax:
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
from numpy import exp
# likelihoods = exp(-Lambda*data)*\
# Lambda*exp(Lambda*xmin)
likelihoods = self.Lambda*exp(self.Lambda*(self.xmin-data))
#Simplified so as not to throw a nan from infs being divided by each other
from sys import float_info
likelihoods[likelihoods==0] = 10**float_info.min_10_exp
else:
likelihoods = Distribution.pdf(self, data)
return likelihoods
def loglikelihoods(self, data=None):
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
if not self.discrete and self.in_range() and not self.xmax:
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
from numpy import log
# likelihoods = exp(-Lambda*data)*\
# Lambda*exp(Lambda*xmin)
loglikelihoods = log(self.Lambda) + (self.Lambda*(self.xmin-data))
#Simplified so as not to throw a nan from infs being divided by each other
            from sys import float_info
            from numpy import inf
            loglikelihoods[loglikelihoods==-inf] = log(10**float_info.min_10_exp)
else:
loglikelihoods = Distribution.loglikelihoods(self, data)
return loglikelihoods
def _generate_random_continuous(self, r):
from numpy import log
return self.xmin - (1/self.Lambda) * log(1-r)
class Stretched_Exponential(Distribution):
def parameters(self, params):
self.Lambda = params[0]
self.parameter1 = self.Lambda
self.parameter1_name = 'lambda'
self.beta = params[1]
self.parameter2 = self.beta
self.parameter2_name = 'beta'
@property
def name(self):
return "stretched_exponential"
def _initial_parameters(self, data):
from numpy import mean
return (1/mean(data), 1)
def _in_standard_parameter_range(self):
return self.Lambda>0 and self.beta>0
def _cdf_base_function(self, x):
from numpy import exp
CDF = 1 - exp(-(self.Lambda*x)**self.beta)
return CDF
def _pdf_base_function(self, x):
from numpy import exp
return (((x*self.Lambda)**(self.beta-1)) *
exp(-((self.Lambda*x)**self.beta)))
@property
def _pdf_continuous_normalizer(self):
from numpy import exp
C = self.beta*self.Lambda*exp((self.Lambda*self.xmin)**self.beta)
return C
@property
def _pdf_discrete_normalizer(self):
return False
def pdf(self, data=None):
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
if not self.discrete and self.in_range() and not self.xmax:
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
from numpy import exp
likelihoods = ((data*self.Lambda)**(self.beta-1) *
self.beta * self.Lambda *
exp((self.Lambda*self.xmin)**self.beta -
(self.Lambda*data)**self.beta))
#Simplified so as not to throw a nan from infs being divided by each other
from sys import float_info
likelihoods[likelihoods==0] = 10**float_info.min_10_exp
else:
likelihoods = Distribution.pdf(self, data)
return likelihoods
def loglikelihoods(self, data=None):
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
if not self.discrete and self.in_range() and not self.xmax:
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
from numpy import log
loglikelihoods = (
log((data*self.Lambda)**(self.beta-1) *
                    self.beta * self.Lambda) +
(self.Lambda*self.xmin)**self.beta -
(self.Lambda*data)**self.beta)
#Simplified so as not to throw a nan from infs being divided by each other
from sys import float_info
from numpy import inf
loglikelihoods[loglikelihoods==-inf] = log(10**float_info.min_10_exp)
else:
loglikelihoods = Distribution.loglikelihoods(self, data)
return loglikelihoods
def _generate_random_continuous(self, r):
from numpy import log
# return ( (self.xmin**self.beta) -
# (1/self.Lambda) * log(1-r) )**(1/self.beta)
return (1/self.Lambda)* ( (self.Lambda*self.xmin)**self.beta -
log(1-r) )**(1/self.beta)
class Truncated_Power_Law(Distribution):
def parameters(self, params):
self.alpha = params[0]
self.parameter1 = self.alpha
self.parameter1_name = 'alpha'
self.Lambda = params[1]
self.parameter2 = self.Lambda
self.parameter2_name = 'lambda'
@property
def name(self):
return "truncated_power_law"
def _initial_parameters(self, data):
from numpy import log, sum, mean
alpha = 1 + len(data)/sum( log( data / (self.xmin) ))
Lambda = 1/mean(data)
return (alpha, Lambda)
def _in_standard_parameter_range(self):
return self.Lambda>0 and self.alpha>1
def _cdf_base_function(self, x):
from mpmath import gammainc
from numpy import vectorize
gammainc = vectorize(gammainc)
CDF = ( (gammainc(1-self.alpha,self.Lambda*x)).astype('float') /
self.Lambda**(1-self.alpha)
)
        CDF = 1 - CDF
return CDF
def _pdf_base_function(self, x):
from numpy import exp
return x**(-self.alpha) * exp(-self.Lambda * x)
@property
def _pdf_continuous_normalizer(self):
from mpmath import gammainc
C = ( self.Lambda**(1-self.alpha) /
float(gammainc(1-self.alpha,self.Lambda*self.xmin)))
return C
@property
def _pdf_discrete_normalizer(self):
        #Debug toggle: change to `if 1:` to bypass the exact lerchphi
        #normalizer below and fall back to the generic discrete approximation.
        if 0:
            return False
from mpmath import lerchphi
from mpmath import exp # faster /here/ than numpy.exp
C = ( float(exp(self.xmin * self.Lambda) /
lerchphi(exp(-self.Lambda), self.alpha, self.xmin)) )
if self.xmax:
Cxmax = ( float(exp(self.xmax * self.Lambda) /
lerchphi(exp(-self.Lambda), self.alpha, self.xmax)) )
C = 1.0/C - 1.0/Cxmax
C = 1.0/C
return C
def pdf(self, data=None):
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
        #The analytic continuous branch below is disabled by the trailing
        #`and False`; all calls currently fall through to Distribution.pdf.
        if not self.discrete and self.in_range() and False:
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
from numpy import exp
from mpmath import gammainc
# likelihoods = (data**-alpha)*exp(-Lambda*data)*\
# (Lambda**(1-alpha))/\
# float(gammainc(1-alpha,Lambda*xmin))
likelihoods = ( self.Lambda**(1-self.alpha) /
(data**self.alpha *
exp(self.Lambda*data) *
gammainc(1-self.alpha,self.Lambda*self.xmin)
).astype(float)
)
#Simplified so as not to throw a nan from infs being divided by each other
from sys import float_info
likelihoods[likelihoods==0] = 10**float_info.min_10_exp
else:
likelihoods = Distribution.pdf(self, data)
return likelihoods
def _generate_random_continuous(self, r):
def helper(r):
from numpy import log
from numpy.random import rand
while 1:
x = self.xmin - (1/self.Lambda) * log(1-r)
p = ( x/self.xmin )**-self.alpha
if rand()<p:
return x
r = rand()
from numpy import array
return array(list(map(helper, r)))
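# Illustrative sketch (not part of the original API): the acceptance-rejection
# scheme used by Truncated_Power_Law._generate_random_continuous above.
# Exponential proposals are accepted with probability (x/xmin)**-alpha, which
# yields draws with density proportional to x**-alpha * exp(-Lambda*x).
def _example_truncated_power_law_sample(alpha=2.0, Lambda=.1, xmin=1.0):
    from numpy import log
    from numpy.random import rand
    while 1:
        x = xmin - (1/Lambda) * log(1 - rand())  #exponential proposal above xmin
        if rand() < (x/xmin)**-alpha:  #power-law thinning
            return x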
class Lognormal(Distribution):
def parameters(self, params):
self.mu = params[0]
self.parameter1 = self.mu
self.parameter1_name = 'mu'
self.sigma = params[1]
self.parameter2 = self.sigma
self.parameter2_name = 'sigma'
@property
def name(self):
return "lognormal"
def pdf(self, data=None):
"""
Returns the probability density function (normalized histogram) of the
theoretical distribution for the values in data within xmin and xmax,
if present.
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
Returns
-------
probabilities : array
"""
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
n = len(data)
from sys import float_info
from numpy import tile
if not self.in_range():
return tile(10**float_info.min_10_exp, n)
if not self.discrete:
f = self._pdf_base_function(data)
C = self._pdf_continuous_normalizer
if C > 0:
likelihoods = f/C
else:
likelihoods = tile(10**float_info.min_10_exp, n)
else:
if self._pdf_discrete_normalizer:
f = self._pdf_base_function(data)
C = self._pdf_discrete_normalizer
likelihoods = f*C
elif self.discrete_approximation=='round':
likelihoods = self._round_discrete_approx(data)
else:
if self.discrete_approximation=='xmax':
upper_limit = self.xmax
else:
upper_limit = self.discrete_approximation
# from mpmath import exp
from numpy import arange
X = arange(self.xmin, upper_limit+1)
PDF = self._pdf_base_function(X)
PDF = (PDF/sum(PDF)).astype(float)
likelihoods = PDF[(data-self.xmin).astype(int)]
likelihoods[likelihoods==0] = 10**float_info.min_10_exp
return likelihoods
def _round_discrete_approx(self, data):
"""
This function reformulates the calculation to avoid underflow errors
with the erf function. As implemented, erf(x) quickly approaches 1
while erfc(x) is more accurate. Since erfc(x) = 1 - erf(x),
calculations can be written using erfc(x)
"""
import numpy as np
import scipy.special as ss
""" Temporarily expand xmin and xmax to be able to grab the extra bit of
probability mass beyond the (integer) values of xmin and xmax
Note this is a design decision. One could also say this extra
probability "off the edge" of the distribution shouldn't be included,
and that implementation is retained below, commented out. Note, however,
that such a cliff means values right at xmin and xmax have half the width to
grab probability from, and thus are lower probability than they would otherwise
be. This is particularly concerning for values at xmin, which are typically
the most likely and greatly influence the distribution's fit.
"""
lower_data = data-.5
upper_data = data+.5
self.xmin -= .5
if self.xmax:
self.xmax += .5
# revised calculation written to avoid underflow errors
arg1 = (np.log(lower_data)-self.mu) / (np.sqrt(2)*self.sigma)
arg2 = (np.log(upper_data)-self.mu) / (np.sqrt(2)*self.sigma)
likelihoods = 0.5*(ss.erfc(arg1) - ss.erfc(arg2))
if not self.xmax:
norm = 0.5*ss.erfc((np.log(self.xmin)-self.mu) / (np.sqrt(2)*self.sigma))
else:
# may still need to be fixed
norm = - self._cdf_xmin + self._cdf_base_function(self.xmax)
self.xmin +=.5
if self.xmax:
self.xmax -= .5
return likelihoods/norm
def cdf(self, data=None, survival=False):
"""
The cumulative distribution function (CDF) of the lognormal
distribution. Calculated for the values given in data within xmin and
xmax, if present. Calculation was reformulated to avoid underflow
errors
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
survival : bool, optional
Whether to calculate a CDF (False) or CCDF (True).
False by default.
Returns
-------
        probabilities : array
            The probability that a value drawn from the theoretical
            distribution is less than or equal to each value in data.
"""
from numpy import log, sqrt
import scipy.special as ss
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
n = len(data)
from sys import float_info
if not self.in_range():
from numpy import tile
return tile(10**float_info.min_10_exp, n)
val_data = (log(data)-self.mu) / (sqrt(2)*self.sigma)
val_xmin = (log(self.xmin)-self.mu) / (sqrt(2)*self.sigma)
CDF = 0.5 * (ss.erfc(val_xmin) - ss.erfc(val_data))
norm = 0.5 * ss.erfc(val_xmin)
if self.xmax:
# TO DO: Improve this line further for better numerical accuracy?
norm = norm - (1 - self._cdf_base_function(self.xmax))
CDF = CDF/norm
if survival:
CDF = 1 - CDF
possible_numerical_error = False
from numpy import isnan, min
if isnan(min(CDF)):
print("'nan' in fit cumulative distribution values.", file=sys.stderr)
possible_numerical_error = True
#if 0 in CDF or 1 in CDF:
# print("0 or 1 in fit cumulative distribution values.", file=sys.stderr)
# possible_numerical_error = True
if possible_numerical_error:
print("Likely underflow or overflow error: the optimal fit for this distribution gives values that are so extreme that we lack the numerical precision to calculate them.", file=sys.stderr)
return CDF
def _initial_parameters(self, data):
from numpy import mean, std, log
logdata = log(data)
return (mean(logdata), std(logdata))
def _in_standard_parameter_range(self):
#The standard deviation can't be negative
return self.sigma>0
def _cdf_base_function(self, x):
from numpy import sqrt, log
from scipy.special import erf
return 0.5 + ( 0.5 *
erf((log(x)-self.mu) / (sqrt(2)*self.sigma)))
def _pdf_base_function(self, x):
from numpy import exp, log
return ((1.0/x) *
exp(-( (log(x) - self.mu)**2 )/(2*self.sigma**2)))
@property
def _pdf_continuous_normalizer(self):
from mpmath import erfc
# from scipy.special import erfc
from scipy.constants import pi
from numpy import sqrt, log
C = (erfc((log(self.xmin) - self.mu) / (sqrt(2) * self.sigma)) /
sqrt(2/(pi*self.sigma**2)))
return float(C)
@property
def _pdf_discrete_normalizer(self):
return False
def _generate_random_continuous(self, r):
from numpy import exp, sqrt, log, frompyfunc
from mpmath import erf, erfinv
#This is a long, complicated function broken into parts.
#We use mpmath to maintain numerical accuracy as we run through
#erf and erfinv, until we get to more sane numbers. Thanks to
#Wolfram Alpha for producing the appropriate inverse of the CCDF
#for me, which is what we need to calculate these things.
erfinv = frompyfunc(erfinv,1,1)
Q = erf( ( log(self.xmin) - self.mu ) / (sqrt(2)*self.sigma))
Q = Q*r - r + 1.0
Q = erfinv(Q).astype('float')
return exp(self.mu + sqrt(2)*self.sigma*Q)
# def _generate_random_continuous(self, r1, r2=None):
# from numpy import log, sqrt, exp, sin, cos
# from scipy.constants import pi
# if r2==None:
# from numpy.random import rand
# r2 = rand(len(r1))
# r2_provided = False
# else:
# r2_provided = True
#
# rho = sqrt(-2.0 * self.sigma**2.0 * log(1-r1))
# theta = 2.0 * pi * r2
# x1 = exp(rho * sin(theta))
# x2 = exp(rho * cos(theta))
#
# if r2_provided:
# return x1, x2
# else:
# return x1
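# Illustrative sketch (not part of the original API): the same inverse-CCDF
# transform as Lognormal._generate_random_continuous above, written with
# scipy.special.erfinv instead of mpmath. Faster, but without the extended
# precision mpmath provides for extreme tail values.
def _example_lognormal_samples(mu, sigma, xmin, r):
    from numpy import exp, sqrt, log
    from scipy.special import erf, erfinv
    Q = erf((log(xmin) - mu) / (sqrt(2)*sigma))
    Q = Q*r - r + 1.0
    return exp(mu + sqrt(2)*sigma*erfinv(Q))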
def nested_loglikelihood_ratio(loglikelihoods1, loglikelihoods2, **kwargs):
"""
Calculates a loglikelihood ratio and the p-value for testing which of two
probability distributions is more likely to have created a set of
observations. Assumes one of the probability distributions is a nested
version of the other.
Parameters
----------
loglikelihoods1 : list or array
The logarithms of the likelihoods of each observation, calculated from
a particular probability distribution.
loglikelihoods2 : list or array
The logarithms of the likelihoods of each observation, calculated from
a particular probability distribution.
normalized_ratio : bool, optional
Whether to return the loglikelihood ratio, R, or the normalized
ratio R/sqrt(n*variance)
Returns
-------
R : float
The loglikelihood ratio of the two sets of likelihoods. If positive,
the first set of likelihoods is more likely (and so the probability
distribution that produced them is a better fit to the data). If
negative, the reverse is true.
p : float
The significance of the sign of R. If below a critical values
(typically .05) the sign of R is taken to be significant. If above the
critical value the sign of R is taken to be due to statistical
fluctuations.
"""
return loglikelihood_ratio(loglikelihoods1, loglikelihoods2,
nested=True, **kwargs)
def loglikelihood_ratio(loglikelihoods1, loglikelihoods2,
nested=False, normalized_ratio=False):
"""
Calculates a loglikelihood ratio and the p-value for testing which of two
probability distributions is more likely to have created a set of
observations.
Parameters
----------
loglikelihoods1 : list or array
The logarithms of the likelihoods of each observation, calculated from
a particular probability distribution.
loglikelihoods2 : list or array
The logarithms of the likelihoods of each observation, calculated from
a particular probability distribution.
nested: bool, optional
Whether one of the two probability distributions that generated the
likelihoods is a nested version of the other. False by default.
normalized_ratio : bool, optional
Whether to return the loglikelihood ratio, R, or the normalized
ratio R/sqrt(n*variance)
Returns
-------
R : float
The loglikelihood ratio of the two sets of likelihoods. If positive,
the first set of likelihoods is more likely (and so the probability
distribution that produced them is a better fit to the data). If
negative, the reverse is true.
p : float
The significance of the sign of R. If below a critical values
(typically .05) the sign of R is taken to be significant. If above the
critical value the sign of R is taken to be due to statistical
fluctuations.
"""
from numpy import sqrt
from scipy.special import erfc
n = float(len(loglikelihoods1))
if n==0:
R = 0
p = 1
return R, p
from numpy import asarray
loglikelihoods1 = asarray(loglikelihoods1)
loglikelihoods2 = asarray(loglikelihoods2)
#Clean for extreme values, if any
from numpy import inf, log
from sys import float_info
min_val = log(10**float_info.min_10_exp)
loglikelihoods1[loglikelihoods1==-inf] = min_val
loglikelihoods2[loglikelihoods2==-inf] = min_val
R = sum(loglikelihoods1-loglikelihoods2)
from numpy import mean
mean_diff = mean(loglikelihoods1)-mean(loglikelihoods2)
variance = sum(
( (loglikelihoods1-loglikelihoods2) - mean_diff)**2
)/n
if nested:
from scipy.stats import chi2
p = 1 - chi2.cdf(abs(2*R), 1)
else:
p = erfc( abs(R) / sqrt(2*n*variance))
if normalized_ratio:
R = R/sqrt(n*variance)
return R, p
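# Illustrative sketch (not part of the original API): comparing two fitted
# Distribution objects from this module on the same data with the normalized
# loglikelihood ratio. A positive R favors dist1; p gauges whether the sign
# of R is significant.
def _example_compare(dist1, dist2, data):
    R, p = loglikelihood_ratio(dist1.loglikelihoods(data),
                               dist2.loglikelihoods(data),
                               normalized_ratio=True)
    if p < .05:
        return (dist1.name if R > 0 else dist2.name), R, p
    return None, R, p  #sign of R not statistically significant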
def cdf(data, survival=False, **kwargs):
"""
The cumulative distribution function (CDF) of the data.
Parameters
----------
data : list or array, optional
survival : bool, optional
Whether to calculate a CDF (False) or CCDF (True). False by default.
Returns
-------
X : array
The sorted, unique values in the data.
probabilities : array
The portion of the data that is less than or equal to X.
"""
return cumulative_distribution_function(data, survival=survival, **kwargs)
def ccdf(data, survival=True, **kwargs):
"""
The complementary cumulative distribution function (CCDF) of the data.
Parameters
----------
data : list or array, optional
survival : bool, optional
Whether to calculate a CDF (False) or CCDF (True). True by default.
Returns
-------
X : array
The sorted, unique values in the data.
probabilities : array
        The portion of the data that is greater than or equal to X.
"""
return cumulative_distribution_function(data, survival=survival, **kwargs)
def cumulative_distribution_function(data,
xmin=None, xmax=None,
survival=False, **kwargs):
"""
The cumulative distribution function (CDF) of the data.
Parameters
----------
data : list or array, optional
survival : bool, optional
Whether to calculate a CDF (False) or CCDF (True). False by default.
xmin : int or float, optional
The minimum data size to include. Values less than xmin are excluded.
xmax : int or float, optional
        The maximum data size to include. Values greater than xmax are
excluded.
Returns
-------
X : array
The sorted, unique values in the data.
probabilities : array
The portion of the data that is less than or equal to X.
"""
from numpy import array
data = array(data)
if not data.any():
from numpy import nan
return array([nan]), array([nan])
data = trim_to_range(data, xmin=xmin, xmax=xmax)
n = float(len(data))
from numpy import sort
data = sort(data)
all_unique = not( any( data[:-1]==data[1:] ) )
if all_unique:
from numpy import arange
CDF = arange(n)/n
else:
        #This clever bit, using searchsorted to rapidly calculate the CDF of
        #data with repeated values, comes from Adam Ginsburg's plfit code,
        #specifically https://github.com/keflavich/plfit/commit/453edc36e4eb35f35a34b6c792a6d8c7e848d3b5#plfit/plfit.py
from numpy import searchsorted, unique
CDF = searchsorted(data, data,side='left')/n
unique_data, unique_indices = unique(data, return_index=True)
data=unique_data
CDF = CDF[unique_indices]
if survival:
CDF = 1-CDF
return data, CDF
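# Illustrative sketch (not part of the original API): the searchsorted trick
# above on a small array with repeated values. With side='left', each unique
# value x gets the fraction of observations strictly less than x, so ties
# collapse to a single step in the CDF.
def _example_cdf_with_ties():
    X, CDF = cdf([1, 2, 2, 3])
    #X   -> array([1, 2, 3])
    #CDF -> array([0.  , 0.25, 0.75])
    return X, CDF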
def is_discrete(data):
"""Checks if every element of the array is an integer."""
from numpy import floor
return (floor(data)==data.astype(float)).all()
def trim_to_range(data, xmin=None, xmax=None, **kwargs):
"""
    Removes elements of the data that are below xmin or above xmax (if present)
"""
from numpy import asarray
data = asarray(data)
if xmin:
data = data[data>=xmin]
if xmax:
data = data[data<=xmax]
return data
def pdf(data, xmin=None, xmax=None, linear_bins=False, **kwargs):
"""
Returns the probability density function (normalized histogram) of the
data.
Parameters
----------
data : list or array
xmin : float, optional
Minimum value of the PDF. If None, uses the smallest value in the data.
xmax : float, optional
Maximum value of the PDF. If None, uses the largest value in the data.
    linear_bins : bool, optional
Whether to use linearly spaced bins, as opposed to logarithmically
spaced bins (recommended for log-log plots).
Returns
-------
bin_edges : array
The edges of the bins of the probability density function.
probabilities : array
The portion of the data that is within the bin. Length 1 less than
bin_edges, as it corresponds to the spaces between them.
"""
from numpy import logspace, histogram, floor, unique
from math import ceil, log10
if not xmax:
xmax = max(data)
if not xmin:
xmin = min(data)
if linear_bins:
        bins = range(int(xmin), int(xmax)+1) #Include xmax as the final bin edge
else:
log_min_size = log10(xmin)
log_max_size = log10(xmax)
number_of_bins = ceil((log_max_size-log_min_size)*10)
bins=unique(
floor(
logspace(
log_min_size, log_max_size, num=number_of_bins)))
hist, edges = histogram(data, bins, density=True)
return edges, hist
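# Illustrative sketch (not part of the original API): the log-spaced binning
# used by pdf() above. Ten bin edges per decade are laid out between xmin and
# xmax, floored to integers, and deduplicated so no bin is narrower than 1.
def _example_log_bins(xmin=1, xmax=1000):
    from numpy import logspace, floor, unique
    from math import ceil, log10
    number_of_bins = ceil((log10(xmax) - log10(xmin))*10)
    return unique(floor(logspace(log10(xmin), log10(xmax), num=number_of_bins)))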
def checkunique(data):
"""Quickly checks if a sorted array is all unique elements."""
for i in range(len(data)-1):
if data[i]==data[i+1]:
return False
return True
#def checksort(data):
# """
# Checks if the data is sorted, in O(n) time. If it isn't sorted, it then
# sorts it in O(nlogn) time. Expectation is that the data will typically
# be sorted. Presently slower than numpy's sort, even on large arrays, and
# so is useless.
# """
#
# n = len(data)
# from numpy import arange
# if not all(data[i] <= data[i+1] for i in arange(n-1)):
# from numpy import sort
# data = sort(data)
# return data
def plot_ccdf(data, ax=None, survival=True, **kwargs):
    """
    Plots the complementary cumulative distribution function (CCDF) of the
    data to a new figure or to axis ax if provided.
    Parameters
    ----------
    data : list or array
    ax : matplotlib axis, optional
        The axis to which to plot. If None, a new figure is created.
    survival : bool, optional
        Whether to plot a CDF (False) or CCDF (True). True by default.
    Returns
    -------
    ax : matplotlib axis
        The axis to which the plot was made.
    """
    return plot_cdf(data, ax=ax, survival=survival, **kwargs)
def plot_cdf(data, ax=None, survival=False, **kwargs):
"""
Plots the cumulative distribution function (CDF) of the data to a new
figure or to axis ax if provided.
Parameters
----------
data : list or array
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
survival : bool, optional
Whether to plot a CDF (False) or CCDF (True). False by default.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
bins, CDF = cdf(data, survival=survival, **kwargs)
if not ax:
import matplotlib.pyplot as plt
plt.plot(bins, CDF, **kwargs)
ax = plt.gca()
else:
ax.plot(bins, CDF, **kwargs)
ax.set_xscale("log")
ax.set_yscale("log")
return ax
def plot_pdf(data, ax=None, linear_bins=False, **kwargs):
"""
Plots the probability density function (PDF) to a new figure or to axis ax
if provided.
Parameters
----------
data : list or array
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
linear_bins : bool, optional
Whether to use linearly spaced bins (True) or logarithmically
spaced bins (False). False by default.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
edges, hist = pdf(data, linear_bins=linear_bins, **kwargs)
bin_centers = (edges[1:]+edges[:-1])/2.0
from numpy import nan
hist[hist==0] = nan
if not ax:
import matplotlib.pyplot as plt
plt.plot(bin_centers, hist, **kwargs)
ax = plt.gca()
else:
ax.plot(bin_centers, hist, **kwargs)
ax.set_xscale("log")
ax.set_yscale("log")
return ax
def bisect_map(mn, mx, function, target):
"""
Uses binary search to find the target solution to a function, searching in
a given ordered sequence of integer values.
Parameters
----------
seq : list or array, monotonically increasing integers
function : a function that takes a single integer input, which monotonically
decreases over the range of seq.
target : the target value of the function
Returns
-------
value : the input value that yields the target solution. If there is no
exact solution in the input sequence, finds the nearest value k such that
function(k) <= target < function(k+1). This is similar to the behavior of
bisect_left in the bisect package. If even the first, leftmost value of seq
does not satisfy this condition, -1 is returned.
"""
if function([mn]) < target or function([mx]) > target:
return -1
while 1:
if mx==mn+1:
return mn
m = (mn + mx) / 2
value = function([m])[0]
if value > target:
mn = m
elif value < target:
mx = m
else:
return m
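# Illustrative sketch (not part of the original API): using bisect_map with a
# monotonically decreasing function. The function mimics the calling
# convention of Distribution.ccdf: it takes a one-element sequence and
# returns an indexable array.
def _example_bisect_map():
    from numpy import array
    func = lambda xs: array([2.0 ** -xs[0]])  #decreasing in x
    #Largest integer k in [1, 64] with 2**-k >= .01 is k=6
    return bisect_map(1, 64, func, .01)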
######################
#What follows are functional programming forms of the above code, which are
#clunkier and have somewhat less functionality. However, they are here if you
#really want them.
class Distribution_Fit(object):
def __init__(self, data, name, xmin, discrete=False, xmax=None, method='Likelihood', estimate_discrete=True):
self.data = data
self.discrete = discrete
self.xmin = xmin
self.xmax = xmax
self.method = method
self.name = name
self.estimate_discrete = estimate_discrete
return
def __getattr__(self, name):
param_names = {'lognormal': ('mu', 'sigma', None),
'exponential': ('Lambda', None, None),
'truncated_power_law': ('alpha', 'Lambda', None),
'power_law': ('alpha', None, None),
'negative_binomial': ('r', 'p', None),
'stretched_exponential': ('Lambda', 'beta', None),
'gamma': ('k', 'theta', None)}
param_names = param_names[self.name]
if name in param_names:
if name == param_names[0]:
setattr(self, name, self.parameter1)
elif name == param_names[1]:
setattr(self, name, self.parameter2)
elif name == param_names[2]:
setattr(self, name, self.parameter3)
return getattr(self, name)
elif name in ['parameters',
'parameter1_name',
'parameter1',
'parameter2_name',
'parameter2',
'parameter3_name',
'parameter3',
'loglikelihood']:
self.parameters, self.loglikelihood = distribution_fit(self.data, distribution=self.name, discrete=self.discrete,
xmin=self.xmin, xmax=self.xmax, search_method=self.method, estimate_discrete=self.estimate_discrete)
self.parameter1 = self.parameters[0]
if len(self.parameters) < 2:
self.parameter2 = None
else:
self.parameter2 = self.parameters[1]
if len(self.parameters) < 3:
self.parameter3 = None
else:
self.parameter3 = self.parameters[2]
self.parameter1_name = param_names[0]
self.parameter2_name = param_names[1]
self.parameter3_name = param_names[2]
if name == 'parameters':
return self.parameters
elif name == 'parameter1_name':
return self.parameter1_name
elif name == 'parameter2_name':
return self.parameter2_name
elif name == 'parameter3_name':
return self.parameter3_name
elif name == 'parameter1':
return self.parameter1
elif name == 'parameter2':
return self.parameter2
elif name == 'parameter3':
return self.parameter3
elif name == 'loglikelihood':
return self.loglikelihood
if name == 'D':
if self.name != 'power_law':
self.D = None
else:
self.D = power_law_ks_distance(self.data, self.parameter1, xmin=self.xmin, xmax=self.xmax, discrete=self.discrete)
return self.D
if name == 'p':
print("A p value outside of a loglihood ratio comparison to another candidate distribution is not currently supported.\n \
If your data set is particularly large and has any noise in it at all, using such statistical tools as the Monte Carlo method\n\
can lead to erroneous results anyway; the presence of the noise means the distribution will obviously not perfectly fit the\n\
candidate distribution, and the very large number of samples will make the Monte Carlo simulations very close to a perfect\n\
fit. As such, such a test will always fail, unless your candidate distribution perfectly describes all elements of the\n\
system, including the noise. A more helpful analysis is the comparison between multiple, specific candidate distributions\n\
(the loglikelihood ratio test), which tells you which is the best fit of these distributions.", file=sys.stderr)
self.p = None
return self.p
#
# elif name in ['power_law_loglikelihood_ratio',
# 'power_law_p']:
# pl_R, pl_p = distribution_compare(self.data, 'power_law', self.power_law.parameters, name, self.parameters, self.discrete, self.xmin, self.xmax)
# self.power_law_loglikelihood_ratio = pl_R
# self.power_law_p = pl_p
# if name=='power_law_loglikelihood_ratio':
# return self.power_law_loglikelihood_ratio
# if name=='power_law_p':
# return self.power_law_p
# elif name in ['truncated_power_law_loglikelihood_ratio',
# 'truncated_power_law_p']:
# tpl_R, tpl_p = distribution_compare(self.data, 'truncated_power_law', self.truncated_power_law.parameters, name, self.parameters, self.discrete, self.xmin, self.xmax)
# self.truncated_power_law_loglikelihood_ratio = tpl_R
# self.truncated_power_law_p = tpl_p
# if name=='truncated_power_law_loglikelihood_ratio':
# return self.truncated_power_law_loglikelihood_ratio
# if name=='truncated_power_law_p':
# return self.truncated_power_law_p
else:
raise AttributeError(name)
def distribution_fit(data, distribution='all', discrete=False, xmin=None, xmax=None, \
comparison_alpha=None, search_method='Likelihood', estimate_discrete=True):
from numpy import log
if distribution == 'negative_binomial' and not is_discrete(data):
print("Rounding to integer values for negative binomial fit.", file=sys.stderr)
from numpy import around
data = around(data)
discrete = True
    #If we aren't given an xmin, calculate the best possible one for a power law. This can take a while!
if xmin is None or xmin == 'find' or type(xmin) == tuple or type(xmin) == list:
print("Calculating best minimal value", file=sys.stderr)
if 0 in data:
print("Value 0 in data. Throwing out 0 values", file=sys.stderr)
data = data[data != 0]
xmin, D, alpha, loglikelihood, n_tail, noise_flag = find_xmin(data, discrete=discrete, xmax=xmax, search_method=search_method, estimate_discrete=estimate_discrete, xmin_range=xmin)
else:
alpha = None
if distribution == 'power_law' and alpha:
return [alpha], loglikelihood
xmin = float(xmin)
data = data[data >= xmin]
if xmax:
xmax = float(xmax)
data = data[data <= xmax]
#Special case where we call distribution_fit multiple times to do all comparisons
if distribution == 'all':
print("Analyzing all distributions", file=sys.stderr)
print("Calculating power law fit", file=sys.stderr)
if alpha:
pl_parameters = [alpha]
else:
pl_parameters, loglikelihood = distribution_fit(data, 'power_law', discrete, xmin, xmax, search_method=search_method, estimate_discrete=estimate_discrete)
results = {}
results['xmin'] = xmin
results['xmax'] = xmax
results['discrete'] = discrete
results['fits'] = {}
results['fits']['power_law'] = (pl_parameters, loglikelihood)
print("Calculating truncated power law fit", file=sys.stderr)
tpl_parameters, loglikelihood, R, p = distribution_fit(data, 'truncated_power_law', discrete, xmin, xmax, comparison_alpha=pl_parameters[0], search_method=search_method, estimate_discrete=estimate_discrete)
results['fits']['truncated_power_law'] = (tpl_parameters, loglikelihood)
results['power_law_comparison'] = {}
results['power_law_comparison']['truncated_power_law'] = (R, p)
results['truncated_power_law_comparison'] = {}
supported_distributions = ['exponential', 'lognormal', 'stretched_exponential', 'gamma']
for i in supported_distributions:
print("Calculating %s fit" % (i,), file=sys.stderr)
parameters, loglikelihood, R, p = distribution_fit(data, i, discrete, xmin, xmax, comparison_alpha=pl_parameters[0], search_method=search_method, estimate_discrete=estimate_discrete)
results['fits'][i] = (parameters, loglikelihood)
results['power_law_comparison'][i] = (R, p)
R, p = distribution_compare(data, 'truncated_power_law', tpl_parameters, i, parameters, discrete, xmin, xmax)
results['truncated_power_law_comparison'][i] = (R, p)
return results
#Handle edge case where we don't have enough data
no_data = False
if xmax and all((data > xmax) + (data < xmin)):
#Everything is beyond the bounds of the xmax and xmin
no_data = True
if all(data < xmin):
no_data = True
if len(data) < 2:
no_data = True
if no_data:
from numpy import array
from sys import float_info
parameters = array([0, 0, 0])
if search_method == 'Likelihood':
loglikelihood = -10 ** float_info.max_10_exp
if search_method == 'KS':
loglikelihood = 1
if comparison_alpha is None:
return parameters, loglikelihood
R = 10 ** float_info.max_10_exp
p = 1
return parameters, loglikelihood, R, p
n = float(len(data))
#Initial search parameters, estimated from the data
# print("Calculating initial parameters for search", file=sys.stderr)
if distribution == 'power_law' and not alpha:
initial_parameters = [1 + n / sum(log(data / (xmin)))]
elif distribution == 'exponential':
from numpy import mean
initial_parameters = [1 / mean(data)]
elif distribution == 'stretched_exponential':
from numpy import mean
initial_parameters = [1 / mean(data), 1]
elif distribution == 'truncated_power_law':
from numpy import mean
initial_parameters = [1 + n / sum(log(data / xmin)), 1 / mean(data)]
elif distribution == 'lognormal':
from numpy import mean, std
logdata = log(data)
initial_parameters = [mean(logdata), std(logdata)]
elif distribution == 'negative_binomial':
initial_parameters = [1, .5]
elif distribution == 'gamma':
from numpy import mean
initial_parameters = [n / sum(log(data / xmin)), mean(data)]
if search_method == 'Likelihood':
# print("Searching using maximum likelihood method", file=sys.stderr)
#If the distribution is a continuous power law without an xmax, and we're using the maximum likelihood method, we can compute the parameters and likelihood directly
if distribution == 'power_law' and not discrete and not xmax and not alpha:
            from numpy import array, isnan
alpha = 1 + n /\
sum(log(data / xmin))
loglikelihood = n * log(alpha - 1.0) - n * log(xmin) - alpha * sum(log(data / xmin))
            if isnan(loglikelihood):
loglikelihood = 0
parameters = array([alpha])
return parameters, loglikelihood
elif distribution == 'power_law' and discrete and not xmax and not alpha and estimate_discrete:
            from numpy import array, isnan
alpha = 1 + n /\
sum(log(data / (xmin - .5)))
loglikelihood = n * log(alpha - 1.0) - n * log(xmin) - alpha * sum(log(data / xmin))
            if isnan(loglikelihood):
loglikelihood = 0
parameters = array([alpha])
return parameters, loglikelihood
#Otherwise, we set up a likelihood function
likelihood_function = likelihood_function_generator(distribution, discrete=discrete, xmin=xmin, xmax=xmax)
#Search for the best fit parameters for the target distribution, on this data
from scipy.optimize import fmin
parameters, negative_loglikelihood, iter, funcalls, warnflag, = \
fmin(
lambda p: -sum(log(likelihood_function(p, data))),
initial_parameters, full_output=1, disp=False)
loglikelihood = -negative_loglikelihood
if comparison_alpha:
R, p = distribution_compare(data, 'power_law', [comparison_alpha], distribution, parameters, discrete, xmin, xmax)
return parameters, loglikelihood, R, p
else:
return parameters, loglikelihood
elif search_method == 'KS':
print("Not yet supported. Sorry.", file=sys.stderr)
return
# #Search for the best fit parameters for the target distribution, on this data
# from scipy.optimize import fmin
# parameters, KS, iter, funcalls, warnflag, = \
# fmin(\
# lambda p: -sum(log(likelihood_function(p, data))),\
# initial_parameters, full_output=1, disp=False)
# loglikelihood =-negative_loglikelihood
#
# if comparison_alpha:
# R, p = distribution_compare(data, 'power_law',[comparison_alpha], distribution, parameters, discrete, xmin, xmax)
# return parameters, loglikelihood, R, p
# else:
# return parameters, loglikelihood
def distribution_compare(data, distribution1, parameters1,
distribution2, parameters2,
discrete, xmin, xmax, nested=None, **kwargs):
no_data = False
if xmax and all((data > xmax) + (data < xmin)):
#Everything is beyond the bounds of the xmax and xmin
no_data = True
if all(data < xmin):
no_data = True
if no_data:
R = 0
p = 1
return R, p
likelihood_function1 = likelihood_function_generator(distribution1, discrete, xmin, xmax)
likelihood_function2 = likelihood_function_generator(distribution2, discrete, xmin, xmax)
likelihoods1 = likelihood_function1(parameters1, data)
likelihoods2 = likelihood_function2(parameters2, data)
    if (nested is None and
            ((distribution1 in distribution2) or
             (distribution2 in distribution1))):
print("Assuming nested distributions", file=sys.stderr)
nested = True
from numpy import log
R, p = loglikelihood_ratio(log(likelihoods1), log(likelihoods2),
nested=nested, **kwargs)
return R, p
def likelihood_function_generator(distribution_name, discrete=False, xmin=1, xmax=None):
if distribution_name == 'power_law':
likelihood_function = lambda parameters, data:\
power_law_likelihoods(
data, parameters[0], xmin, xmax, discrete)
elif distribution_name == 'exponential':
likelihood_function = lambda parameters, data:\
exponential_likelihoods(
data, parameters[0], xmin, xmax, discrete)
elif distribution_name == 'stretched_exponential':
likelihood_function = lambda parameters, data:\
stretched_exponential_likelihoods(
data, parameters[0], parameters[1], xmin, xmax, discrete)
elif distribution_name == 'truncated_power_law':
likelihood_function = lambda parameters, data:\
truncated_power_law_likelihoods(
data, parameters[0], parameters[1], xmin, xmax, discrete)
elif distribution_name == 'lognormal':
likelihood_function = lambda parameters, data:\
lognormal_likelihoods(
data, parameters[0], parameters[1], xmin, xmax, discrete)
elif distribution_name == 'negative_binomial':
likelihood_function = lambda parameters, data:\
negative_binomial_likelihoods(
data, parameters[0], parameters[1], xmin, xmax)
elif distribution_name == 'gamma':
likelihood_function = lambda parameters, data:\
gamma_likelihoods(
data, parameters[0], parameters[1], xmin, xmax)
return likelihood_function
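# Hedged usage sketch (our annotation): the generated callable takes
# (parameters, data) and returns per-observation likelihoods, so a
# log-likelihood is simply sum(log(likelihood_function(parameters, data))),
# exactly as used in the fmin objective inside distribution_fit().
#
#     from numpy import array, log
#     f = likelihood_function_generator('power_law', discrete=False, xmin=1.0)
#     loglikelihood = sum(log(f([2.5], array([1.0, 1.7, 3.2]))))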
def find_xmin(data, discrete=False, xmax=None, search_method='Likelihood', return_all=False, estimate_discrete=True, xmin_range=None):
from numpy import sort, unique, asarray, argmin, vstack, arange, sqrt
if 0 in data:
print("Value 0 in data. Throwing out 0 values", file=sys.stderr)
data = data[data != 0]
if xmax:
data = data[data <= xmax]
#Much of the rest of this function was inspired by Adam Ginsburg's plfit code, specifically around lines 131-143 of this version: http://code.google.com/p/agpy/source/browse/trunk/plfit/plfit.py?spec=svn359&r=357
if not all(data[i] <= data[i + 1] for i in range(len(data) - 1)):
data = sort(data)
if xmin_range == 'find' or xmin_range is None:
possible_xmins = data
else:
possible_xmins = data[data <= max(xmin_range)]
possible_xmins = possible_xmins[possible_xmins >= min(xmin_range)]
xmins, xmin_indices = unique(possible_xmins, return_index=True)
xmins = xmins[:-1]
if len(xmins) < 2:
from sys import float_info
xmin = 1
D = 1
alpha = 0
loglikelihood = -10 ** float_info.max_10_exp
n_tail = 1
noise_flag = True
Ds = 1
alphas = 0
sigmas = 1
if not return_all:
return xmin, D, alpha, loglikelihood, n_tail, noise_flag
else:
return xmin, D, alpha, loglikelihood, n_tail, noise_flag, xmins, Ds, alphas, sigmas
xmin_indices = xmin_indices[:-1] # Don't look at last xmin, as that's also the xmax, and we want to at least have TWO points to fit!
if search_method == 'Likelihood':
alpha_MLE_function = lambda xmin: distribution_fit(data, 'power_law', xmin=xmin, xmax=xmax, discrete=discrete, search_method='Likelihood', estimate_discrete=estimate_discrete)
fits = asarray(list(map(alpha_MLE_function, xmins)))
elif search_method == 'KS':
alpha_KS_function = lambda xmin: distribution_fit(data, 'power_law', xmin=xmin, xmax=xmax, discrete=discrete, search_method='KS', estimate_discrete=estimate_discrete)[0]
fits = asarray(list(map(alpha_KS_function, xmins)))
params = fits[:, 0]
alphas = vstack(params)[:, 0]
loglikelihoods = fits[:, 1]
ks_function = lambda index: power_law_ks_distance(data, alphas[index], xmins[index], xmax=xmax, discrete=discrete)
Ds = asarray(list(map(ks_function, arange(len(xmins)))))
sigmas = (alphas - 1) / sqrt(len(data) - xmin_indices + 1)
good_values = sigmas < .1
    #Find the index of the first bad value (the first False, where sigma > .1); it is used as an exclusive bound on the good fits:
xmin_max = argmin(good_values)
if good_values.all(): # If there are no fits beyond the noise threshold
min_D_index = argmin(Ds)
noise_flag = False
elif xmin_max > 0:
min_D_index = argmin(Ds[:xmin_max])
noise_flag = False
else:
min_D_index = argmin(Ds)
noise_flag = True
xmin = xmins[min_D_index]
D = Ds[min_D_index]
alpha = alphas[min_D_index]
loglikelihood = loglikelihoods[min_D_index]
n_tail = sum(data >= xmin)
if not return_all:
return xmin, D, alpha, loglikelihood, n_tail, noise_flag
else:
return xmin, D, alpha, loglikelihood, n_tail, noise_flag, xmins, Ds, alphas, sigmas
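# Hedged usage sketch (our annotation): the default return is a 6-tuple,
#
#     xmin, D, alpha, loglikelihood, n_tail, noise_flag = find_xmin(data)
#
# and with return_all=True the per-candidate xmins, Ds, alphas, and sigmas are
# appended as well, which is handy for plotting the KS-distance landscape.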
def power_law_ks_distance(data, alpha, xmin, xmax=None, discrete=False, kuiper=False):
from numpy import arange, sort, mean
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
n = float(len(data))
if n < 2:
if kuiper:
return 1, 1, 2
return 1
if not all(data[i] <= data[i + 1] for i in arange(n - 1)):
data = sort(data)
if not discrete:
Actual_CDF = arange(n) / n
Theoretical_CDF = 1 - (data / xmin) ** (-alpha + 1)
if discrete:
from scipy.special import zeta
if xmax:
bins, Actual_CDF = cumulative_distribution_function(data,xmin=xmin,xmax=xmax)
Theoretical_CDF = 1 - ((zeta(alpha, bins) - zeta(alpha, xmax+1)) /\
(zeta(alpha, xmin)-zeta(alpha,xmax+1)))
if not xmax:
bins, Actual_CDF = cumulative_distribution_function(data,xmin=xmin)
Theoretical_CDF = 1 - (zeta(alpha, bins) /\
zeta(alpha, xmin))
D_plus = max(Theoretical_CDF - Actual_CDF)
D_minus = max(Actual_CDF - Theoretical_CDF)
Kappa = 1 + mean(Theoretical_CDF - Actual_CDF)
if kuiper:
return D_plus, D_minus, Kappa
D = max(D_plus, D_minus)
return D
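# Worked note (our annotation): the Kolmogorov-Smirnov distance computed above is
#     D = max(D_plus, D_minus) = max_x |CDF_actual(x) - CDF_theoretical(x)|,
# the largest vertical gap between the empirical and model CDFs. With
# kuiper=True, D_plus and D_minus are instead returned separately, together
# with Kappa = 1 + mean(CDF_theoretical - CDF_actual).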
def power_law_likelihoods(data, alpha, xmin, xmax=False, discrete=False):
if alpha < 0:
from numpy import tile
from sys import float_info
return tile(10 ** float_info.min_10_exp, len(data))
xmin = float(xmin)
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
if not discrete:
likelihoods = (data ** -alpha) *\
((alpha - 1) * xmin ** (alpha - 1))
if discrete:
if alpha < 1:
from numpy import tile
from sys import float_info
return tile(10 ** float_info.min_10_exp, len(data))
if not xmax:
from scipy.special import zeta
likelihoods = (data ** -alpha) /\
zeta(alpha, xmin)
if xmax:
from scipy.special import zeta
likelihoods = (data ** -alpha) /\
(zeta(alpha, xmin) - zeta(alpha, xmax + 1))
from sys import float_info
likelihoods[likelihoods == 0] = 10 ** float_info.min_10_exp
return likelihoods
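# Worked note (our annotation): the continuous branch above implements the
# normalized power-law density on [xmin, inf),
#     p(x) = ((alpha - 1) / xmin) * (x / xmin)**(-alpha)
#          = (alpha - 1) * xmin**(alpha - 1) * x**(-alpha),
# while the discrete branch normalizes by the Hurwitz zeta function
# zeta(alpha, xmin) (minus zeta(alpha, xmax + 1) when a cutoff is given).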
def negative_binomial_likelihoods(data, r, p, xmin=0, xmax=False):
#Better to make this correction earlier on in distribution_fit, so as to not recheck for discreteness and reround every time fmin is used.
#if not is_discrete(data):
# print("Rounding to nearest integer values for negative binomial fit.", file=sys.stderr)
# from numpy import around
# data = around(data)
xmin = float(xmin)
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
from numpy import asarray
    # scipy.misc.comb was removed in SciPy 1.0; scipy.special.comb is the maintained location.
    from scipy.special import comb
pmf = lambda k: comb(k + r - 1, k) * (1 - p) ** r * p ** k
likelihoods = asarray(list(map(pmf, data))).flatten()
if xmin != 0 or xmax:
xmax = max(data)
from numpy import arange
normalization_constant = sum(list(map(pmf, arange(xmin, xmax + 1))))
likelihoods = likelihoods / normalization_constant
from sys import float_info
likelihoods[likelihoods == 0] = 10 ** float_info.min_10_exp
return likelihoods
def exponential_likelihoods(data, Lambda, xmin, xmax=False, discrete=False):
if Lambda < 0:
from numpy import tile
from sys import float_info
return tile(10 ** float_info.min_10_exp, len(data))
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
from numpy import exp
if not discrete:
# likelihoods = exp(-Lambda*data)*\
# Lambda*exp(Lambda*xmin)
likelihoods = Lambda * exp(Lambda * (xmin - data)) # Simplified so as not to throw a nan from infs being divided by each other
if discrete:
if not xmax:
likelihoods = exp(-Lambda * data) *\
(1 - exp(-Lambda)) * exp(Lambda * xmin)
if xmax:
likelihoods = exp(-Lambda * data) * (1 - exp(-Lambda))\
/ (exp(-Lambda * xmin) - exp(-Lambda * (xmax + 1)))
from sys import float_info
likelihoods[likelihoods == 0] = 10 ** float_info.min_10_exp
return likelihoods
def stretched_exponential_likelihoods(data, Lambda, beta, xmin, xmax=False, discrete=False):
if Lambda < 0:
from numpy import tile
from sys import float_info
return tile(10 ** float_info.min_10_exp, len(data))
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
from numpy import exp
if not discrete:
# likelihoods = (data**(beta-1) * exp(-Lambda*(data**beta)))*\
# (beta*Lambda*exp(Lambda*(xmin**beta)))
likelihoods = data ** (beta - 1) * beta * Lambda * exp(Lambda * (xmin ** beta - data ** beta)) # Simplified so as not to throw a nan from infs being divided by each other
if discrete:
if not xmax:
xmax = max(data)
if xmax:
from numpy import arange
X = arange(xmin, xmax + 1)
PDF = X ** (beta - 1) * beta * Lambda * exp(Lambda * (xmin ** beta - X ** beta)) # Simplified so as not to throw a nan from infs being divided by each other
PDF = PDF / sum(PDF)
likelihoods = PDF[(data - xmin).astype(int)]
from sys import float_info
likelihoods[likelihoods == 0] = 10 ** float_info.min_10_exp
return likelihoods
def gamma_likelihoods(data, k, theta, xmin, xmax=False, discrete=False):
if k <= 0 or theta <= 0:
from numpy import tile
from sys import float_info
return tile(10 ** float_info.min_10_exp, len(data))
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
from numpy import exp
from mpmath import gammainc
# from scipy.special import gamma, gammainc #Not NEARLY numerically accurate enough for the job
if not discrete:
likelihoods = (data ** (k - 1)) / (exp(data / theta) * (theta ** k) * float(gammainc(k)))
#Calculate how much probability mass is beyond xmin, and normalize by it
normalization_constant = 1 - float(gammainc(k, 0, xmin / theta, regularized=True)) # Mpmath's regularized option divides by gamma(k)
likelihoods = likelihoods / normalization_constant
if discrete:
if not xmax:
xmax = max(data)
if xmax:
from numpy import arange
X = arange(xmin, xmax + 1)
PDF = (X ** (k - 1)) / (exp(X / theta) * (theta ** k) * float(gammainc(k)))
PDF = PDF / sum(PDF)
likelihoods = PDF[(data - xmin).astype(int)]
from sys import float_info
likelihoods[likelihoods == 0] = 10 ** float_info.min_10_exp
return likelihoods
def truncated_power_law_likelihoods(data, alpha, Lambda, xmin, xmax=False, discrete=False):
if alpha < 0 or Lambda < 0:
from numpy import tile
from sys import float_info
return tile(10 ** float_info.min_10_exp, len(data))
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
from numpy import exp
if not discrete:
from mpmath import gammainc
# from scipy.special import gamma, gammaincc #Not NEARLY accurate enough to do the job
# likelihoods = (data**-alpha)*exp(-Lambda*data)*\
# (Lambda**(1-alpha))/\
# float(gammaincc(1-alpha,Lambda*xmin))
#Simplified so as not to throw a nan from infs being divided by each other
likelihoods = (Lambda ** (1 - alpha)) /\
((data ** alpha) * exp(Lambda * data) * gammainc(1 - alpha, Lambda * xmin)).astype(float)
if discrete:
if not xmax:
xmax = max(data)
if xmax:
from numpy import arange
X = arange(xmin, xmax + 1)
PDF = (X ** -alpha) * exp(-Lambda * X)
PDF = PDF / sum(PDF)
likelihoods = PDF[(data - xmin).astype(int)]
from sys import float_info
likelihoods[likelihoods == 0] = 10 ** float_info.min_10_exp
return likelihoods
def lognormal_likelihoods(data, mu, sigma, xmin, xmax=False, discrete=False):
from numpy import log
if sigma <= 0 or mu < log(xmin):
#The standard deviation can't be negative, and the mean of the logarithm of the distribution can't be smaller than the log of the smallest member of the distribution!
from numpy import tile
from sys import float_info
return tile(10 ** float_info.min_10_exp, len(data))
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
if not discrete:
from numpy import sqrt, exp
# from mpmath import erfc
from scipy.special import erfc
from scipy.constants import pi
likelihoods = (1.0 / data) * exp(-((log(data) - mu) ** 2) / (2 * sigma ** 2)) *\
sqrt(2 / (pi * sigma ** 2)) / erfc((log(xmin) - mu) / (sqrt(2) * sigma))
# likelihoods = likelihoods.astype(float)
if discrete:
if not xmax:
xmax = max(data)
if xmax:
from numpy import arange, exp
# from mpmath import exp
X = arange(xmin, xmax + 1)
# PDF_function = lambda x: (1.0/x)*exp(-( (log(x) - mu)**2 ) / 2*sigma**2)
# PDF = asarray(list(map(PDF_function,X)))
PDF = (1.0 / X) * exp(-((log(X) - mu) ** 2) / (2 * (sigma ** 2)))
PDF = (PDF / sum(PDF)).astype(float)
likelihoods = PDF[(data - xmin).astype(int)]
from sys import float_info
likelihoods[likelihoods == 0] = 10 ** float_info.min_10_exp
return likelihoods
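# Worked note (our annotation): the continuous branch above is the lognormal
# density truncated at xmin. The erfc() factor is the survival normalization,
#     P(X >= xmin) = erfc((log(xmin) - mu) / (sqrt(2) * sigma)) / 2,
# and the sqrt(2 / (pi * sigma**2)) prefactor is the usual
# 1 / (x * sigma * sqrt(2 * pi)) lognormal prefactor with that 1/2 folded in.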
| gpl-3.0 |
yyjiang/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
MobleyLab/SAMPL6 | host_guest/Analysis/Scripts/analyze_sampling.py | 1 | 116143 | #!/usr/bin/env python
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import collections
import copy
import itertools
import json
import math
import os
import numpy as np
import pandas as pd
import scipy as sp
import seaborn as sns
from matplotlib import pyplot as plt
from pkganalysis.stats import mean_confidence_interval
from pkganalysis.sampling import (SamplingSubmission, YankSamplingAnalysis,
YANK_N_ITERATIONS, DG_KEY, DDG_KEY, export_dictionary)
from pkganalysis.submission import (load_submissions)
# =============================================================================
# CONSTANTS
# =============================================================================
YANK_METHOD_PAPER_NAME = 'OpenMM/HREX'
# Paths to input data.
SAMPLING_SUBMISSIONS_DIR_PATH = '../SubmissionsDoNotUpload/975/'
YANK_ANALYSIS_DIR_PATH = 'YankAnalysis/Sampling/'
SAMPLING_ANALYSIS_DIR_PATH = '../SAMPLing/'
SAMPLING_DATA_DIR_PATH = os.path.join(SAMPLING_ANALYSIS_DIR_PATH, 'Data')
SAMPLING_PLOT_DIR_PATH = os.path.join(SAMPLING_ANALYSIS_DIR_PATH, 'Plots')
SAMPLING_PAPER_DIR_PATH = os.path.join(SAMPLING_ANALYSIS_DIR_PATH, 'PaperImages')
# All system ids.
SYSTEM_IDS = [
'CB8-G3-0', 'CB8-G3-1', 'CB8-G3-2', 'CB8-G3-3', 'CB8-G3-4',
'OA-G3-0', 'OA-G3-1', 'OA-G3-2', 'OA-G3-3', 'OA-G3-4',
'OA-G6-0', 'OA-G6-1', 'OA-G6-2', 'OA-G6-3', 'OA-G6-4'
]
# Kelly's colors for maximum contrast.
# "gray95", "gray13", "gold2", "plum4", "darkorange1", "lightskyblue2", "firebrick", "burlywood3", "gray51", "springgreen4", "lightpink2", "deepskyblue4", "lightsalmon2", "mediumpurple4", "orange", "maroon", "yellow3", "brown4", "yellow4", "sienna4", "chocolate", "gray19"
KELLY_COLORS = ['#F2F3F4', '#222222', '#F3C300', '#875692', '#F38400', '#A1CAF1', '#BE0032', '#C2B280', '#848482', '#008856', '#E68FAC', '#0067A5', '#F99379', '#604E97', '#F6A600', '#B3446C', '#DCD300', '#882D17', '#8DB600', '#654522', '#E25822', '#2B3D26']
TAB10_COLORS = sns.color_palette('tab10')
# Index of Kelly's colors associated to each submission.
SUBMISSION_COLORS = {
'AMBER/APR': 'dodgerblue',#KELLY_COLORS[11],
'OpenMM/REVO': 'gold', #KELLY_COLORS[7],
'OpenMM/SOMD': KELLY_COLORS[4],
'GROMACS/EE': 'darkviolet', #KELLY_COLORS[3],
'GROMACS/EE-fullequil': 'hotpink', #KELLY_COLORS[10],
YANK_METHOD_PAPER_NAME: '#4ECC41', #'limegreen', #KELLY_COLORS[9],
'GROMACS/NS-DS/SB-long': KELLY_COLORS[6],
'GROMACS/NS-DS/SB': KELLY_COLORS[1],
'GROMACS/NS-Jarz-F': TAB10_COLORS[0],
'GROMACS/NS-Jarz-R': TAB10_COLORS[1],
'GROMACS/NS-Gauss-F': TAB10_COLORS[2],
'GROMACS/NS-Gauss-R': TAB10_COLORS[4],
'NAMD/BAR': 'saddlebrown'
}
SUBMISSION_LINE_STYLES = {
'AMBER/APR': '--',
'OpenMM/REVO': '-',
'OpenMM/SOMD': '-',
'GROMACS/EE': '-',
'GROMACS/EE-fullequil': '-',
YANK_METHOD_PAPER_NAME: '-',
'GROMACS/NS-DS/SB-long': '-',
'GROMACS/NS-DS/SB': '-',
'GROMACS/NS-Jarz-F': '-',
'GROMACS/NS-Jarz-R': '-',
'GROMACS/NS-Gauss-F': '-',
'GROMACS/NS-Gauss-R': '-',
'NAMD/BAR': '--',
}
N_ENERGY_EVALUATIONS_SCALE = 1e6
# =============================================================================
# UTILITY FUNCTIONS
# =============================================================================
def reduce_to_first_significant_digit(quantity, uncertainty):
"""Truncate a quantity to the first significant digit of its uncertainty."""
first_significant_digit = math.floor(math.log10(abs(uncertainty)))
quantity = round(quantity, -first_significant_digit)
uncertainty = round(uncertainty, -first_significant_digit)
return quantity, uncertainty
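# Quick illustrative check (our annotation; values are made up):
#     reduce_to_first_significant_digit(3.14159, 0.023) -> (3.14, 0.02)
# because floor(log10(0.023)) == -2, so both values are rounded to 2 decimals.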
def load_yank_analysis():
"""Load the YANK analysis in a single dataframe."""
yank_free_energies = {}
for system_id in SYSTEM_IDS:
file_path = os.path.join(YANK_ANALYSIS_DIR_PATH, 'yank-{}.json'.format(system_id))
with open(file_path, 'r') as f:
yank_free_energies[system_id] = json.load(f)
return yank_free_energies
def fit_efficiency(mean_data, find_best_fit=True):
"""Compute the efficiency by fitting the model and using only the asymptotic data.
We fit using the simulation percentage as the independent value
because it is less prone to overflowing during fitting. We then
return the efficiency in units of (kcal/mol)**2/n_energy_evaluations.
"""
from scipy.optimize import curve_fit
def model(x, log_efficiency):
return np.exp(log_efficiency) / x
    variances = mean_data['std'].values**2  # renamed from `vars` to avoid shadowing the builtin
cost = mean_data['Simulation percentage'].values
# cost = mean_data['N energy evaluations'].values / 1e7
if find_best_fit:
        # Find fit with best error up to discarding 50% of the calculation.
max_discarded = math.floor(0.5*len(cost))
else:
# Use all the data.
max_discarded = 1
# Fit.
fits = []
for n_discarded in range(max_discarded):
cost_fit = cost[n_discarded:]
        vars_fit = variances[n_discarded:]
fit = curve_fit(model, cost_fit, vars_fit, p0=[0.0])
fits.append((np.exp(fit[0]), fit[1]))
# Find the fit with the minimum error.
n_discarded = fits.index(min(fits, key=lambda x: x[1]))
# Convert efficiency / simulation_percentage to efficiency / n_energy_evaluations
efficiency = fits[n_discarded][0][0] / 100 * mean_data['N energy evaluations'].values[-1]
# efficiency = fits[n_discarded][0][0] * 1e7
return efficiency, n_discarded
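# Worked note (our annotation): fit_efficiency() assumes the asymptotic scaling
#     var(c) = std(c)**2 ~ efficiency / c,
# so the fitted exp(log_efficiency) is the (roughly constant) product
# var(c) * c. Fitting log_efficiency instead of efficiency itself keeps the
# optimizer in a well-scaled parameter space, as noted in the docstring.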
def export_submissions(submissions, reference_free_energies):
"""Export the submission data to CSV and JSON format."""
for submission in submissions:
exported_data = {}
# Export data of the 5 independent replicates.
for system_id in sorted(submission.data['System ID'].unique()):
system_id_data = submission.data[submission.data['System ID'] == system_id]
exported_data[system_id] = collections.OrderedDict([
('DG', system_id_data[DG_KEY].values.tolist()),
('dDG', system_id_data[DDG_KEY].values.tolist()),
('cpu_times', system_id_data['CPU time [s]'].values.tolist()),
('n_energy_evaluations', system_id_data['N energy evaluations'].values.tolist()),
])
# Export data of mean trajectory and confidence intervals.
mean_free_energies = submission.mean_free_energies()
for system_name in mean_free_energies['System name'].unique():
system_name_data = mean_free_energies[mean_free_energies['System name'] == system_name]
# Obtain free energies and bias.
free_energies = system_name_data[DG_KEY].values
free_energies_ci = system_name_data['$\Delta$G CI'].values
reference_diff = free_energies - reference_free_energies.loc[system_name, '$\Delta$G [kcal/mol]']
exported_data[system_name + '-mean'] = collections.OrderedDict([
('DG', free_energies.tolist()),
('DG_CI', free_energies_ci.tolist()),
('reference_difference', reference_diff.tolist()),
('n_energy_evaluations', system_name_data['N energy evaluations'].values.tolist()),
])
# Export.
file_base_path = os.path.join(SAMPLING_DATA_DIR_PATH, submission.receipt_id)
export_dictionary(exported_data, file_base_path)
# =============================================================================
# PLOTTING FUNCTIONS
# =============================================================================
def plot_mean_free_energy(mean_data, ax, x='Simulation percentage',
color_mean=None, color_ci=None, zorder=None,
start=None, stride=1, scale_n_energy_evaluations=True,
plot_ci=True, **plot_kwargs):
"""Plot mean trajectory with confidence intervals."""
ci_key = '$\Delta$G CI'
if start is None:
# Discard the first datapoint which are 0.0 (i.e. no estimate).
start = np.nonzero(mean_data[DG_KEY].values)[0][0]
if x == 'N energy evaluations' and scale_n_energy_evaluations:
# Plot in millions of energy evaluations.
scale = N_ENERGY_EVALUATIONS_SCALE
else:
scale = 1
x = mean_data[x].values[start::stride] / scale
mean_dg = mean_data[DG_KEY].values[start::stride]
sem_dg = mean_data[ci_key].values[start::stride]
# Plot mean trajectory confidence intervals.
if plot_ci:
ax.fill_between(x, mean_dg + sem_dg, mean_dg - sem_dg, alpha=0.15, color=color_ci, zorder=zorder)
# Plot the mean free energy trajectory.
if zorder is not None:
# Push the CI shaded area in the background so that the trajectories are always visible.
zorder += 20
ax.plot(x, mean_dg, color=color_mean, alpha=1.0, zorder=zorder, **plot_kwargs)
return ax
def plot_mean_data(mean_data, axes, color=None, ls=None, label=None, x='N energy evaluations',
zorder=None, plot_std=True, plot_bias=True, plot_ci=True):
"""Plot free energy, variance and bias as a function of the cost in three different axes."""
# Do not plot the part of data without index.
first_nonzero_idx = np.nonzero(mean_data[DG_KEY].values)[0][0]
# If the x-axis is the number of energy/force evaluations, plot it in units of millions.
if x == 'N energy evaluations':
scale = N_ENERGY_EVALUATIONS_SCALE
else:
scale = 1
# Plot the submission mean trajectory with CI.
plot_mean_free_energy(mean_data, x=x, ax=axes[0],
color_mean=color, color_ci=color, ls=ls, zorder=zorder,
start=first_nonzero_idx, label=label, plot_ci=plot_ci)
# Plot standard deviation of the trajectories.
if plot_std:
axes[1].plot(mean_data[x].values[first_nonzero_idx:] / scale,
mean_data['std'].values[first_nonzero_idx:], color=color, alpha=0.8,
ls=ls, zorder=zorder, label=label)
if plot_bias:
axes[2].plot(mean_data[x].values[first_nonzero_idx:] / scale,
mean_data['bias'].values[first_nonzero_idx:], color=color, alpha=0.8,
ls=ls, zorder=zorder, label=label)
def align_yaxis(ax1, v1, ax2, v2):
"""Adjust ax2 ylimit so that v2 in in the twin ax2 is aligned to v1 in ax1.
From https://stackoverflow.com/questions/10481990/matplotlib-axis-with-two-scales-shared-origin .
"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
inv = ax2.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
miny, maxy = ax2.get_ylim()
ax2.set_ylim(miny+dy, maxy+dy)
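# Hedged usage sketch (our annotation): typical pairing with a twin axis, as
# done in plot_submissions_trajectory() below.
#
#     ax2 = ax.twinx()
#     align_yaxis(ax, reference_value, ax2, 0.0)  # 0.0 on ax2 meets reference_value on ax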
# =============================================================================
# FIGURE 1 - SAMPLING CHALLENGE OVERVIEW
# =============================================================================
def plot_example_bias_variance(yank_analysis, type='mixed', cost='generic',
max_n_eval_percentage=1.0,
mixed_proportion=0.5,
model_free_energy=None,
plot_experimental_value=False):
"""Free energy trajectories used to visualize bias and variance on the plots.
This is used to illustrate how bias and uncertainty are intended in the paper.
Parameters
----------
type : str, optional
Can be 'single' (plot only CB8-G3-1), 'all' (plot all system IDs of CB8-G3),
'mean' (plot mean trajectory and uncertainties), and 'mixed (first part is
all system IDs and second part is mean trajectory and uncertainties).
cost : str, optional
Can be 'generic' (no label on x-axis) or 'neval' (x-axis in number of
energy evaluations).
mixed_proportion : float, optional
The proportion of all System IDs and mean trajectories in mixed-type plots.
"""
# sns.set_context('paper', font_scale=1.6)
sns.set_style('white')
sns.set_context('paper', font_scale=1.0)
# Load the data
n_iterations = 40000
cb8_data = yank_analysis.get_free_energies_from_iteration(n_iterations, system_name='CB8-G3', mean_trajectory=False)
cb8_data_mean = yank_analysis.get_free_energies_from_iteration(n_iterations, system_name='CB8-G3', mean_trajectory=True)
max_n_eval = max(cb8_data_mean['N energy evaluations'])
max_n_eval_scaled = int(max_n_eval / N_ENERGY_EVALUATIONS_SCALE)
max_displayed_n_eval = next(x for x in cb8_data_mean['N energy evaluations'] if x >= max_n_eval * max_n_eval_percentage)
max_displayed_n_eval_scaled = int(max_displayed_n_eval / N_ENERGY_EVALUATIONS_SCALE)
# Determine the asymptotic free energy if not given.
if model_free_energy is None:
model_free_energy = cb8_data_mean[DG_KEY].values[-1]
# Scale the number of energy evaluations.
cb8_data.loc[:,'N energy evaluations'] /= N_ENERGY_EVALUATIONS_SCALE
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(2.5, 1.8))
if type == 'single':
# Plot only CB8-G3-1.
cb8_data_1 = cb8_data[cb8_data['System ID'] == 'CB8-G3-1']
sns.lineplot(data=cb8_data_1, x='N energy evaluations', y=DG_KEY,
hue='System ID', palette='bright', ax=ax, alpha=0.6)
elif type == 'all':
# Plot the 5 replicates individual trajectories.
sns.lineplot(data=cb8_data, x='N energy evaluations', y=DG_KEY,
hue='System ID', palette='bright', ax=ax, alpha=0.6)
elif type == 'mean':
# Plot the submission mean trajectory with CI.
plot_mean_free_energy(cb8_data_mean, x='N energy evaluations', ax=ax,
color_mean='black', plot_ci=True,
color_ci='black',
scale_n_energy_evaluations=True)
elif type == 'mixed':
# Plot all System IDs for the first half and mean/uncertainty in second half.
half_n_eval = max_displayed_n_eval_scaled * mixed_proportion
cb8_data_first_half = cb8_data[cb8_data['N energy evaluations'] <= half_n_eval + max_n_eval_scaled / 100]
sns.lineplot(data=cb8_data_first_half, x='N energy evaluations', y=DG_KEY,
hue='System ID', palette='bright', ax=ax, alpha=0.6)
cb8_data_second_half = cb8_data_mean[cb8_data_mean['N energy evaluations'] >= half_n_eval * N_ENERGY_EVALUATIONS_SCALE]
plot_mean_free_energy(cb8_data_second_half, x='N energy evaluations', ax=ax,
color_mean='black', plot_ci=True,
color_ci=(0.3, 0.3, 0.3), scale_n_energy_evaluations=True,
ls='--')
try:
ax.get_legend().remove()
except AttributeError:
pass
# Set limits
x_lim = (0, max_displayed_n_eval_scaled)
ax.set_xlim(x_lim)
y_lim = (-12.5, -10.5)
ax.set_ylim(y_lim)
# Plot model and experiment indication. Both values are not real data, just an example.
model_free_energy = -10.75
final_prediction = cb8_data_mean[cb8_data_mean['N energy evaluations'] == max_displayed_n_eval][DG_KEY].values[0]
ax.plot(x_lim, [model_free_energy]*2, color='gray', ls='--')
ax.text(x_lim[-1]+(max_n_eval_scaled*max_n_eval_percentage)/100, model_free_energy, r'$\Delta$G$_{\theta}$')
ax.text(x_lim[-1]+(max_n_eval_scaled*max_n_eval_percentage)/100, final_prediction - 0.13, r'$\overline{\Delta G}$')
# Plot experimental value horizontal line only for generic plot.
if plot_experimental_value:
experiment_dg = -11.75
plt.plot(x_lim, [experiment_dg]*2, color='black')
if cost == 'neval':
ax.set_xlabel('N force/energy evaluations')
else:
ax.set_xlabel('Computational cost', labelpad=-5)
ax.set_ylabel('$\Delta$G', labelpad=-5)
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.tight_layout(pad=0.1, rect=[0.0, 0.0, 0.90, 1.0])
# Save file.
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure 1 - host-guest')
os.makedirs(figure_dir_path, exist_ok=True)
output_base_path = os.path.join(figure_dir_path, 'example_trajectories')
plt.savefig(output_base_path + '.pdf')
# =============================================================================
# FIGURE 2 - MEAN ERROR AND RELATIVE EFFICIENCY CARTOON
# =============================================================================
def plot_mean_error_cartoon():
"""Plot the cartoon used to explain mean error and relative efficiency.
    This is used as an example to clarify some gotchas with the definition
of efficiency.
"""
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
sns.set_context('paper')
sns.set_style('white')
def err_decay_func_square(decay_coeff, c):
return decay_coeff / np.sqrt(c)
def mean_error_square(decay_coeff, c_min, c_max):
return 2 * decay_coeff * (np.sqrt(c_max) - np.sqrt(c_min)) / (c_max - c_min)
def err_decay_func_B(decay_coeff, c):
return decay_coeff / c**(5/6)
def mean_error_B(decay_coeff, c_min, c_max):
return 6 * decay_coeff * (c_max**(1/6) - c_min**(1/6)) / (c_max - c_min)
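    # Our annotation: both closed forms are cost-averaged errors. For a decay
    # err(c) = d * c**(-q),
    #     E[err] = (1 / (c_max - c_min)) * integral_{c_min}^{c_max} d * c**(-q) dc,
    # which evaluates to the expressions above with q = 1/2 ('square' methods)
    # and q = 5/6 (method B).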
decay_coeffs = {
'A': 1.0,
'B': 2.5,
'Z': 1.5,
}
c_ranges = collections.OrderedDict([
("A'", np.arange(1, 4.5, 0.1)),
("A''", np.arange(3, 6, 0.1)),
("B", np.arange(2, 6.5, 0.1)),
("Z", np.arange(1, 6.5, 0.1)),
])
# Determine colors colors.
colors = {m: 'C'+str(i) for i, m in enumerate(sorted(c_ranges))}
# Plot the error trajectories.
fig, ax = plt.subplots(figsize=(3.5, 2.6))
# method_names = ["B", "Z", "A'", "A''"]
method_names = ["Z", "A'", "A''"]
for method_name in method_names:
color = colors[method_name]
c_range = c_ranges[method_name]
decay_coeff = decay_coeffs[method_name[0]]
if method_name == 'B':
err_decay_func = err_decay_func_B
else:
err_decay_func = err_decay_func_square
err = err_decay_func(decay_coeff, c_range)
# Plot error area.
ax.plot(c_range, err, color=color, label=method_name, zorder=1)
ax.fill_between(c_range, err, 0, color=color, alpha=0.5, zorder=0)
# Add method label.
c_method_label_idx = int(len(c_range) / 8)
ax.text(c_range[c_method_label_idx], err[c_method_label_idx]+0.01, method_name, fontsize=12)
if method_name[0] == 'A':
# Plot mean error.
c_min, c_max = min(c_range), max(c_range)
mean_err = mean_error_square(decay_coeff, c_min, c_max)
# Start mean error horizontal line from the error curve.
c_mean = (decay_coeff / mean_err)**2
ax.plot([0, c_mean], [mean_err, mean_err], color='black', ls='--', alpha=0.8, zorder=1)
# Add label mean error.
# ax.text(1.05, mean_err+0.025, '$\mathbb{E}[RMSE_{' + method_name + '}]$', fontsize=9)
ax.text(-0.3, mean_err+0.025, '$\mathbb{E}[RMSE_{' + method_name + '}]$', fontsize=9)
# Add c_min/max labels.
ax.text(c_min-0.4, -0.1, 'c$_{min,' + method_name + '}$', fontsize=9)
ax.text(c_max-0.4, -0.1, 'c$_{max,' + method_name + '}$', fontsize=9)
# Configure axes.
ax.set_xlim(1, 6.4)
ax.set_ylim(0, 2)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_ylabel('$RMSE(\Delta G)$')
ax.set_xlabel('computational cost')
# Pull axes labels closest to axes.
ax.tick_params(axis='x', which='major', pad=2.0)
ax.yaxis.set_label_coords(0.0, 0.65)
# Plot the relative efficiencies in an inset plot.
ax_ins = inset_axes(ax, width='100%', height='100%', bbox_to_anchor=[145, 115, 90, 50])
# Compute relative efficiencies with respect to Z.
relative_efficiencies = collections.OrderedDict()
for method_name in [name for name in method_names if name != 'Z']:
c_min, c_max = min(c_ranges[method_name]), max(c_ranges[method_name])
if method_name == 'B':
mean_error_func = mean_error_B
else:
mean_error_func = mean_error_square
mean_err_method = mean_error_func(decay_coeffs[method_name[0]], c_min, c_max)
mean_err_Z = mean_error_square(decay_coeffs['Z'], c_min, c_max)
relative_efficiencies[method_name] = -np.log(mean_err_method/mean_err_Z)
# Plot horizontal bar plot with all efficiencies.
labels, rel_effs = zip(*relative_efficiencies.items())
bar_colors = [colors[m] for m in labels]
labels = [l + '/Z' for l in labels]
# labels = ['$e_{err,' + str(l) + '/Z}$' for l in labels]
ax_ins.barh(y=labels, width=rel_effs, color=bar_colors, alpha=0.85)
ax_ins.set_title('relative efficiency', pad=2.5)
# plt.tight_layout(rect=[0.0, 0.0, 1.0, 1.0])
plt.tight_layout(rect=[0.1, 0.0, 1.0, 1.0])
# Pull axes labels closest to axes.
ax_ins.set_xticks([0.0])
ax_ins.grid(axis='x')
ax_ins.tick_params(axis='x', which='major', pad=0.0)
ax_ins.tick_params(axis='y', which='major', pad=0.0)
output_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure2-efficiency_cartoon')
os.makedirs(output_dir_path, exist_ok=True)
plt.savefig(os.path.join(output_dir_path, 'error_trajectories.pdf'))
# =============================================================================
# FIGURE 3 - FREE ENERGY TRAJECTORIES
# =============================================================================
def plot_submissions_trajectory(submissions, yank_analysis, axes, y_limits=None,
plot_std=True, plot_bias=True, plot_bias_to_reference=False,
system_names=None):
"""Plot free energy trajectories, std, and bias of the given submissions."""
if system_names is None:
system_names = ['CB8-G3', 'OA-G3', 'OA-G6']
n_systems = len(system_names)
max_n_energy_evaluations = {system_name: 0 for system_name in system_names}
min_n_energy_evaluations = {system_name: np.inf for system_name in system_names}
# Handle default arguments.
if y_limits is None:
# 3 by 3 matrix of y limits for the plots.
y_limits = [[None for _ in range(n_systems)] for _ in range(n_systems)]
# We need a 2D array of axes for the code to work even if we're not plotting std or bias.
try:
axes_shape = len(axes.shape)
except AttributeError:
axes = np.array([[axes]])
else:
if axes_shape == 1:
axes = np.array([axes])
# Build a dictionary mapping submissions and system names to their mean data.
all_mean_data = {}
for submission in submissions:
# We always want to print in order
all_mean_data[submission.paper_name] = {}
mean_free_energies = submission.mean_free_energies()
for system_name in system_names:
# CB8-G3 calculations for GROMACS/EE did not converge.
if submission.name == 'Expanded-ensemble/MBAR' and system_name == 'CB8-G3':
continue
# Add mean free energies for this system.
system_mean_data = mean_free_energies[mean_free_energies['System name'] == system_name]
n_energy_evaluations = system_mean_data['N energy evaluations'].values[-1]
all_mean_data[submission.paper_name][system_name] = system_mean_data
# Keep track of the maximum and minimum number of energy evaluations,
# which will be used to determine how to truncate the plotted reference
# data and determine the zorder of the trajectories respectively.
max_n_energy_evaluations[system_name] = max(max_n_energy_evaluations[system_name],
n_energy_evaluations)
min_n_energy_evaluations[system_name] = min(min_n_energy_evaluations[system_name],
n_energy_evaluations)
# Add also reference YANK calculations if provided.
if yank_analysis is not None:
all_mean_data[YANK_METHOD_PAPER_NAME] = {}
for system_name in system_names:
system_mean_data = yank_analysis.get_free_energies_from_energy_evaluations(
max_n_energy_evaluations[system_name], system_name=system_name, mean_trajectory=True)
all_mean_data[YANK_METHOD_PAPER_NAME][system_name] = system_mean_data
# Create a table mapping submissions and system name to the zorder used
# to plot the free energy trajectory so that smaller shaded areas are on
# top of bigger ones.
# First find the average CI for all methods up to min_n_energy_evaluations.
methods_cis = {name: {} for name in system_names}
for method_name, method_mean_data in all_mean_data.items():
for system_name, system_mean_data in method_mean_data.items():
# Find index of all energy evaluations < min_n_energy_evaluations.
n_energy_evaluations = system_mean_data['N energy evaluations'].values
last_idx = np.searchsorted(n_energy_evaluations, min_n_energy_evaluations[system_name], side='right')
cis = system_mean_data['$\Delta$G CI'].values[:last_idx]
methods_cis[system_name][method_name] = np.mean(cis)
# For each system, order methods from smallest CI (plot on top) to greatest CI (background).
zorders = {name: {} for name in system_names}
for system_name, system_cis in methods_cis.items():
ordered_methods = sorted(system_cis.keys(), key=lambda method_name: system_cis[method_name])
for zorder, method_name in enumerate(ordered_methods):
zorders[system_name][method_name] = zorder
# The columns are in order CB8-G3, OA-G3, and OA-G6.
system_columns = {'CB8-G3': 0, 'OA-G3': 1, 'OA-G6': 2}
    # Plot submissions in alphabetical order to order the legend labels.
for method_name in sorted(all_mean_data.keys()):
submission_mean_data = all_mean_data[method_name]
submission_color = SUBMISSION_COLORS[method_name]
submission_ls = SUBMISSION_LINE_STYLES[method_name]
# Plot free energy trajectories.
for system_name, mean_data in submission_mean_data.items():
ax_idx = system_columns[system_name]
# The OA prediction of the NS short protocol are the same of the long protocol submission file.
if method_name == 'GROMACS/NS-DS/SB-long' and system_name != 'CB8-G3':
# Just add the label.
axes[0][ax_idx].plot([], color=submission_color, ls=submission_ls, label=method_name)
continue
# Update maximum number of energy evaluations.
n_energy_evaluations = mean_data['N energy evaluations'].values[-1]
max_n_energy_evaluations[system_name] = max(max_n_energy_evaluations[system_name],
n_energy_evaluations)
# Determine zorder and plot.
zorder = zorders[system_name][method_name]
plot_mean_data(mean_data, axes[:,ax_idx], color=submission_color,
ls=submission_ls, zorder=zorder, label=method_name,
plot_std=plot_std, plot_bias=plot_bias)
# Fix labels.
axes[0][0].set_ylabel('$\Delta$G [kcal/mol]')
if plot_std:
axes[1][0].set_ylabel('std($\Delta$G) [kcal/mol]')
if plot_bias:
axes[2][0].set_ylabel('bias [kcal/mol]')
central_column_idx = int(len(axes[0])/2)
axes[-1][central_column_idx].set_xlabel('number of energy/force evaluations [10$^6$]')
# Fix axes limits.
for ax_idx, system_name in enumerate(system_names):
for row_idx in range(len(axes)):
ax = axes[row_idx][ax_idx]
# Set the x-axis limits.
ax.set_xlim((0, max_n_energy_evaluations[system_name]/N_ENERGY_EVALUATIONS_SCALE))
# Keep the x-axis label only at the bottom row.
if row_idx != len(axes)-1:
ax.xaxis.set_ticklabels([])
y_lim = y_limits[row_idx][ax_idx]
if y_lim is not None:
ax.set_ylim(y_lim)
# Set the system name in the title.
axes[0][ax_idx].set_title(system_name)
# Create a bias axis AFTER the ylim has been set.
if yank_analysis is not None and plot_bias_to_reference:
for ax_idx, (system_name, ax) in enumerate(zip(system_names, axes[0])):
yank_full_mean_data = yank_analysis.get_system_free_energies(system_name, mean_trajectory=True)
ref_free_energy = yank_full_mean_data[DG_KEY].values[-1]
with sns.axes_style('white'):
ax2 = ax.twinx()
# Plot a vertical line to fix the scale.
vertical_line = np.linspace(*ax.get_ylim()) - ref_free_energy
ax2.plot([50] * len(vertical_line), vertical_line, alpha=0.0001)
ax2.grid(alpha=0.5, linestyle='dashed', zorder=0)
# We add the bias y-label only on the rightmost Axis.
if ax_idx == n_systems - 1:
ax2.set_ylabel('Bias to reference [kcal/mol]')
# Set the 0 of the twin axis to the YANK reference free energy.
align_yaxis(ax, ref_free_energy, ax2, 0.0)
def plot_all_entries_trajectory(submissions, yank_analysis, zoomed=False):
"""Plot free energy trajectories, std, and bias of the challenge entries."""
sns.set_style('whitegrid')
sns.set_context('paper')
# Create a figure with 3 columns (one for each system) and 2 rows.
# The first row contains the free energy trajectory and CI, the second
# a plot of the estimator variance, and the third the bias to the
# asymptotic value.
    # The figure size is currently the same with REVO (zoomed=False) and
    # without it (zoomed=True).
    figsize = (7.25, 7.0)
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=figsize)
# Optionally, remove REVO.
if zoomed:
submissions = [s for s in submissions if s.name not in ['WExploreRateRatio']]
if zoomed:
# Y-axis limits when REVO calculations are excluded.
y_limits = [
[(-15, -10), (-9, -4), (-9, -4)],
[(0, 2), (0, 0.8), (0, 0.8)],
[(-3, 1), (-0.6, 0.6), (-0.6, 0.6)],
]
else:
# Y-axis limits when REVO calculations are included.
y_limits = [
[(-17, -9), (-13, -5), (-13, -5)],
[(0, 2), (0, 1.75), (0, 1.75)],
[(-4, 4), (-0.6, 0.6), (-0.6, 0.6)],
]
plot_submissions_trajectory(submissions, yank_analysis, axes, y_limits=y_limits)
# Show/save figure.
if zoomed:
plt.tight_layout(h_pad=0.2, rect=[0.0, 0.00, 1.0, 0.92], w_pad=0.0) # Without REVO
else:
plt.tight_layout(h_pad=0.2, rect=[0.0, 0.00, 1.0, 0.92]) # With REVO
# Plot legend.
if zoomed:
# bbox_to_anchor = (2.52, 1.55) # Without REVO.
bbox_to_anchor = (2.4, 1.48)
else:
bbox_to_anchor = (2.4, 1.48) # With REVO.
axes[0][1].legend(loc='upper right', bbox_to_anchor=bbox_to_anchor,
fancybox=True, ncol=4)
plt.subplots_adjust(wspace=0.35)
# plt.show()
if zoomed:
file_name = 'Figure3-free_energy_trajectories_zoomed'
else:
file_name = 'Figure3-free_energy_trajectories'
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure3-free_energy_trajectories')
os.makedirs(figure_dir_path, exist_ok=True)
output_base_path = os.path.join(figure_dir_path, file_name)
plt.savefig(output_base_path + '.pdf')
# plt.savefig(output_base_path + '.png', dpi=500)
# =============================================================================
# FIGURE 4 - NONEQUILIBRIUM SWITCHING ESTIMATOR COMPARISON
# =============================================================================
def plot_all_nonequilibrium_switching(submissions):
"""Plot free energy trajectories, std, and bias of the nonequilibrium-switching calculations."""
# Create a figure with 3 columns (one for each system) and 2 rows.
# The first row contains the free energy trajectory and CI, the second
# a plot of the estimator variance, and the third the bias to the
# asymptotic value.
figsize = (7.25, 3.5)
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
# Select nonequilibrium-switching calculations with estimators.
submissions = [s for s in submissions if 'NS' in s.paper_name]
# Y-axis limits.
y_limits = [
[(-20, 5), (-40, 0), (-40, 0)]
]
plot_submissions_trajectory(submissions, yank_analysis=None, axes=axes,
y_limits=y_limits, plot_std=False, plot_bias=False)
# Show/save figure.
plt.tight_layout(pad=0.0, rect=[0.0, 0.00, 1.0, 0.85])
# Plot legend.
legend = axes[0].legend(loc='upper left', bbox_to_anchor=(0.6, 1.3),
fancybox=True, ncol=3)
# Change legend labels to refer to estimator used rather than overall method ID.
legend_labels_map = {
'GROMACS/NS-DS/SB-long': 'BAR-long',
'GROMACS/NS-DS/SB': 'BAR',
'GROMACS/NS-Jarz-F': 'Jarzynski-Forward',
'GROMACS/NS-Jarz-R': 'Jarzynski-Reverse',
'GROMACS/NS-Gauss-F': 'Gaussian-Forward',
'GROMACS/NS-Gauss-R': 'Gaussian-Reverse',
}
for text in legend.get_texts():
text.set_text(legend_labels_map[text.get_text()])
plt.subplots_adjust(wspace=0.35)
# plt.show()
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure4-nonequilibrium_comparison')
os.makedirs(figure_dir_path, exist_ok=True)
output_base_path = os.path.join(figure_dir_path, 'Figure4-nonequilibrium_comparison')
plt.savefig(output_base_path + '.pdf')
# plt.savefig(output_base_path + '.png', dpi=500)
# =============================================================================
# FIGURE 5 - BAROSTAT AND RESTRAINT
# =============================================================================
# Directories containing the volume information of YANK and GROMACS/EE.
BAROSTAT_DATA_DIR_PATH = os.path.join('..', 'SAMPLing', 'Data', 'BarostatData')
YANK_VOLUMES_DIR_PATH = os.path.join(BAROSTAT_DATA_DIR_PATH, 'YankVolumes')
EE_VOLUMES_DIR_PATH = os.path.join(BAROSTAT_DATA_DIR_PATH, 'EEVolumes')
def plot_volume_distributions(axes, plot_predicted=False):
"""Plot the volume distributions obtained with Monte Carlo and Berendsen barostat."""
import scipy.stats
import scipy.integrate
from simtk import unit
# Load data.
mc_volumes = collections.OrderedDict([
(1, np.load(os.path.join(YANK_VOLUMES_DIR_PATH, 'volumes_pressure100.npy'))),
(100, np.load(os.path.join(YANK_VOLUMES_DIR_PATH, 'volumes_pressure10000.npy'))),
])
mc_volumes_hrex = collections.OrderedDict([
(1, np.load(os.path.join(YANK_VOLUMES_DIR_PATH, 'hrex_state_volumes_state0.npy'))),
(58, np.load(os.path.join(YANK_VOLUMES_DIR_PATH, 'hrex_state_volumes_state58.npy'))),
])
b_volumes = collections.OrderedDict([
(1, np.load(os.path.join(EE_VOLUMES_DIR_PATH, '1atm_vanilla.npy'))),
(100, np.load(os.path.join(EE_VOLUMES_DIR_PATH, '100atm_vanilla.npy'))),
])
b_volumes_ee = collections.OrderedDict([
(1, np.load(os.path.join(EE_VOLUMES_DIR_PATH, '1atm_expanded.npy'))),
(100, np.load(os.path.join(EE_VOLUMES_DIR_PATH, '100atm_expanded.npy'))),
])
# Print some statistics for each distribution.
for volume_trajectories, label in [(mc_volumes, 'MC-MD '),
(mc_volumes_hrex, 'MC-HREX'),
(b_volumes, 'BB-MD '),
(b_volumes_ee, 'BB-EE ')]:
for pressure, trajectory in volume_trajectories.items():
n = len(trajectory)
t_stat = 2.326 # 98% CI
mean = np.mean(trajectory)
sem = scipy.stats.sem(trajectory)
mean_ci = t_stat * sem
var = np.var(trajectory, ddof=1)
# Standard error of variance if volume is gaussianly distributed
sev = var * np.sqrt(2 / (n-1))
var_ci = t_stat * sev
skew = scipy.stats.skew(trajectory)
# Standard error of skewness if volume is gaussianly distributed
ses = np.sqrt( 6*n*(n-1) / ((n-2)*(n+1)*(n+3)) )
skew_ci = t_stat * ses
print('{}-{} (n={}): mean={:.3f} +- {:.3f}nm^3\t\tvar={:.3f} +- {:.3f}\tskew={:.3f} +- {:.3f}'.format(
pressure, label, n, mean, mean_ci, var, var_ci, skew, skew_ci))
# Plot the 1atm vs 100atm comparison.
barostats = ['B', 'MC']
for ax, volume_trajectories, barostat in zip(axes, [b_volumes, mc_volumes], barostats):
        # Only 'MD' is used in the legend label; the barostat-specific prefix
        # is intentionally dropped.
        barostat = 'MD'
for pressure, trajectory in volume_trajectories.items():
label = '$\\rho_{{\mathrm{{{}}}}}$(V|{}atm)'.format(barostat, pressure)
ax = sns.distplot(trajectory, label=label, hist=False, ax=ax)
if plot_predicted:
# Plot predicted distribution.
beta = 1.0 / (unit.BOLTZMANN_CONSTANT_kB * 298.15*unit.kelvin)
p1 = 1.0 * unit.atmosphere
p2 = 100.0 * unit.atmosphere
volumes = np.linspace(78.0, 82.0, num=200)
fit = scipy.stats.norm
# Fit the original distribution.
original_pressure, new_pressure = list(volume_trajectories.keys())
original_trajectory = list(volume_trajectories.values())[0]
fit_parameters = fit.fit(original_trajectory)
# Find normalizing constant predicted distribution.
predicted_distribution = lambda v: np.exp(-beta*(p2 - p1)*v*unit.nanometer**3) * fit.pdf([v], *fit_parameters)
normalizing_factor = scipy.integrate.quad(predicted_distribution, volumes[0], volumes[-1])[0]
predicted = np.array([predicted_distribution(v) / normalizing_factor for v in volumes])
# Set the scale.
label = '$\\rho_{{\mathrm{{{}}}}}$(V|{}atm)$\cdot e^{{\\beta ({}atm - {}atm) V}}$'.format(barostat, original_pressure, new_pressure, original_pressure)
ax.plot(volumes, predicted, ls='--', label=label)
# ax.plot(volumes, [fit.pdf([v], *fit_parameters) for v in volumes], label='original')
# Plot comparison MD vs expanded ensemble and HREX volumes.
for ax_idx, (trajectory, label) in enumerate([
(b_volumes_ee[1], 'B,EE'), (mc_volumes_hrex[1], 'MC,HREX')
]):
        label = 'E'  # Overrides the per-trajectory label; the legend shows a generic 'E'.
ax = axes[ax_idx]
label = '$\\rho_{{\mathrm{{{}}}}}$(V|1atm)'.format(label)
sns.distplot(trajectory, label=label, hist=False, ax=ax)
# Set titles and configure axes.
axes[0].set_title('Berendsen barostat volume distribution', pad=2.0)
axes[1].set_title('Monte Carlo barostat volume distribution', pad=2.0)
for ax_idx in range(len(axes)):
axes[ax_idx].set_xlim((78.8, 81.2))
axes[ax_idx].set_ylim((0.0, 6.0))
axes[ax_idx].set_ylabel('density')
axes[0].set_xlabel('', labelpad=0.3)
axes[1].set_xlabel('Volume [nm^3]', labelpad=0.3)
# Create single legend for both MC and B barostat axes.
bbox_to_anchor = (-0.1, -0.15)
axes[0].legend(fontsize='xx-small', loc='upper left', bbox_to_anchor=bbox_to_anchor, ncol=4,
fancybox=True, labelspacing=0.7, handletextpad=0.4, columnspacing=1.1,)
# axes[0].get_legend().remove()
axes[1].get_legend().remove()
plt.tight_layout(pad=0, rect=[0.0, 0.0, 1.0, 1.0])
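# Our annotation: the plot_predicted overlay above uses the standard NPT
# reweighting identity between two pressures at the same temperature,
#     rho(V | p2) ~ rho(V | p1) * exp(-beta * (p2 - p1) * V),
# which follows from the isothermal-isobaric weight exp(-beta * (U + p * V)).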
# Directory with the restraint information.
RESTRAINT_DATA_DIR_PATH = os.path.join('YankAnalysis', 'RestraintAnalysis')
# The state index of the discharged state with LJ interactions intact.
DISCHARGED_STATE = {
'CB8-G3': 25,
'OA-G3': 32,
'OA-G6': 29
}
# The final free energy predictions without restraint unbiasing.
BIASED_FREE_ENERGIES = {
'CB8-G3-0': -10.643,
'CB8-G3-1': -10.533,
'CB8-G3-2': -10.463,
'CB8-G3-3': None, # TODO: Run the biased analysis
'CB8-G3-4': -10.324,
'OA-G3-0': -5.476,
'OA-G3-1': -5.588,
'OA-G3-2': -5.486,
'OA-G3-3': -5.510,
'OA-G3-4': -5.497,
'OA-G6-0': -5.669,
'OA-G6-1': -5.665,
'OA-G6-2': -5.767,
'OA-G6-3': -5.737,
'OA-G6-4': -5.788,
}
def plot_restraint_distance_distribution(system_id, ax, kde=True, iteration_set=None):
"""Plot the distribution of restraint distances at bound, discharged, and decoupled states.
Return the 99.99-percentile restraint radius that was used as a cutoff during analysis.
"""
n_iterations = YANK_N_ITERATIONS + 1 # Count also iteration 0.
system_name = system_id[:-2]
discharged_state_idx = DISCHARGED_STATE[system_name]
# Load all distances cached during the analysis.
cache_dir_path = os.path.join('pkganalysis', 'cache', system_id.replace('-', ''))
cached_distances_file_path = os.path.join(cache_dir_path, 'restraint_distances_cache.npz')
distances_kn = np.load(cached_distances_file_path)['arr_0']
# Distances are in nm but we plot in Angstrom.
distances_kn *= 10
n_states = int(len(distances_kn) / n_iterations)
# Use the same colors that are used in the water analysis figures.
color_palette = sns.color_palette('viridis', n_colors=n_states)
color_palette = [color_palette[i] for i in (0, discharged_state_idx, -1)]
# Isolate distances in the bound, discharged (only LJ), and decoupled state.
distances_kn_bound = distances_kn[:n_iterations]
distances_kn_discharged = distances_kn[(discharged_state_idx-1)*n_iterations:discharged_state_idx*n_iterations]
distances_kn_decoupled = distances_kn[(n_states-1)*n_iterations:]
# Filter iterations.
if iteration_set is not None:
distances_kn_bound = distances_kn_bound[iteration_set]
distances_kn_discharged = distances_kn_discharged[iteration_set]
distances_kn_decoupled = distances_kn_decoupled[iteration_set]
assert len(distances_kn_bound) == len(distances_kn_decoupled)
# Plot the distributions.
# sns.distplot(distances_kn, ax=ax, kde=True, label='all states')
sns.distplot(distances_kn_bound, ax=ax, kde=kde, label='bound', color=color_palette[0])
sns.distplot(distances_kn_discharged, ax=ax, kde=kde, label='discharged', color=color_palette[1])
sns.distplot(distances_kn_decoupled, ax=ax, kde=kde, label='decoupled', color=color_palette[2])
# Plot the threshold used for analysis, computed as the
# 99.99-percentile of all distances in the bound state.
distance_cutoff = np.percentile(a=distances_kn_bound, q=99.99)
limits = ax.get_ylim()
ax.plot([distance_cutoff for _ in range(100)],
np.linspace(limits[0], limits[1]/2, num=100), color='black')
return distance_cutoff
def plot_restraint_profile(system_id, ax, restraint_cutoff):
"""Plot the free energy as a function of the restraint cutoff."""
# Load the free energy profile for this system.
restraint_profile_file_path = os.path.join(RESTRAINT_DATA_DIR_PATH,
system_id.replace('-', '') + '.json')
with open(restraint_profile_file_path, 'r') as f:
free_energies_profile = json.load(f)
# Reorder the free energies by increasing cutoff and convert str keys to floats.
free_energies_profile = [(float(d), f) for d, f in free_energies_profile.items()]
free_energies_profile = sorted(free_energies_profile, key=lambda x: x[0])
distance_cutoffs, free_energies = list(zip(*free_energies_profile))
f, df = list(zip(*free_energies))
# Convert string to floats.
distance_cutoffs = [float(c) for c in distance_cutoffs]
# Plot profile.
ax.errorbar(x=distance_cutoffs, y=f, yerr=df, label='after reweighting')
# Plot biased free energy
biased_f = BIASED_FREE_ENERGIES[system_id]
x = np.linspace(*ax.get_xlim())
ax.plot(x, [biased_f for _ in x], label='before reweighting')
# Plot restraint distance cutoff.
limits = ax.get_ylim()
x = [restraint_cutoff for _ in range(100)]
y = np.linspace(limits[0], limits[1], num=100)
ax.plot(x, y, color='black')
def plot_restraint_analysis(system_id, axes):
"""Plot distribution of restraint distances and free energy profile on two axes."""
# Histograms of restraint distances/energies.
ax = axes[0]
kde = True
restraint_cutoff = plot_restraint_distance_distribution(system_id, ax, kde=kde)
    # Set restraint distance distribution labels and titles.
ax.set_title('Restrained ligand-receptor distance', pad=2.0)
if kde is False:
ax.set_ylabel('Number of samples')
else:
ax.set_ylabel('density')
ax.legend(loc='upper right', fontsize='x-small')
ax.set_xlabel('Restrained distance [$\mathrm{\AA}$]', labelpad=0.3)
# Free energy as a function of restraint distance.
ax = axes[1]
    ax.set_title('$\Delta G$ as a function of restraint radius cutoff', pad=2.0)
plot_restraint_profile(system_id, ax, restraint_cutoff)
# Labels and legend.
ax.set_xlabel('Restraint radius cutoff [$\mathrm{\AA}$]', labelpad=0.3)
ax.set_ylabel('$\Delta G$ [kcal/mol]')
ax.legend(fontsize='x-small')
def plot_restraint_and_barostat_analysis():
"""Plot the Figure showing info for the restraint and barostat analysis."""
import seaborn as sns
from matplotlib import pyplot as plt
sns.set_style('whitegrid')
sns.set_context('paper', font_scale=1.0)
# Create two columns, each of them share the x-axis.
fig = plt.figure(figsize=(7.25, 4))
# Restraint distribution axes.
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(223, sharex=ax1)
barostat_axes = [ax1, ax2]
# Volume distribution axes.
ax3 = fig.add_subplot(222)
ax4 = fig.add_subplot(224, sharex=ax3)
restraint_axes = [ax3, ax4]
# Plot barostat analysis.
plot_volume_distributions(barostat_axes, plot_predicted=True)
# Plot restraint analysis.
system_id = 'OA-G3-0'
plot_restraint_analysis(system_id, restraint_axes)
# Configure axes.
restraint_axes[0].set_xlim((0, 10.045))
restraint_axes[1].set_ylim((-7, -3.9))
for ax in restraint_axes + barostat_axes:
ax.tick_params(axis='x', which='major', pad=0.1)
ax.tick_params(axis='y', which='major', pad=0.1)
plt.tight_layout(pad=0.3)
# plt.show()
output_file_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure5-restraint_barostat',
'restraint_barostat.pdf')
os.makedirs(os.path.dirname(output_file_path), exist_ok=True)
plt.savefig(output_file_path)
# =============================================================================
# FIGURE 6 - HREX INITIAL BIAS
# =============================================================================
def plot_yank_system_bias(system_name, data_dir_paths, axes, shift_to_origin=True, plot_std=True):
"""Plot the YANK free energy trajectoies when discarding initial samples for a single system."""
color_palette = sns.color_palette('viridis', n_colors=len(data_dir_paths)+1)
# Plot trajectories with truncated data.
all_iterations = set()
for data_idx, data_dir_path in enumerate(data_dir_paths):
yank_analysis = YankSamplingAnalysis(data_dir_path)
# In the YankAnalysis folder, each analysis starting from
# iteration N is in the folder "iterN/".
last_dir_name = os.path.basename(os.path.normpath(data_dir_path))
label = last_dir_name[4:]
# First color is for the full data.
color = color_palette[data_idx+1]
# Collect all iterations that we'll plot for the full data.
mean_data = yank_analysis.get_system_free_energies(system_name, mean_trajectory=True)
all_iterations.update(mean_data['HREX iteration'].values.tolist())
# Simulate plotting starting from the origin.
if shift_to_origin:
mean_data['HREX iteration'] -= mean_data['HREX iteration'].values[0]
plot_mean_data(mean_data, axes, x='HREX iteration', color=color,
label=label, plot_std=plot_std, plot_bias=False, plot_ci=False)
# Plot trajectory with full data.
color = color_palette[0]
# Plot an early iteration and all the iterations analyzed for the bias.
yank_analysis = YankSamplingAnalysis(YANK_ANALYSIS_DIR_PATH)
system_ids = [system_name + '-' + str(i) for i in range(5)]
first_iteration = yank_analysis.get_system_iterations(system_ids[0])[2]
iterations = [first_iteration] + sorted(all_iterations)
mean_data = yank_analysis._get_free_energies_from_iterations(
iterations, system_ids, mean_trajectory=True)
# Simulate plotting starting from the origin.
if shift_to_origin:
mean_data['HREX iteration'] -= mean_data['HREX iteration'].values[0]
plot_mean_data(mean_data, axes, x='HREX iteration', color=color,
label='0', plot_std=plot_std, plot_bias=False, plot_ci=False)
axes[0].set_title(system_name)
def plot_yank_bias(plot_std=True, figure_dir_path=None):
"""Plot YANK free energy trajectories when discarding initial samples."""
# In the first column, plot the "unshifted" trajectory of CB8-G3,
# with all sub-trajectories shifted to the origin. In the second
# and third columns, plot the trajectories of CB8-G3 and OA-G3
# with all sub-trajectories shifted to the origin.
what_to_plot = [
('CB8-G3', False),
# ('CB8-G3', True),
('OA-G3', False),
# ('OA-G3', False),
('OA-G6', False),
]
if plot_std:
n_rows = 2
else:
n_rows = 1
n_cols = len(what_to_plot)
fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(7.25, 4.0))
# The loops are based on a two dimensional array of axes.
if n_rows == 1:
axes = np.array([axes])
# Sort paths by how many samples they have.
data_dir_paths = ['YankAnalysis/BiasAnalysis/iter{}/'.format(i) for i in [1000, 2000, 4000, 8000, 16000, 24000]]
for column_idx, (system_name, shift_to_origin) in enumerate(what_to_plot):
plot_yank_system_bias(system_name, data_dir_paths, axes[:,column_idx],
shift_to_origin=shift_to_origin, plot_std=plot_std)
title = system_name + ' (shifted)' if shift_to_origin else system_name
axes[0,column_idx].set_title(title)
# Fix axes limits and labels.
ylimits = {
'CB8-G3': (-12.5, -10.5),
'OA-G3': (-8, -6),
'OA-G6': (-8, -6)
}
for column_idx, (system_name, _) in enumerate(what_to_plot):
axes[0][column_idx].set_ylim(ylimits[system_name])
if plot_std:
axes[1][column_idx].set_ylim((0, 0.6))
for row_idx, ax_idx in itertools.product(range(n_rows), range(n_cols)):
# Control the number of ticks for the x axis.
axes[row_idx][ax_idx].locator_params(axis='x', nbins=4)
# Set x limits for number of iterations.
axes[row_idx][ax_idx].set_xlim((0, YANK_N_ITERATIONS))
# Remove ticks labels that are shared with the last row.
for row_idx, ax_idx in itertools.product(range(n_rows-1), range(n_cols)):
axes[row_idx][ax_idx].set_xticklabels([])
# Set axes labels.
axes[0][0].set_ylabel('$\Delta$G [kcal/mol]')
if plot_std:
axes[1][0].set_ylabel('std($\Delta$G) [kcal/mol]')
# If there is an odd number of columns print x label only on the central one.
if n_cols % 2 == 1:
axes[-1][1].set_xlabel('HREX iteration')
else:
for ax in axes[-1]:
ax.set_xlabel('HREX iteration')
plt.tight_layout(h_pad=0.1, rect=[0.0, 0.00, 1.0, 0.91])
handles, labels = axes[0][0].get_legend_handles_labels()
handles = [handles[-1]] + handles[:-1]
labels = [labels[-1]] + labels[:-1]
bbox_to_anchor = (0.4, 1.53)
axes[0][0].legend(handles, labels, loc='upper left', bbox_to_anchor=bbox_to_anchor,
title='number of discarded initial iterations', ncol=len(data_dir_paths)+1,
fancybox=True, labelspacing=0.8, handletextpad=0.5, columnspacing=1.2,
fontsize='small')
# plt.show()
if figure_dir_path is None:
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure6-bias_hrex')
os.makedirs(figure_dir_path, exist_ok=True)
output_file_path = os.path.join(figure_dir_path, 'Figure6-bias_hrex')
plt.savefig(output_file_path + '.pdf')
# plt.savefig(output_file_path + '.png', dpi=600)
# =============================================================================
# SUPPORTING INFORMATION - EXAMPLE OF HREX BIAS
# =============================================================================
def simulate_correlation_samples():
"""Simulation of bias from same initial configuration.
There are 3 states as different harmonic oscillators, but all
or almost all the samples come from the first (bound) state to
simulate what happens when they don't decorrelate fast enough.
    The hypothesis is that starting from the bound state causes the
    initial free energy to be artificially negative if the correlation
    times are long.
The second (discharged) state is just a shifted harmonic oscillator
(same free energy as bound state). The third (unbound) is shifted
and has much higher entropy.
"""
from numpy.random import normal
from pymbar import MBAR
def harmonic_oscillator_free_energy(sigma):
"""Analytical expression for the free energy of a harmonic oscillator."""
#return - np.log(2 * np.pi * sigma**2) * 3.0 / 2.0 # 3D oscillator
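        # 1D oscillator in kT units: Z = sqrt(2*pi)*sigma, so F = -ln(Z).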
return - np.log(np.sqrt(2 * np.pi) * sigma)
def harmonic_oscillator_potential(x, loc, std):
"""Compute potential of the given positions given location
and standard deviation of the Gaussian distribution.
Potentials are returned in units of kT.
"""
spring_constant = 1 / std**2
return spring_constant / 2.0 * (x - loc)**2
def print_free_energies(Deltaf_ij, dDeltaf_ij):
mbar_str = ', '.join(['{:.4f} +- {:.4f}'.format(f, df) for f, df in zip(Deltaf_ij[:,0], dDeltaf_ij[:,0])])
print('MBAR :', mbar_str)
analytical_str = ', '.join(['{:.4f} '.format(f) for f in analytical_Deltaf])
print('Analytical:', analytical_str)
def compute_mbar_free_energy(all_samples, shifts, stds, analytical_f):
n_states = len(all_samples)
        # u_kn[k,n] is the reduced potential energy of the n-th sample evaluated at state k.
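        # Samples are concatenated by sampling state: columns
        # [sampled_k*n_samples, (sampled_k+1)*n_samples) hold the samples
        # drawn from state sampled_k.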
u_kn = np.empty(shape=(n_states, n_states*n_samples))
# Convert samples to potentials.
for k in range(n_states):
for sampled_k, samples in enumerate(all_samples):
start = sampled_k * n_samples
end = (sampled_k + 1) * n_samples
u_kn[k,start:end] = harmonic_oscillator_potential(samples, loc=shifts[k], std=stds[k])
# Compute MBAR free energy.
N_k = np.array([n_samples] * n_states)
mbar = MBAR(u_kn, N_k=N_k, initial_f_k=analytical_f)
Deltaf_ij, dDeltaf_ij, _ = mbar.getFreeEnergyDifferences()
return Deltaf_ij, dDeltaf_ij
# Determine standard deviation and shift of the harmonic distributions.
n_samples = 5000000
stds = np.array([2.0, 2.0, 5.0])
shifts = np.array([0.0, 2.0, 2.0])
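    # State 0 is the bound state; state 1 (discharged) is shifted with the
    # same sigma (same free energy); state 2 (unbound) is shifted and has a
    # larger sigma (higher entropy).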
print('\nspring constants:', 1 / stds**2)
# Compute analytical free energy.
analytical_f = np.array([harmonic_oscillator_free_energy(s) for s in stds])
analytical_Deltaf = np.array([analytical_f[0] - analytical_f[i] for i in range(len(stds))])
# FIRST TEST.
# Sample from all states and verify that MBAR free energy is correct.
# -------------------------------------------------------------------
all_samples = [normal(loc=l, scale=s, size=n_samples) for l, s in zip(shifts, stds)]
Deltaf_ij, dDeltaf_ij = compute_mbar_free_energy(all_samples, shifts, stds, analytical_f)
print()
print_free_energies(Deltaf_ij, dDeltaf_ij)
# SECOND TEST.
    # Check that the bias is not due to lack of overlap: if we sample only the end states,
    # the estimate should be correct.
    # -----------------------------------------------------------------------------------
for i in range(1, len(all_samples)):
all_samples_bar = [all_samples[0], all_samples[i]]
shifts_bar = [shifts[0], shifts[i]]
stds_bar = [stds[0], stds[i]]
analytical_f_bar = [analytical_f[0], analytical_f[i]]
Deltaf_ij, dDeltaf_ij = compute_mbar_free_energy(all_samples_bar, shifts_bar, stds_bar, analytical_f_bar)
print('\nBAR_{}0'.format(i))
print_free_energies(Deltaf_ij, dDeltaf_ij)
# THIRD TEST.
# Now sample from only the bound state to see how the free energy changes.
# ------------------------------------------------------------------------
all_samples[1:] = [normal(loc=shifts[0], scale=stds[0], size=n_samples) for _ in range(len(stds)-1)]
Deltaf_ij, dDeltaf_ij = compute_mbar_free_energy(all_samples, shifts, stds, analytical_f)
print()
print_free_energies(Deltaf_ij, dDeltaf_ij)
# FOURTH TEST.
# Now let the unbound state decorrelate fast (i.e. sample from its own distribution).
# -----------------------------------------------------------------------------------
all_samples[-1] = normal(loc=shifts[-1], scale=stds[-1], size=n_samples)
Deltaf_ij, dDeltaf_ij = compute_mbar_free_energy(all_samples, shifts, stds, analytical_f)
print()
print_free_energies(Deltaf_ij, dDeltaf_ij)
    # RESULT: the tests above behave as hypothesized.
# =============================================================================
# SUPPORTING INFORMATION - COMPLEX/SOLVENT and ENTROPY/ENTHALPY DECOMPOSITION
# =============================================================================
def _mean_data_decomposition(data):
# Convert into a numpy array to take the mean.
# Convert None (not supported by numpy) into nans.
try:
# This may fail if we have computed different iterations for each.
        data = np.array(data, dtype=float)
except ValueError:
data_lengths = [len(x) for x in data]
print('Warning: Truncating data of shape {}'.format(data_lengths))
min_length = min(data_lengths)
data = [x[:min_length] for x in data]
        data = np.array(data, dtype=float)
# Compute std and mean along the trajectory ignoring NaNs.
return np.nanmean(data, axis=0), np.nanstd(data, axis=0)
def _plot_phase_decomposition(ax, phase_free_energies):
# Shortcuts.
data = phase_free_energies
label = '$\Delta$G'
# Plot each phase data on a separate axis to make the comparison on different order of magnitudes easier.
    # Recipe with three axes: https://matplotlib.org/3.1.0/gallery/ticks_and_spines/multiple_yaxis_with_spines.html
phase_axes = {
'complex': ax.twinx(),
'solvent': ax.twinx()
}
phase_colors = {
'complex': 'C1',
'solvent': 'C0',
}
for ax_name in sorted(phase_axes):
phase_axes[ax_name].set_ylabel(label + ' ' + ax_name + ' [kcal/mol]',
color=phase_colors[ax_name])
phase_axes[ax_name].spines["right"].set_position(("axes", 1.2))
# Compute total free energy summing complex and solvent for all replicates.
total_mean = [np.array(data['solvent'][i]) + np.array(data['complex'][i]) for i in range(5)]
total_mean, total_std = _mean_data_decomposition(total_mean)
# Compute and plot the phase free energy.
for phase_name in ['complex', 'solvent']:
color = phase_colors[phase_name]
# Convert into a numpy array to take the mean.
# Convert None (not supported by numpy) into nans.
data[phase_name], std = _mean_data_decomposition(data[phase_name])
# Plot each phase data on a separate axis to make the comparison easier.
phase_axes[phase_name].plot(data[phase_name], ls='-', color=color,
label=label + ' ' + phase_name)
# Plot uncertainties.
phase_axes[phase_name].fill_between(x=list(range(len(std))), y1=data[phase_name]-std,
y2=data[phase_name]+std, color=color, alpha=0.7)
# Plot total free energy.
# total = data['solvent'] + data['complex']
# ax.plot(total, color='black', label=label+' total')
ax.plot(total_mean, color='black', label=label+' total')
ax.fill_between(x=list(range(len(total_std))), y1=total_mean-total_std,
y2=total_mean+total_std, color='black', alpha=0.7)
ax.set_ylabel(label + ' total [kcal/mol]')
ax.set_xlabel('simulation percentage')
# Make the range of all y axes the same.
ax.set_ylim((-21, -18))
phase_axes['complex'].set_ylim((-151.0, -148.0))
phase_axes['solvent'].set_ylim((129.0, 132.0))
def _plot_entropy_enthalpy_decomposition(ax, phase_free_energies, phase_enthalpy):
# Analyze only the complex.
phase_name = 'complex'
# Plot each phase data on a separate axis to make the comparison on different order of magnitudes easier.
    # Recipe with three axes: https://matplotlib.org/3.1.0/gallery/ticks_and_spines/multiple_yaxis_with_spines.html
axes = {
'$\Delta$G': ax,
'$\Delta$H': ax.twinx(),
'-T$\Delta$S': ax.twinx(),
}
colors = {
'$\Delta$G': 'black',
'$\Delta$H': 'C1',
'-T$\Delta$S': 'C0',
}
for ax_name in sorted(axes):
axes[ax_name].set_ylabel(ax_name + ' ' + phase_name + ' [kcal/mol]', color=colors[ax_name])
axes[ax_name].spines["right"].set_position(("axes", 1.2))
# Variable used to propagate entropy decomposition.
entropy_std = []
# Plot the total average free energy and enthalpy and for each phase.
for data, label in [(phase_free_energies, '$\Delta$G'),
(phase_enthalpy, '$\Delta$H')]:
color = colors[label]
# Convert into a numpy array to take the mean.
# Convert None (not supported by numpy) into nans.
data[phase_name], std = _mean_data_decomposition(data[phase_name])
ns_replica = np.arange(0.0, 40.0, 40/len(std))
# Plot each phase data on a separate axis to make the comparison easier.
axes[label].plot(ns_replica, data[phase_name], ls='-', color=color, label=label+' '+phase_name)
# Plot uncertainties.
axes[label].fill_between(x=ns_replica, y1=data[phase_name]-std,
y2=data[phase_name]+std, color=color, alpha=0.7)
# Propagate uncertainty.
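        # Assuming independent errors, the variances add:
        # var(dG - dH) = var(dG) + var(dH).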
if len(entropy_std) == 0:
entropy_std = std**2
else:
entropy_std += std**2
entropy_std = np.sqrt(entropy_std)
# Plot also entropies.
label = '-T$\Delta$S'
color = colors[label]
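    # Entropic term from the thermodynamic identity -T*dS = dG - dH.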
entropy = phase_free_energies[phase_name] - phase_enthalpy[phase_name]
axes[label].plot(ns_replica, entropy, ls='-', color=color, label=label+' '+phase_name)
# Plot uncertainties.
axes[label].fill_between(x=ns_replica, y1=entropy-entropy_std,
y2=entropy+entropy_std, color=color, alpha=0.7)
ax.set_xlabel('ns/replica')
def plot_decomposition(system_name, starting_iteration, type, output_file_path):
"""
    Decomposition of the free energy trajectory into complex/solvent phases or entropy/enthalpy.
Parameters
----------
type : str
Can be 'entropy-enthalpy' or 'phase'.
"""
data_file_pattern = 'YankAnalysis/BiasAnalysis/iter{}/fe-decomposition-{}-{{}}.json'.format(
starting_iteration, system_name)
n_replicates = 5
phase_free_energies = {'complex': [[] for _ in range(n_replicates)],
'solvent': [[] for _ in range(n_replicates)]}
phase_enthalpy = copy.deepcopy(phase_free_energies)
for replicate_idx in range(n_replicates):
# Read decomposition data.
decomposition_data_file_path = data_file_pattern.format(replicate_idx)
with open(decomposition_data_file_path, 'r') as f:
decomposition_data = json.load(f)
# Read free energy and enthalpy at each iteration.
sorted_decomposition_data = sorted(decomposition_data, key=lambda x: int(x.split('-')[1]))
for phase_iter in sorted_decomposition_data:
decomposition = decomposition_data[phase_iter]
phase_name, iteration = phase_iter.split('-')
# Correct sign consistent with thermodynamic cycle.
if phase_name == 'complex':
sign = -1
else:
sign = 1
corrected_free_energy = sign * (decomposition['DeltaF'] + decomposition['DeltaF_standard_state_correction'])
phase_free_energies[phase_name][replicate_idx].append(corrected_free_energy)
# Multiplication works only if enthalpy is not None.
if decomposition['DeltaH'] is not None:
decomposition['DeltaH'] *= sign
phase_enthalpy[phase_name][replicate_idx].append(decomposition['DeltaH'])
# Create figure.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(7.25, 4.6))
if type == 'entropy-enthalpy':
_plot_entropy_enthalpy_decomposition(ax, phase_free_energies, phase_enthalpy)
else:
_plot_phase_decomposition(ax, phase_free_energies)
# # Plot total free energy.
# total = data['solvent'] + data['complex']
# ax.plot(total, color=color, label=label)
# totals.append(total)
# Plot also entropies.
# ax.plot(totals[0] - totals[1], color='blue', label='-T$\Delta$S')
# ax.set_ylim((-20, -18))
# phase_axes['complex'].set_ylim((-153, -148))
# phase_axes['solvent'].set_ylim((128, 133))
# ax.set_ylim((-23, -18))
# phase_axes['complex'].set_ylim((30, 45))
# phase_axes['solvent'].set_ylim((-55, -40))
# ax.legend()
plt.tight_layout()
if output_file_path is not None:
os.makedirs(os.path.dirname(output_file_path), exist_ok=True)
plt.savefig(output_file_path)
else:
plt.show()
# =============================================================================
# RELATIVE EFFICIENCY ANALYSIS
# =============================================================================
def get_relative_efficiency_input(submission, yank_analysis, system_name):
"""Prepare the data to compute the mean relative efficiencies for this system."""
    # For GROMACS/EE-fullequil we need to account for the extra equilibration
    # cost and shift all energy evaluations to the right.
if submission.paper_name == 'GROMACS/EE-fullequil':
mean_free_energies = submission.mean_free_energies()
mean_data = mean_free_energies[mean_free_energies['System name'] == system_name]
first_shifted = mean_data['N energy evaluations'].values[0]
last_shifted = mean_data['N energy evaluations'].values[-1]
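        # The trajectory has 100 equally spaced points over the production
        # run, and every point carries the full calibration cost, so
        # (assuming that spacing) first_shifted = cost + production/100 and
        # last_shifted = cost + production, which solves to
        # cost = first_shifted*100/99 - last_shifted/99.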
calibration_cost = first_shifted*100/99 - last_shifted/99
else:
calibration_cost = 0
# Isolate the data for the system.
data_sub = submission.data[submission.data['System name'] == system_name]
n_energy_evaluations = max(data_sub['N energy evaluations'])
data_ref = yank_analysis.get_free_energies_from_energy_evaluations(
n_energy_evaluations, system_name=system_name, mean_trajectory=False,
start=calibration_cost)
# Obtain the free energies for the submission.
n_replicates = 5
free_energy_sub = np.empty(shape=(n_replicates, 100))
free_energy_ref = np.empty(shape=(n_replicates, 100))
for data, free_energy in [
(data_sub, free_energy_sub),
(data_ref, free_energy_ref),
]:
for i in range(n_replicates):
system_id = system_name + '-' + str(i)
system_id_data = data[data['System ID'] == system_id]
free_energy[i] = system_id_data[DG_KEY].values
# Discard the initial frames of REVO and GROMACS/EE that don't have predictions.
from pkganalysis.efficiency import discard_initial_zeros
free_energy_ref, free_energy_sub = discard_initial_zeros(free_energy_ref, free_energy_sub)
# Determine the actual asymptotic free energy of YANK.
asymptotic_free_energy_ref = yank_analysis.get_reference_free_energies()[system_name]
return free_energy_ref, free_energy_sub, asymptotic_free_energy_ref
def compute_all_relative_efficiencies(
free_energy_A, free_energy_B, ci, n_bootstrap_samples,
asymptotic_free_energy_A=None, asymptotic_free_energy_B=None
):
from pkganalysis.efficiency import EfficiencyAnalysis
analysis = EfficiencyAnalysis(free_energy_A, free_energy_B,
asymptotic_free_energy_A,
asymptotic_free_energy_B)
std_rel_eff = analysis.compute_std_relative_efficiency(
confidence_interval=ci, n_bootstrap_samples=n_bootstrap_samples)
abs_bias_rel_eff = analysis.compute_abs_bias_relative_efficiency(
confidence_interval=ci, n_bootstrap_samples=n_bootstrap_samples)
rmse_rel_eff = analysis.compute_rmse_relative_efficiency(
confidence_interval=ci, n_bootstrap_samples=n_bootstrap_samples)
if ci is None:
rel_eff = [std_rel_eff, abs_bias_rel_eff, rmse_rel_eff]
return rel_eff
else:
rel_eff = [std_rel_eff[0], abs_bias_rel_eff[0], rmse_rel_eff[0]]
cis = [std_rel_eff[1], abs_bias_rel_eff[1], rmse_rel_eff[1]]
return rel_eff, cis
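# Example usage (hypothetical arrays): free_energy_A and free_energy_B are
# (n_replicates, n_costs) arrays of free energy trajectories; with ci=None
# the call returns the [std, absolute bias, RMSE] relative efficiencies:
#   rel_eff = compute_all_relative_efficiencies(fe_A, fe_B, None, 1000)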
def plot_relative_efficiencies(submissions, yank_analysis, ci=0.95, n_bootstrap_samples=1000,
same_plot=False, step_cumulative=2):
sns.set_style('whitegrid')
sns.set_context('paper')
statistic_names = ['std', 'absolute bias', 'RMSE']
# Create output directory.
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'SI_Figure-efficiencies')
os.makedirs(figure_dir_path, exist_ok=True)
# Check if we need all the efficiencies in the same plot or not.
if same_plot:
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(7.25, 8))
# Keep track of data range by statistic.
statistic_ranges = {name: [np.inf, 0] for name in statistic_names}
# Keep track of n_energy_evaluations by column.
max_n_energy_evaluations = [0 for _ in range(3)]
for submission in submissions:
if submission.paper_name in {'OpenMM/REVO'}:
continue
# if submission.paper_name in {'AMBER/APR', 'GROMACS/NS-DS/SB', 'GROMACS/NS-DS/SB-long',
# 'NAMD/BAR', 'GROMACS/EE', 'GROMACS/EE-fullequil', 'OpenMM/SOMD'}:
# continue
print(submission.paper_name)
system_names = submission.data['System name'].unique()
# Create figure.
if not same_plot:
# For GROMACS/EE, there are no submissions for CB8-G3.
if 'GROMACS/EE' in submission.paper_name:
system_names = system_names[~(system_names == 'CB8-G3')]
fig, axes = plt.subplots(nrows=3, ncols=len(system_names),
figsize=(7.25, 8))
statistic_ranges = {name: [np.inf, 0] for name in statistic_names}
for col_idx, system_name in enumerate(system_names):
color = SUBMISSION_COLORS[submission.paper_name]
# For GROMACS/EE, there are no submissions for CB8-G3.
if 'GROMACS/EE' in submission.paper_name and system_name == 'CB8-G3':
continue
# For GROMACS/NS-DS/SB-long there are no new submissions for OAs.
if 'GROMACS/NS-DS/SB-long' in submission.paper_name and system_name != 'CB8-G3':
# Just add the label.
axes[0][col_idx].plot([], color=color, label=submission.paper_name)
continue
# Get input for EfficiencyAnalysis.
free_energy_ref, free_energy_sub, asymptotic_free_energy_ref = get_relative_efficiency_input(
submission, yank_analysis, system_name)
# Get the relative efficiencies.
rel_eff = compute_all_relative_efficiencies(
free_energy_ref, free_energy_sub, ci, n_bootstrap_samples,
asymptotic_free_energy_A=asymptotic_free_energy_ref
)
if ci is not None:
rel_eff, cis = rel_eff # Unpack confidence intervals.
# Use the same asymptotic free energies to compute the absolute bias
# relative efficiency as a function of the simulation length.
asymptotic_free_energy_sub = free_energy_sub.mean(axis=0)[-1]
# # Print relative efficiencies.
# print(system_name, ci)
# if ci is not None:
# for rel_eff, bounds in zip(rel_eff, cis):
# print('\t', rel_eff, bounds.tolist())
# else:
# for rel_eff in rel_eff:
# print('\t', rel_eff)
# Compute mean efficiencies as a function of the length of the simulation.
n_costs = free_energy_ref.shape[1]
n_rel_eff = int(n_costs / step_cumulative)
relative_efficiencies = np.empty(shape=(3, n_rel_eff))
low_bounds = np.empty(shape=(3, n_rel_eff))
high_bounds = np.empty(shape=(3, n_rel_eff))
for i, c in enumerate(range(step_cumulative-1, n_costs, step_cumulative)):
c1 = c + 1
rel_eff = compute_all_relative_efficiencies(
free_energy_ref[:,:c1], free_energy_sub[:,:c1],
ci, n_bootstrap_samples,
asymptotic_free_energy_A=asymptotic_free_energy_ref,
asymptotic_free_energy_B=asymptotic_free_energy_sub
)
if ci is not None:
rel_eff, cis = rel_eff # Unpack confidence intervals.
# Update CI lower and upper bound.
relative_efficiencies[:,i] = rel_eff
if ci is not None:
low_bounds[:,i] = [x[0] for x in cis]
high_bounds[:,i] = [x[1] for x in cis]
# Get number of energy evaluations.
mean_data = submission.mean_free_energies(system_name=system_name)
            # Check how many initial iterations have been discarded.
discarded_iterations = 100 - n_costs
n_energy_evaluations = mean_data['N energy evaluations'].values[
discarded_iterations+1::step_cumulative] / 1e6
for row_idx, rel_eff in enumerate(relative_efficiencies):
ax = axes[row_idx][col_idx]
ax.plot(n_energy_evaluations, rel_eff, color=color, label=submission.paper_name)
# Plot back line at 0.
ax.plot(n_energy_evaluations, [0 for _ in n_energy_evaluations], color='black', ls='--')
# Update data range.
statistic_range = statistic_ranges[statistic_names[row_idx]]
# if ci is None:
# min_rel_eff = min(rel_eff)
# max_rel_eff = max(rel_eff)
# else:
# min_rel_eff = min(*rel_eff, *low_bounds[row_idx])
# max_rel_eff = max(*rel_eff, *high_bounds[row_idx])
statistic_range[0] = min(statistic_range[0], min(rel_eff))
statistic_range[1] = max(statistic_range[1], max(rel_eff))
# Update x-axis range.
if same_plot:
max_n_energy_evaluations[col_idx] = max(max_n_energy_evaluations[col_idx],
n_energy_evaluations[-1])
else:
for row_idx in range(len(statistic_names)):
axes[row_idx][col_idx].set_xlim((0, n_energy_evaluations[-1]))
if ci is not None:
# Plot confidence intervals.
for row_idx, (low_bound_c, high_bound_c) in enumerate(zip(low_bounds, high_bounds)):
ax = axes[row_idx][col_idx]
ax.fill_between(n_energy_evaluations, low_bound_c, high_bound_c,
alpha=0.35, color='gray')
# We do this multiple times unnecessarily if same_plot is True, but the code is simpler.
for col_idx, system_name in enumerate(system_names):
axes[0][col_idx].set_title(system_name)
for row_idx, statistic_name in enumerate(statistic_names):
axes[row_idx][0].set_ylabel(statistic_name + ' rel eff')
for col_idx in range(len(system_names)):
if same_plot:
extra_space = 0.1
else:
# Make space for confidence intervals.
extra_space = 1
ylimits = (statistic_ranges[statistic_name][0] - extra_space,
statistic_ranges[statistic_name][1] + extra_space)
axes[row_idx][col_idx].set_ylim(ylimits)
axes[row_idx][col_idx].tick_params(axis='y', which='major', pad=0.1)
axes[-1][1].set_xlabel('Number of force/energy evaluations [10$^6$]')
# Set labels and axes limits.
if not same_plot:
fig.suptitle(submission.paper_name)
output_file_base_name = 'releff-{}-{}'.format(submission.file_name, submission.receipt_id)
output_file_base_path = os.path.join(figure_dir_path, output_file_base_name)
plt.savefig(output_file_base_path + '.pdf')
# plt.savefig(output_file_base_path + '.png', dpi=600)
# plt.show()
if same_plot:
for row_idx in range(len(statistic_names)):
for col_idx in range(len(system_names)):
axes[row_idx][col_idx].set_xlim((0, max_n_energy_evaluations[col_idx]))
axes[0][1].legend(loc='upper right', bbox_to_anchor=(2.0, 1.48),
fancybox=True, ncol=3)
output_file_base_path = os.path.join(figure_dir_path, 'relative-efficiencies')
plt.savefig(output_file_base_path + '.pdf')
# plt.savefig(output_file_base_path + '.png', dpi=600)
# plt.show()
def plot_absolute_efficiencies(submissions, yank_analysis, ci=0.95, n_bootstrap_samples=1000):
sns.set_style('whitegrid')
sns.set_context('paper')
# Keep track of data range by statistic.
statistic_names = ['std', 'absolute bias', 'RMSE']
# Keep track of maximum number of energy evaluations
# to determine plotting range for YANK.
system_names = ['CB8-G3', 'OA-G3', 'OA-G6']
max_n_energy_eval = {name: 0 for name in system_names}
# Create figure.
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(7.25, 8))
for submission in submissions + [yank_analysis]:
if 'REVO' in submission.paper_name:
continue
print(submission.paper_name)
# Obtain std, bias, and RMSE of the 5 trajectories.
# If this is a YANK analysis, we get it later specifically for the system.
if not isinstance(submission, YankSamplingAnalysis):
mean_free_energies = submission.mean_free_energies()
color = SUBMISSION_COLORS[submission.paper_name]
for col_idx, system_name in enumerate(system_names):
# GROMACS/EE doesn't have submissions for CB8-G3.
if 'GROMACS/EE' in submission.paper_name and system_name == 'CB8-G3':
continue
# For GROMACS/NS-DS/SB-long there are no new submissions for OAs.
if 'GROMACS/NS-DS/SB-long' in submission.paper_name and 'OA' in system_name:
# Just add the label.
axes[0][col_idx].plot([], color=color, label=submission.paper_name)
continue
# Select the submission data for only this host-guest system.
if isinstance(submission, YankSamplingAnalysis):
line_style = '--'
mean_data = submission.get_free_energies_from_energy_evaluations(
max_n_energy_eval[system_name], system_name=system_name, mean_trajectory=True)
else:
line_style = '-'
mean_data = mean_free_energies[mean_free_energies['System name'] == system_name]
# Update maximum number of energy evaluations.
n_energy_evaluations = mean_data['N energy evaluations'].values
max_n_energy_eval[system_name] = max(max_n_energy_eval[system_name], n_energy_evaluations[-1])
# Discard initial computational costs for which there's no data.
first_nonzero_idx = np.nonzero(mean_data[DG_KEY])[0][0]
n_energy_evaluations = n_energy_evaluations[first_nonzero_idx:]
# Compute cumulative total std, abs_bias, and RMSE.
scale_energy_evaluations = 1e6
norm_factor = (n_energy_evaluations - n_energy_evaluations[0])[1:] / scale_energy_evaluations
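            # The mean statistic up to cost t is (1/t) times the integral of
            # the instantaneous statistic, approximated below with the
            # cumulative trapezoid rule (assuming equally spaced points).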
avg_std = sp.integrate.cumtrapz(mean_data['std'].values[first_nonzero_idx:]) / norm_factor
avg_abs_bias = sp.integrate.cumtrapz(np.abs(mean_data['bias'].values[first_nonzero_idx:])) / norm_factor
avg_rmse = sp.integrate.cumtrapz(mean_data['RMSE'].values[first_nonzero_idx:]) / norm_factor
# Plot total statistics as a function of the energy evaluations.
# Discard first energy evaluation as cumtrapz doesn't return a result for it.
for row_idx, avg_stats in enumerate([avg_std, avg_abs_bias, avg_rmse]):
ax = axes[row_idx, col_idx]
ax.plot(n_energy_evaluations[1:] / scale_energy_evaluations, avg_stats,
color=color, label=submission.paper_name, ls=line_style)
# Set x axis.
ax.set_xlim((0, n_energy_evaluations[-1] / scale_energy_evaluations))
# Set labels and axes limits.
y_limits = {
'std': (0, 0.4),
'absolute bias': (0, 0.3),
'RMSE': (0, 0.4)
}
for col_idx, system_name in enumerate(system_names):
axes[0][col_idx].set_title(system_name)
# Set y limits (shared for each row).
for row_idx, statistic_name in enumerate(statistic_names):
axes[row_idx][col_idx].set_ylim(y_limits[statistic_name])
axes[row_idx][col_idx].tick_params(axis='y', which='major', pad=0.1)
# # Remove shared ticks.
# for row_idx in range(len(statistic_names)):
# for col_idx in range(len(system_names)):
# if col_idx > 0:
# axes[row_idx][col_idx].set_yticklabels([])
# if row_idx < len(statistic_names)-1:
# axes[row_idx][col_idx].set_xticklabels([])
for row_idx, statistic_name in enumerate(statistic_names):
axes[row_idx][0].set_ylabel('mean ' + statistic_name + ' [kcal/mol]')
axes[-1][1].set_xlabel('N energy evaluations [M]')
axes[0][1].legend(loc='upper right', bbox_to_anchor=(2.0, 1.48),
fancybox=True, ncol=3)
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'SI_Figure-efficiencies')
os.makedirs(figure_dir_path, exist_ok=True)
output_file_base_path = os.path.join(figure_dir_path, 'absolute-efficiencies')
plt.savefig(output_file_base_path + '.pdf')
# plt.savefig(output_file_base_path + '.png', dpi=600)
# plt.show()
def print_relative_efficiency_table(
submissions, yank_analysis, ci=0.95,
n_bootstrap_samples=100,
print_bias_corrected=False
):
"""Create a table with standard deviation, absolute bias, and RMSE relative efficiency."""
methods = []
# Initialize the table to be converted into a Pandas dataframe.
system_names = ['CB8-G3', 'OA-G3', 'OA-G6']
statistic_names = [r'$e_{\mathrm{std}}$', r'$e_{|\mathrm{bias}|}$', r'$e_{\mathrm{RMSD}}$']
column_names = ['\\makecell{$\Delta$ G \\\\ $[$kcal/mol$]$}', '\\makecell{n eval \\\\ $[$M$]$}'] + statistic_names
# Add columns.
efficiency_table = collections.OrderedDict()
for system_name, column_name in itertools.product(system_names, column_names):
efficiency_table[(system_name, column_name)] = []
for submission in submissions:
# Collect method's names in the given order.
methods.append(submission.paper_name)
mean_free_energies = submission.mean_free_energies()
for system_name in system_names:
            # CB8-G3 calculations for GROMACS/EE did not converge yet, and the
            # long NS-DS protocol has been run only on CB8-G3.
if ((submission.name == 'Expanded-ensemble/MBAR' and system_name == 'CB8-G3') or
(submission.paper_name == 'GROMACS/NS-DS/SB-long' and system_name != 'CB8-G3')):
relative_efficiencies, relative_efficiencies_corrected = np.full((2, 3), fill_value=np.nan)
dg = ''
n_force_eval = ''
else:
# Get input for EfficiencyAnalysis.
free_energy_ref, free_energy_sub, asymptotic_free_energy_ref = get_relative_efficiency_input(
submission, yank_analysis, system_name)
# Get the relative efficiencies.
relative_efficiencies, cis = compute_all_relative_efficiencies(
free_energy_ref, free_energy_sub, ci, n_bootstrap_samples,
asymptotic_free_energy_A=asymptotic_free_energy_ref
)
# Recompute relative efficiencies assuming that YANK converged.
if print_bias_corrected:
relative_efficiencies_corrected, cis_corrected = compute_all_relative_efficiencies(
free_energy_ref, free_energy_sub, ci, n_bootstrap_samples)
# Select the data for only this host-guest system.
mean_data_sub = mean_free_energies[mean_free_energies['System name'] == system_name]
# Get the final free energy and number of energy/force evaluations.
dg = mean_data_sub[DG_KEY].values[-1]
dg_CI = mean_data_sub['$\Delta$G CI'].values[-1] # Confidence interval.
dg, dg_CI = reduce_to_first_significant_digit(dg, dg_CI)
n_force_eval = mean_data_sub['N energy evaluations'].values[-1]
# Convert to string format.
dg = '{} $\\pm$ {}'.format(dg, dg_CI)
n_force_eval = str(int(round(n_force_eval / 1e6)))
# Add free energy and cost entries.
efficiency_table[(system_name, column_names[0])].append(dg)
efficiency_table[(system_name, column_names[1])].append(n_force_eval)
# Add efficiency entries for the table.
for statistic_idx, statistic_name in enumerate(statistic_names):
# Gather the format arguments.
rel_effs = [relative_efficiencies[statistic_idx], cis[statistic_idx][0], cis[statistic_idx][1]]
if print_bias_corrected:
rel_effs.append(relative_efficiencies_corrected[statistic_idx])
# Comment this if we don't want to print CIs for the corrected estimate.
rel_effs.extend([cis_corrected[statistic_idx][0], cis_corrected[statistic_idx][1]])
# Print significant digits.
efficiencies_format = []
for e_idx in range(0, len(rel_effs), 3):
rel_eff, low_bound, high_bound = rel_effs[e_idx:e_idx+3]
if high_bound - rel_eff < 0.1 or rel_eff - low_bound < 0.1:
fmt = '{:2.2f}'
else:
fmt = '{:2.1f}'
# Print lower and higher bound as sub and superscripts of the estimate.
efficiencies_format.append(fmt + '$_{{\raisem{{2pt}}{{' + fmt + '}}}}^{{\mathstrut ' + fmt + '}}$')
if np.isnan(rel_effs[0]):
data_entry = ''
# Standard deviation efficiency is not affected by the bias.
elif print_bias_corrected and ('std' not in statistic_name):
data_entry = efficiencies_format[0] + ' (' + efficiencies_format[1] + ')'
data_entry = data_entry.format(*rel_effs)
else:
data_entry = efficiencies_format[0].format(*rel_effs[:3])
# Remove the minus sign from "-0".
data_entry = data_entry.replace('-0.0', '0.0')
data_entry = data_entry.replace('-0.00', '0.00')
efficiency_table[(system_name, statistic_name)].append(data_entry)
# Add row for reference calculation.
methods.append(YANK_METHOD_PAPER_NAME)
# Add free energy and cost entries.
for system_name in system_names:
yank_mean_data = yank_analysis.get_free_energies_from_iteration(
YANK_N_ITERATIONS, system_name=system_name, mean_trajectory=True)
dg = yank_mean_data[DG_KEY].values[-1]
dg_CI = yank_mean_data['$\Delta$G CI'].values[-1] # Confidence interval.
dg, dg_CI = reduce_to_first_significant_digit(dg, dg_CI)
n_force_eval = yank_mean_data['N energy evaluations'].values[-1]
n_force_eval = str(int(round(n_force_eval / 1e6)))
efficiency_table[(system_name, column_names[0])].append('{} $\\pm$ {}'.format(dg, dg_CI))
efficiency_table[(system_name, column_names[1])].append(n_force_eval)
    # All efficiencies are relative to YANK so they're all 0.0.
for system_name, statistic_name in itertools.product(system_names, statistic_names):
efficiency_table[(system_name, statistic_name)].append('0.0')
# Convert to Pandas Dataframe.
efficiency_table = pd.DataFrame(efficiency_table)
# Set the method's names as index column.
efficiency_table = efficiency_table.assign(Method=methods)
efficiency_table.set_index(keys='Method', inplace=True)
# Print table.
column_format = 'lccccc|ccccc|ccccc'
efficiency_table_latex = efficiency_table.to_latex(column_format=column_format, multicolumn_format='c',
escape=False)
# Make header and reference method bold.
textbf = lambda s: '\\textbf{' + s + '}'
efficiency_table_latex = efficiency_table_latex.replace(YANK_METHOD_PAPER_NAME, textbf(YANK_METHOD_PAPER_NAME))
efficiency_table_latex = efficiency_table_latex.replace('Method', textbf('Method'))
for system_name in system_names:
efficiency_table_latex = efficiency_table_latex.replace(system_name, textbf(system_name))
for column_name in column_names:
efficiency_table_latex = efficiency_table_latex.replace(column_name, textbf(column_name))
print(efficiency_table_latex)
def print_nonequilibrium_relative_efficiencies(nonequilibrium_submissions):
"""Print relative efficiencies w.r.t. for the nonequilibrium estimators table."""
system_names = ['CB8-G3', 'OA-G3', 'OA-G6']
def _get_free_energy_array(submission, system_name, step=1, max_c=100, get_asymptotic=False):
n_replicates = 5
system_data = submission.data[submission.data['System name'] == system_name]
free_energy_array = np.empty(shape=(n_replicates, int(max_c/step)))
for i in range(n_replicates):
system_id = system_name + '-' + str(i)
system_id_data = system_data[system_data['System ID'] == system_id]
free_energy_array[i] = system_id_data[DG_KEY].values[:max_c:step]
if get_asymptotic:
mean_free_energies = submission.mean_free_energies()
asymptotic = mean_free_energies[mean_free_energies['System name'] == system_name][DG_KEY].values[-1]
return free_energy_array, asymptotic
return free_energy_array
# Use GROMACS/NS-DS/SB-long as reference method.
reference_submission = [s for s in nonequilibrium_submissions if s.paper_name == 'GROMACS/NS-DS/SB-long'][0]
# Also remove the other BAR submission.
nonequilibrium_submissions = [s for s in nonequilibrium_submissions if 'GROMACS/NS-DS/SB' not in s.paper_name]
# Get only the first 50 as the 1-directional estimators only have half the cost.
free_energy_ref = {}
asymptotic_ref = {}
for system_name in system_names:
DG, asympt = _get_free_energy_array(reference_submission, system_name, max_c=50, get_asymptotic=True)
free_energy_ref[system_name] = DG
asymptotic_ref[system_name] = asympt
for submission in nonequilibrium_submissions:
print(submission.paper_name, end='')
for system_name in system_names:
free_energy_sub = _get_free_energy_array(submission, system_name, step=2)
rel_eff, cis = compute_all_relative_efficiencies(
free_energy_ref[system_name], free_energy_sub, ci=0.95, n_bootstrap_samples=1000,
asymptotic_free_energy_A=asymptotic_ref[system_name],
asymptotic_free_energy_B=asymptotic_ref[system_name]
)
for i, stat_name in enumerate(['std', 'bias', 'RMSE']):
print(r' & {:.1f}$_{{\raisem{{2pt}}{{{:.1f}}}}}^{{\mathstrut {:.1f}}}$'.format(rel_eff[i], cis[i][0], cis[i][1]), end='')
print(r' \\')
def print_final_prediction_table(submissions, yank_analysis):
"""Plot the table containing the fina binding free energy predictions for all replicates."""
for submission in submissions + [yank_analysis]:
# GROMACS/EE-fullequil predictions are identical to GROMACS/EE
if submission.paper_name == 'GROMACS/EE-fullequil':
continue
if isinstance(submission, YankSamplingAnalysis):
submission_data = yank_analysis.get_free_energies_from_iteration(final_iteration=YANK_N_ITERATIONS)
else:
submission_data = submission.data
submission_data = submission_data[submission_data['Simulation percentage'] == 100]
row_str = submission.paper_name + ' & '
submission_final_DGs = []
for system_id in submission_data['System ID'].unique():
# GROMACS/EE doesn't have predictions for CB8-G3, and the
# GROMACS/NS-DS/SB-long protocol was applied only to CB8-G3.
if (('GROMACS/EE' in submission.paper_name and 'CB8-G3' in system_id) or
(submission.paper_name == 'GROMACS/NS-DS/SB-long' and 'OA' in system_id)):
submission_final_DGs.append('')
continue
dg = submission_data.loc[submission_data['System ID'] == system_id, DG_KEY].values[0]
ddg = submission_data.loc[submission_data['System ID'] == system_id, DDG_KEY].values[0]
dg, ddg = reduce_to_first_significant_digit(dg, ddg)
submission_final_DGs.append(r'{} $\pm$ {}'.format(dg, ddg))
row_str += ' & '.join(submission_final_DGs) + r' \\'
print(row_str)
# =============================================================================
# SUPPORTING INFORMATION - SINGLE TRAJECTORIES
# =============================================================================
def plot_single_trajectories_figures(axes, system_data, system_mean_data,
reference_system_mean_data=None,
plot_errors=True, plot_methods_uncertainties=True):
"""Plot individual free energy trajectories and standard deviations for a single method and system."""
system_name = system_data['System name'].unique()[0]
palette_mean = sns.color_palette('pastel')
submission_mean_color = 'black'
reference_mean_color = palette_mean[9]
# Plot the method uncertainties of the single replicate trajectories.
# First scale the number of energy evaluations.
system_data.loc[:,'N energy evaluations'] /= N_ENERGY_EVALUATIONS_SCALE
# Plot the 5 replicates individual trajectories.
# First remove the initial predictions that are 0.0 (i.e. there is no estimate).
ax = axes[0]
system_data = system_data[system_data[DG_KEY] != 0.0]
sns.lineplot(data=system_data, x='N energy evaluations', y=DG_KEY,
hue='System ID', palette='bright', ax=ax, alpha=0.6)
# Plot the submission mean trajectory with CI.
plot_mean_free_energy(system_mean_data, x='N energy evaluations', ax=ax,
color_mean=submission_mean_color, plot_ci=False,
color_ci=submission_mean_color, label='Best estimate',
scale_n_energy_evaluations=True)
# Plot YANK mean trajectory with CI.
if reference_system_mean_data is not None:
plot_mean_free_energy(reference_system_mean_data, x='N energy evaluations', ax=ax,
color_mean=reference_mean_color, plot_ci=False,
color_ci=reference_mean_color, label='Reference estimate',
scale_n_energy_evaluations=True)
ax.set_title(system_name)
# Add the y-label only on the leftmost Axis.
if system_name != 'CB8-G3':
ax.set_ylabel('')
    # Remove the legend for now; it will be added at the end after tightening up the plot.
ax.get_legend().remove()
# Create a bias axis.
if reference_system_mean_data is not None:
ref_free_energy = reference_free_energies.loc[system_name, DG_KEY]
with sns.axes_style('white'):
ax2 = ax.twinx()
# Plot a vertical line to make the scale.
vertical_line = np.linspace(*ax.get_ylim()) - ref_free_energy
ax2.plot([50] * len(vertical_line), vertical_line, alpha=0.0001)
ax2.grid(alpha=0.5, linestyle='dashed', zorder=0)
# We add the bias y-label only on the rightmost Axis.
if system_name == 'OA-G6':
ax2.set_ylabel('Bias to reference [kcal/mol]')
# Set the 0 of the twin axis to the YANK reference free energy.
align_yaxis(ax, ref_free_energy, ax2, 0.0)
if plot_errors:
# The x-axis is shared between the 2 rows so we can plot the ticks only in the bottom one.
ax.xaxis.set_ticklabels([])
ax.set_xlabel('')
ax = axes[1]
# REVO uses the mean of the 5 replicates to estimate the
# uncertainty so it doesn't add information.
if plot_methods_uncertainties:
sns.lineplot(data=system_data, x='N energy evaluations', y=DDG_KEY,
hue='System ID', palette='bright', ax=ax, alpha=0.6)
# The legend is added later at the top.
ax.get_legend().remove()
# Plot the standard deviation of the free energy trajectories.
# submission_std = system_mean_data['std']
submission_std = system_mean_data['unbiased_std']
# cost = system_mean_data['Simulation percentage'].values
cost = system_mean_data['N energy evaluations'].values / N_ENERGY_EVALUATIONS_SCALE
ax.plot(cost, submission_std, color=submission_mean_color)
# Plot confidence interval around standard deviation.
submission_std_low_ci = system_mean_data['unbiased_std_low_CI'].values
submission_std_up_ci = system_mean_data['unbiased_std_up_CI'].values
ax.fill_between(cost, submission_std_low_ci, submission_std_up_ci, alpha=0.35, color='gray')
if reference_system_mean_data is not None:
# reference_std = reference_system_mean_data['std']
reference_std = reference_system_mean_data['unbiased_std']
ax.plot(cost, reference_std, color=reference_mean_color)
# Only the central plot shows the x-label.
ax.set_xlabel('')
# Add the y-label only on the leftmost Axis.
if system_name != 'CB8-G3':
ax.set_ylabel('')
else:
ax.set_ylabel('std($\Delta$G) [kcal/mol]')
# Set x limits.
for ax in axes:
ax.set_xlim((0, max(system_data['N energy evaluations'])))
def plot_all_single_trajectories_figures(submissions, yank_analysis, plot_errors=True, output_path_dir=None):
"""Individual plots for each method with the 5 individual free energy and uncertainty trajectories."""
sns.set_style('whitegrid')
sns.set_context('paper')
if output_path_dir is None:
output_path_dir = os.path.join(SAMPLING_PAPER_DIR_PATH, 'SI_Figure-individual-trajectories/')
os.makedirs(output_path_dir, exist_ok=True)
# -------------------- #
# Plot submission data #
# -------------------- #
# Remove nonequilibrium-switching calculations with single-direction estimators.
submissions = [s for s in submissions if ('Jarz' not in s.paper_name and 'Gauss' not in s.paper_name)]
for submission in submissions + [yank_analysis]:
# CB8-G3 calculations for GROMACS/EE did not converge yet.
if submission.name == 'Expanded-ensemble/MBAR':
submission.data = submission.data[submission.data['System name'] != 'CB8-G3']
# REVO uses the mean of the 5 replicates to estimate the
# uncertainty so it doesn't add information.
if 'REVO' in submission.paper_name:
plot_methods_uncertainties = False
else:
plot_methods_uncertainties = True
if not isinstance(submission, YankSamplingAnalysis):
mean_free_energies = submission.mean_free_energies()
unique_system_names = submission.data['System name'].unique()
else:
unique_system_names = sorted(submission.system_names)
# Create a figure with 3 axes (one for each system).
n_systems = len(unique_system_names)
if plot_errors:
# The second row will plot the errors.
fig, axes = plt.subplots(nrows=2, ncols=n_systems, figsize=(7.25, 4.8))
trajectory_axes = axes[0]
else:
fig, axes = plt.subplots(nrows=1, ncols=n_systems, figsize=(7.25, 2.4))
trajectory_axes = axes
# Set figure title.
fig.suptitle(submission.paper_name)
# Determine range of data across systems.
min_DG = np.inf
max_DG = -np.inf
min_dDG = np.inf
max_dDG = -np.inf
# for system_name in unique_system_names:
for ax_idx, system_name in enumerate(unique_system_names):
if isinstance(submission, YankSamplingAnalysis):
data = submission.get_free_energies_from_iteration(final_iteration=YANK_N_ITERATIONS,
system_name=system_name)
mean_data = submission.get_free_energies_from_iteration(final_iteration=YANK_N_ITERATIONS,
system_name=system_name,
mean_trajectory=True)
else:
# Select the data for only this host-guest system.
data = submission.data[submission.data['System name'] == system_name]
mean_data = mean_free_energies[mean_free_energies['System name'] == system_name]
plot_single_trajectories_figures(axes[:,ax_idx], data, mean_data, plot_errors=plot_errors,
reference_system_mean_data=None,
plot_methods_uncertainties=plot_methods_uncertainties)
# Collect max and min data to determine axes range.
min_DG = min(min_DG, min(data[DG_KEY]), min(mean_data[DG_KEY]))
max_DG = max(max_DG, max(data[DG_KEY]), max(mean_data[DG_KEY]))
min_dDG = min(min_dDG, min(data[DDG_KEY]), min(mean_data['std']))
max_dDG = max(max_dDG, max(data[DDG_KEY]), max(mean_data['std']))
# Set limits.
for i in range(len(unique_system_names)):
axes[0][i].set_ylim((min_DG, max_DG))
axes[1][i].set_ylim((min_dDG, max_dDG))
# Keep ticks only in external plots.
axes[0][i].set_xticklabels([])
for i in range(1, len(unique_system_names)):
axes[0][i].set_yticklabels([])
axes[1][i].set_yticklabels([])
# The x-label is shown only in the central plot.
axes[-1][1].set_xlabel('N energy evaluations [10$^6$]')
plt.tight_layout(pad=0.2, rect=[0.0, 0.0, 1.0, 0.85])
# Create legend.
# The first handle/label is the legend title "System ID" so we get rid of it.
handles, labels = trajectory_axes[0].get_legend_handles_labels()
labels = ['replicate ' + str(i) for i in range(5)] + labels[6:]
bbox_to_anchor = (-0.1, 1.35)
trajectory_axes[0].legend(handles=handles[1:], labels=labels, loc='upper left',
bbox_to_anchor=bbox_to_anchor, ncol=6, fancybox=True,
labelspacing=0.8, handletextpad=0.5, columnspacing=1.2)
# Save figure.
output_file_name = 'replicates-{}-{}'.format(submission.file_name, submission.receipt_id)
plt.savefig(os.path.join(output_path_dir, output_file_name + '.pdf'))
# plt.savefig(os.path.join(output_path_dir, output_file_name + '.png'), dpi=300)
# plt.show()
# =============================================================================
# SUPPORTING INFORMATION - HREX/MBAR STATISTICAL INEFFICIENCY ANALYSIS
# =============================================================================
def plot_hrex_stat_ineff_trajectories():
"""Individual plots for HREX with the 5 individual free energy and uncertainty trajectories
as a function of the statistical inefficiency."""
sns.set_context('paper')
# Limits of y-axis (free energies, uncertainties) by system.
y_limits = {
'CB8-G3': [(-14, -10), (0, 2)],
'OA-G3': [(-9, -5), (0, 1.5)],
'OA-G6': [(-9, -5), (0, 1.5)],
}
# Create output dir.
output_path_dir = os.path.join(SAMPLING_PAPER_DIR_PATH, 'SI_Figure-statistical-inefficiency')
os.makedirs(output_path_dir, exist_ok=True)
# Read the data, which is organized by statistical inefficiency.
# We'll then plot by system.
yank_analysis_by_statineff = collections.OrderedDict()
for stat_ineff in ['5', '10', '20', '50', '100', '200']:
data_dir_path = os.path.join('YankAnalysis', 'CorrelationAnalysis', 'statineff-{}'.format(stat_ineff))
yank_analysis = YankSamplingAnalysis(data_dir_path)
yank_analysis_by_statineff[stat_ineff] = yank_analysis
# Plot by system.
for system_name in ['CB8-G3', 'OA-G3', 'OA-G6']:
fig, axes = plt.subplots(nrows=4, ncols=3, figsize=(7.25, 9.8))
# Set figure title.
fig.suptitle('HREX uncertainty predictions as a function of\n'
'statistical inefficiency for {}'.format(system_name))
# for system_name in unique_system_names:
for stat_ineff_idx, stat_ineff in enumerate(yank_analysis_by_statineff):
yank_analysis = yank_analysis_by_statineff[stat_ineff]
data = yank_analysis.get_free_energies_from_iteration(final_iteration=YANK_N_ITERATIONS,
system_name=system_name)
mean_data = yank_analysis.get_free_energies_from_iteration(final_iteration=YANK_N_ITERATIONS,
system_name=system_name,
mean_trajectory=True)
# Plot on the correct axis.
DG_row = 2*int(stat_ineff_idx / 3)
col = stat_ineff_idx % 3
stat_ineff_axes = axes[DG_row:DG_row+2, col]
plot_single_trajectories_figures(stat_ineff_axes, data, mean_data, plot_errors=True,
reference_system_mean_data=None,
plot_methods_uncertainties=True)
# Set titles and limits.
title = 'Statistical inefficiency: {} ps'.format(stat_ineff)
if DG_row > 0:
title = '\n' + title
stat_ineff_axes[0].set_title(title, fontweight='bold')
stat_ineff_axes[0].set_ylim(y_limits[system_name][0])
stat_ineff_axes[1].set_ylim(y_limits[system_name][1])
stat_ineff_axes[0].set_ylabel('$\Delta$G [kcal/mol]')
stat_ineff_axes[1].set_ylabel('std($\Delta$G) [kcal/mol]')
# Keep ticks only in external plots.
for row_idx in range(axes.shape[0]):
for col_idx in range(axes.shape[1]):
                if row_idx != axes.shape[0] - 1:
axes[row_idx][col_idx].set_xticklabels([])
if col_idx != 0:
axes[row_idx][col_idx].set_ylabel('')
axes[row_idx][col_idx].set_yticklabels([])
# Set x label.
axes[-1][1].set_xlabel('N energy evaluations [10$^6$]')
plt.tight_layout(pad=0.0, rect=[0.0, 0.0, 1.0, 0.88])
# Create legend.
# The first handle/label is the legend title "System ID" so we get rid of it.
handles, labels = axes[0][0].get_legend_handles_labels()
labels = ['replicate ' + str(i) for i in range(5)] + labels[6:]
bbox_to_anchor = (0.05, 1.35)
axes[0][0].legend(handles=handles[1:], labels=labels, loc='upper left',
bbox_to_anchor=bbox_to_anchor, ncol=6, fancybox=True,
labelspacing=0.8, handletextpad=0.5, columnspacing=1.2)
# Save figure.
output_file_name = 'statineff-{}'.format(system_name)
plt.savefig(os.path.join(output_path_dir, output_file_name + '.pdf'))
# plt.savefig(os.path.join(output_path_dir, output_file_name + '.png'), dpi=300)
# plt.show()
# =============================================================================
# MAIN
# =============================================================================
if __name__ == '__main__':
sns.set_style('whitegrid')
sns.set_context('paper')
# Read reference values.
yank_analysis = YankSamplingAnalysis(YANK_ANALYSIS_DIR_PATH)
# Obtain free energies and final reference values.
mean_reference_free_energies = yank_analysis.get_free_energies_from_iteration(YANK_N_ITERATIONS, mean_trajectory=True)
reference_free_energies = mean_reference_free_energies[mean_reference_free_energies['Simulation percentage'] == 100]
reference_free_energies.set_index('System name', inplace=True)
# Compute efficiency of reference.
reference_efficiencies = {}
for system_name in mean_reference_free_energies['System name'].unique():
        mean_data = mean_reference_free_energies[mean_reference_free_energies['System name'] == system_name]
reference_efficiencies[system_name], n_discarded = fit_efficiency(mean_data)
# Import user map.
with open('../SubmissionsDoNotUpload/SAMPL6_user_map.csv', 'r') as f:
user_map = pd.read_csv(f)
# Load submissions data. We do OA and TEMOA together.
all_submissions = load_submissions(SamplingSubmission, SAMPLING_SUBMISSIONS_DIR_PATH, user_map)
# Remove AMBER/TI.
all_submissions = [s for s in all_submissions if s.name not in ['Langevin/Virtual Bond/TI']]
# Create an extra submission for GROMACS/EE where the full cost of equilibration has been taken into account.
gromacs_ee_submission = copy.deepcopy([s for s in all_submissions if s.paper_name == 'GROMACS/EE'][0])
gromacs_ee_submission.paper_name = 'GROMACS/EE-fullequil'
gromacs_ee_submission.file_name = 'EENVT-fullequil'
data = gromacs_ee_submission.data # Shortcut.
mean_free_energies = gromacs_ee_submission.mean_free_energies()
for system_name in ['OA-G3', 'OA-G6']:
mean_data = mean_free_energies[mean_free_energies['System name'] == system_name]
first_nonzero_idx = np.nonzero(mean_data[DG_KEY].values)[0][0]
full_equilibration_cost = mean_data['N energy evaluations'].values[first_nonzero_idx] * 4
for i in data[data['System name'] == system_name].index:
data.at[i, 'N energy evaluations'] += full_equilibration_cost
all_submissions.append(gromacs_ee_submission)
    # Sort the submissions to have all plots and tables in the same order.
all_submissions = sorted(all_submissions, key=lambda s: s.paper_name)
# Separate the main submissions from the data about nonequilibrium estimators.
main_submissions = [s for s in all_submissions if not ('Jarz' in s.paper_name or 'Gauss' in s.paper_name)]
noneq_submissions = [s for s in all_submissions if 'NS' in s.paper_name]
# Export YANK analysis and submissions to CSV/JSON tables.
yank_analysis.export(os.path.join(SAMPLING_DATA_DIR_PATH, 'reference_free_energies'))
for s in main_submissions:
file_base_path = os.path.join(SAMPLING_DATA_DIR_PATH, s.receipt_id + '-reference')
yank_analysis.export_by_submission(file_base_path, s)
export_submissions(all_submissions, reference_free_energies)
# Create example trajectory for the figure describing the challenge process.
plot_example_bias_variance(yank_analysis, max_n_eval_percentage=0.4, mixed_proportion=0.3)
# Cartoon explaining mean error and relative efficiency.
plot_mean_error_cartoon()
# Create figure with free energy, standard deviation, and bias as a function of computational cost.
plot_all_entries_trajectory(main_submissions, yank_analysis, zoomed=False)
plot_all_entries_trajectory(main_submissions, yank_analysis, zoomed=True)
# Create results and efficiency table.
print_relative_efficiency_table(main_submissions, yank_analysis, print_bias_corrected=False)
# Plot nonequilibrium-switching single-direction estimator.
plot_all_nonequilibrium_switching(noneq_submissions)
# Plot sensitivity analysis figure.
plot_restraint_and_barostat_analysis()
# Plot figure for HREX bias analysis.
plot_yank_bias()
# Supporting information
# ----------------------
# Absolute/relative efficiency as a function of the computational cost.
plot_relative_efficiencies(main_submissions, yank_analysis)
plot_relative_efficiencies(main_submissions, yank_analysis, ci=None, same_plot=True)
plot_absolute_efficiencies(main_submissions, yank_analysis)
# Relative efficiency for uni/bi-directional estimators.
print_nonequilibrium_relative_efficiencies(noneq_submissions)
# Plot replicate predictions table.
print_final_prediction_table(all_submissions, yank_analysis)
# Plot individual trajectories.
plot_all_single_trajectories_figures(all_submissions, yank_analysis)
# Plot statistical inefficiency analysis.
plot_hrex_stat_ineff_trajectories()
# Supporting information for bias section.
output_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'SI_Figure-bias_hrex')
plot_decomposition('CB8-G3', starting_iteration=5, type='phase',
                       output_file_path=output_dir_path + '/free-energy-phase-decomposition.pdf')
plot_decomposition('CB8-G3', starting_iteration=5, type='entropy-enthalpy',
output_file_path=output_dir_path + '/free-energy-entropy-decomposition.pdf')
| mit |
ClimbsRocks/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 112 | 1819 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
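# xx is the fraction of the dataset used for training at each heldout level.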
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
hainm/statsmodels | examples/python/robust_models_1.py | 25 | 8588 |
## M-Estimators for Robust Linear Modeling
from __future__ import print_function
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
# * An M-estimator minimizes the function
#
# $$Q(e_i, \rho) = \sum_i~\rho \left (\frac{e_i}{s}\right )$$
#
# where $\rho$ is a symmetric function of the residuals
#
# * The effect of $\rho$ is to reduce the influence of outliers
# * $s$ is an estimate of scale.
# * The robust estimates $\hat{\beta}$ are computed by the iteratively re-weighted least squares algorithm
# * We have several choices available for the weighting functions to be used
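# As a quick numeric illustration (added here; not in the original notebook),
# Huber's weight function leaves small residuals untouched and shrinks the
# influence of large ones:
#
# >>> hub = sm.robust.norms.HuberT(t=1.345)
# >>> hub.weights(np.array([0.5, 10.0]))
# array([1.    , 0.1345])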
norms = sm.robust.norms
def plot_weights(support, weights_func, xlabels, xticks):
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(support, weights_func(support))
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels, fontsize=16)
ax.set_ylim(-.1, 1.1)
return ax
#### Andrew's Wave
help(norms.AndrewWave.weights)
a = 1.339
support = np.linspace(-np.pi*a, np.pi*a, 100)
andrew = norms.AndrewWave(a=a)
plot_weights(support, andrew.weights, ['$-\pi*a$', '0', '$\pi*a$'], [-np.pi*a, 0, np.pi*a]);
#### Hampel's 17A
help(norms.Hampel.weights)
c = 8
support = np.linspace(-3*c, 3*c, 1000)
hampel = norms.Hampel(a=2., b=4., c=c)
plot_weights(support, hampel.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
#### Huber's t
help(norms.HuberT.weights)
t = 1.345
support = np.linspace(-3*t, 3*t, 1000)
huber = norms.HuberT(t=t)
plot_weights(support, huber.weights, ['-3*t', '0', '3*t'], [-3*t, 0, 3*t]);
#### Least Squares
help(norms.LeastSquares.weights)
support = np.linspace(-3, 3, 1000)
lst_sq = norms.LeastSquares()
plot_weights(support, lst_sq.weights, ['-3', '0', '3'], [-3, 0, 3]);
#### Ramsay's Ea
help(norms.RamsayE.weights)
a = .3
support = np.linspace(-3*a, 3*a, 1000)
ramsay = norms.RamsayE(a=a)
plot_weights(support, ramsay.weights, ['-3*a', '0', '3*a'], [-3*a, 0, 3*a]);
#### Trimmed Mean
help(norms.TrimmedMean.weights)
c = 2
support = np.linspace(-3*c, 3*c, 1000)
trimmed = norms.TrimmedMean(c=c)
plot_weights(support, trimmed.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
#### Tukey's Biweight
help(norms.TukeyBiweight.weights)
c = 4.685
support = np.linspace(-3*c, 3*c, 1000)
tukey = norms.TukeyBiweight(c=c)
plot_weights(support, tukey.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
#### Scale Estimators
# * Robust estimates of the location
x = np.array([1, 2, 3, 4, 500])
# * The mean is not a robust estimator of location
x.mean()
# * The median, on the other hand, is a robust estimator with a breakdown point of 50%
np.median(x)
# * Analogously for the scale
# * The standard deviation is not robust
x.std()
# Median Absolute Deviation
#
# $$\text{median}_i |X_i - \text{median}_j(X_j)|$$
# The standardized median absolute deviation gives a consistent estimator $\hat{\sigma}$ of the scale
#
# $$\hat{\sigma}=K \cdot MAD$$
#
# where $K$ depends on the distribution. For the normal distribution, for example,
#
# $$K = 1/\Phi^{-1}(3/4) \approx 1.4826$$
stats.norm.ppf(.75)
print(x)
sm.robust.scale.stand_mad(x)
np.array([1,2,3,4,5.]).std()
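# Added sanity check: for a large normal sample, MAD / Phi^{-1}(3/4), i.e.
# roughly 1.4826 * MAD, approaches the standard deviation:
# >>> z = np.random.standard_normal(100000)
# >>> np.median(np.abs(z - np.median(z))) / stats.norm.ppf(.75)  # ~= 1.0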
# * The default for Robust Linear Models is MAD
# * another popular choice is Huber's proposal 2
np.random.seed(12345)
fat_tails = stats.t(6).rvs(40)
kde = sm.nonparametric.KDE(fat_tails)
kde.fit()
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(kde.support, kde.density);
print(fat_tails.mean(), fat_tails.std())
print(stats.norm.fit(fat_tails))
print(stats.t.fit(fat_tails, f0=6))
huber = sm.robust.scale.Huber()
loc, scale = huber(fat_tails)
print(loc, scale)
sm.robust.stand_mad(fat_tails)
sm.robust.stand_mad(fat_tails, c=stats.t(6).ppf(.75))
sm.robust.scale.mad(fat_tails)
#### Duncan's Occupational Prestige data - M-estimation for outliers
from statsmodels.graphics.api import abline_plot
from statsmodels.formula.api import ols, rlm
prestige = sm.datasets.get_rdataset("Duncan", "car", cache=True).data
print(prestige.head(10))
fig = plt.figure(figsize=(12,12))
ax1 = fig.add_subplot(211, xlabel='Income', ylabel='Prestige')
ax1.scatter(prestige.income, prestige.prestige)
xy_outlier = prestige.loc['minister'][['income','prestige']]
ax1.annotate('Minister', xy_outlier, xy_outlier+1, fontsize=16)
ax2 = fig.add_subplot(212, xlabel='Education',
ylabel='Prestige')
ax2.scatter(prestige.education, prestige.prestige);
ols_model = ols('prestige ~ income + education', prestige).fit()
print(ols_model.summary())
infl = ols_model.get_influence()
student = infl.summary_frame()['student_resid']
print(student)
print(student.loc[np.abs(student) > 2])
print(infl.summary_frame().loc['minister'])
sidak = ols_model.outlier_test('sidak')
sidak.sort_values('unadj_p', inplace=True)
print(sidak)
fdr = ols_model.outlier_test('fdr_bh')
fdr.sort_values('unadj_p', inplace=True)
print(fdr)
rlm_model = rlm('prestige ~ income + education', prestige).fit()
print(rlm_model.summary())
print(rlm_model.weights)
#### Hertzprung Russell data for Star Cluster CYG 0B1 - Leverage Points
# * Data is on the luminosity and temperature of 47 stars in the direction of Cygnus.
dta = sm.datasets.get_rdataset("starsCYG", "robustbase", cache=True).data
from matplotlib.patches import Ellipse
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111, xlabel='log(Temp)', ylabel='log(Light)', title='Hertzsprung-Russell Diagram of Star Cluster CYG OB1')
ax.scatter(*dta.values.T)
# highlight outliers
e = Ellipse((3.5, 6), .2, 1, alpha=.25, color='r')
ax.add_patch(e);
ax.annotate('Red giants', xy=(3.6, 6), xytext=(3.8, 6),
arrowprops=dict(facecolor='black', shrink=0.05, width=2),
horizontalalignment='left', verticalalignment='bottom',
clip_on=True, # clip to the axes bounding box
fontsize=16,
)
# annotate these with their index
for i,row in dta.loc[dta['log.Te'] < 3.8].iterrows():
ax.annotate(i, row, row + .01, fontsize=14)
xlim, ylim = ax.get_xlim(), ax.get_ylim()
from IPython.display import Image
Image(filename='star_diagram.png')
y = dta['log.light']
X = sm.add_constant(dta['log.Te'], prepend=True)
ols_model = sm.OLS(y, X).fit()
abline_plot(model_results=ols_model, ax=ax)
rlm_mod = sm.RLM(y, X, sm.robust.norms.TrimmedMean(.5)).fit()
abline_plot(model_results=rlm_mod, ax=ax, color='red')
# * Why? Because M-estimators are not robust to leverage points.
infl = ols_model.get_influence()
h_bar = 2*(ols_model.df_model + 1 )/ols_model.nobs
hat_diag = infl.summary_frame()['hat_diag']
hat_diag.loc[hat_diag > h_bar]
sidak2 = ols_model.outlier_test('sidak')
sidak2.sort_values('unadj_p', inplace=True)
print(sidak2)
fdr2 = ols_model.outlier_test('fdr_bh')
fdr2.sort_values('unadj_p', inplace=True)
print(fdr2)
# * Let's delete that line
del ax.lines[-1]
weights = np.ones(len(X))
weights[X[X['log.Te'] < 3.8].index.values - 1] = 0
wls_model = sm.WLS(y, X, weights=weights).fit()
abline_plot(model_results=wls_model, ax=ax, color='green')
# * MM-estimators are good for this type of problem; unfortunately, we don't have them yet.
# * They're being worked on, but this gives a good excuse to look at the R cell magics in the notebook.
yy = y.values[:,None]
xx = X['log.Te'].values[:,None]
get_ipython().magic(u'load_ext rmagic')
get_ipython().magic(u'R library(robustbase)')
get_ipython().magic(u'Rpush yy xx')
get_ipython().magic(u'R mod <- lmrob(yy ~ xx);')
get_ipython().magic(u'R params <- mod$coefficients;')
get_ipython().magic(u'Rpull params')
get_ipython().magic(u'R print(mod)')
print(params)
abline_plot(intercept=params[0], slope=params[1], ax=ax, color='green')
#### Exercise: Breakdown points of M-estimator
np.random.seed(12345)
nobs = 200
beta_true = np.array([3, 1, 2.5, 3, -4])
X = np.random.uniform(-20,20, size=(nobs, len(beta_true)-1))
# stack a constant in front
X = sm.add_constant(X, prepend=True) # np.c_[np.ones(nobs), X]
mc_iter = 500
contaminate = .25 # percentage of response variables to contaminate
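# Only the responses are contaminated (the design matrix is clean), so 25%
# gross outliers should remain within what the default RLM (Huber norm, MAD
# scale) can absorb; expect all_betas.mean(0) to land near beta_true.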
all_betas = []
for i in range(mc_iter):
y = np.dot(X, beta_true) + np.random.normal(size=200)
random_idx = np.random.randint(0, nobs, size=int(contaminate * nobs))
y[random_idx] = np.random.uniform(-750, 750)
beta_hat = sm.RLM(y, X).fit().params
all_betas.append(beta_hat)
all_betas = np.asarray(all_betas)
se_loss = lambda x : np.linalg.norm(x, ord=2)**2
se_beta = list(map(se_loss, all_betas - beta_true))
##### Squared error loss
np.array(se_beta).mean()
all_betas.mean(0)
beta_true
se_loss(all_betas.mean(0) - beta_true)
| bsd-3-clause |
Unidata/MetPy | v0.6/_downloads/Find_Natural_Neighbors_Verification.py | 3 | 2729 | # Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Find Natural Neighbors Verification
===================================
Finding natural neighbors in a triangulation.
A triangle is a natural neighbor of a point if that point lies inside the
triangle's circumscribed circle, i.e. within one circumradius of the circumcenter.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import Delaunay
from metpy.gridding.triangles import find_natural_neighbors
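# The membership test behind "natural neighbor" is a plain distance check; a
# minimal standalone sketch of the idea (not MetPy's actual implementation):
def in_circumcircle(point, circumcenter, circumradius):
    """Return True if `point` lies inside a triangle's circumscribed circle."""
    return np.hypot(*(np.asarray(point) - np.asarray(circumcenter))) < circumradius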
# Create test observations, test points, and plot the triangulation and points.
gx, gy = np.meshgrid(np.arange(0, 20, 4), np.arange(0, 20, 4))
pts = np.vstack([gx.ravel(), gy.ravel()]).T
tri = Delaunay(pts)
fig, ax = plt.subplots(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
ax.plot(x, y)
ax.annotate(i, xy=(np.mean(x), np.mean(y)))
test_points = np.array([[2, 2], [5, 10], [12, 13.4], [12, 8], [20, 20]])
for i, (x, y) in enumerate(test_points):
ax.plot(x, y, 'k.', markersize=6)
ax.annotate('test ' + str(i), xy=(x, y))
###########################################
# Since finding natural neighbors already calculates circumcenters and circumradii, return
# that information for later use.
#
# Each key of the neighbors dictionary is the index of a test point, and the
# associated list of integers gives the triangles that are natural neighbors of
# that particular test point.
#
# Since point 4 is far away from the triangulation, it has no natural neighbors.
# Point 3 is at the confluence of several triangles so it has many natural neighbors.
neighbors, tri_info = find_natural_neighbors(tri, test_points)
print(neighbors)
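# The mapping has the form {test_point_index: [triangle_index, ...]}; point 4
# maps to an empty list because it falls outside every circumcircle.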
###########################################
# We can then use the information in tri_info later.
#
# The dictionary key is the index of a particular triangle in the Delaunay triangulation data
# structure. 'cc' is that triangle's circumcenter, and 'r' is the radius of the circumcircle
# containing that triangle.
fig, ax = plt.subplots(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
ax.plot(x, y)
ax.annotate(i, xy=(np.mean(x), np.mean(y)))
# Using circumcenter and radius information from tri_info, plot circumcircles and
# circumcenters for each triangle.
for _idx, item in tri_info.items():
ax.plot(item['cc'][0], item['cc'][1], 'k.', markersize=5)
circ = plt.Circle(item['cc'], item['r'], edgecolor='k', facecolor='none',
transform=fig.axes[0].transData)
ax.add_artist(circ)
ax.set_aspect('equal', 'datalim')
plt.show()
| bsd-3-clause |
ijat/Hotspot-PUTRA-Auto-login | PyInstaller-3.2/PyInstaller/hooks/hook-IPython.py | 1 | 1076 | #-----------------------------------------------------------------------------
# Copyright (c) 2013-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Tested with IPython 4.0.0.
from PyInstaller.compat import modname_tkinter, is_win, is_darwin
from PyInstaller.utils.hooks import collect_data_files, collect_submodules
# Ignore 'matplotlib'. IPython contains support for matplotlib.
# Ignore GUI libraries. IPython supports integration with GUI frameworks.
# Assume that it will be imported by any other module when the user really
# uses it.
excludedimports = ['gtk', 'matplotlib', 'PyQt4', 'PyQt5', 'PySide']
# IPython uses 'tkinter' for clipboard access on Linux/Unix. Exclude it on Windows and OS X.
if is_win or is_darwin:
excludedimports.append(modname_tkinter)
datas = collect_data_files('IPython')
| gpl-3.0 |
bundgus/python-playground | matplotlib-playground/examples/event_handling/looking_glass.py | 1 | 1280 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
x, y = np.random.rand(2, 200)
fig, ax = plt.subplots()
circ = patches.Circle((0.5, 0.5), 0.25, alpha=0.8, fc='yellow')
ax.add_patch(circ)
ax.plot(x, y, alpha=0.2)
line, = ax.plot(x, y, alpha=1.0, clip_path=circ)
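# The scatter above stays faint everywhere; this second, fully opaque line is
# clipped to the circle, which the handler below lets the user drag around.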
class EventHandler(object):
def __init__(self):
fig.canvas.mpl_connect('button_press_event', self.onpress)
fig.canvas.mpl_connect('button_release_event', self.onrelease)
fig.canvas.mpl_connect('motion_notify_event', self.onmove)
self.x0, self.y0 = circ.center
self.pressevent = None
def onpress(self, event):
if event.inaxes != ax:
return
if not circ.contains(event)[0]:
return
self.pressevent = event
def onrelease(self, event):
self.pressevent = None
self.x0, self.y0 = circ.center
def onmove(self, event):
if self.pressevent is None or event.inaxes != self.pressevent.inaxes:
return
dx = event.xdata - self.pressevent.xdata
dy = event.ydata - self.pressevent.ydata
circ.center = self.x0 + dx, self.y0 + dy
line.set_clip_path(circ)
fig.canvas.draw()
handler = EventHandler()
plt.show()
| mit |
tomevans/pyphotom | photom_class.py | 1 | 14267 | import numpy as np
import matplotlib.pyplot as plt
import pdb
import os
import cPickle
import numpy as np
import shutil
from photom import photom_inspect, photom_reduce, photom_absolute, photom_relative, photom_checks, photom_optimise
homestr = os.path.expanduser( '~' )
class photom():
"""
"""
def __init__(self):
"""
Initalise a default photom object.
"""
self.analysis_dir = ''
self.nstars = None
self.image_list = None
self.bias_list = None
self.dark_list = None
self.flat_list = None
self.ccdproc_params = 'default'
self.master_bias = None
self.master_dark = None
self.master_flat = None
self.red_image_list = None
self.nimages_total = None
self.nimages_good = None
self.goodbad_flags = None
self.coords_input_files = None
self.coords_input_type = None
self.photpars = 'default'
self.fitskypars = 'default'
self.centerpars = 'default'
self.datapars = 'default'
self.dat_files = None
self.absphot_file = None
self.relphot_file = None
return None
def set_attributes( self, analysis_dir=None, image_list=None, bias_list=None, dark_list=None, \
flat_list=None, ccdproc_params=None, ap_params=None, master_bias=None, \
master_dark=None, master_flat=None, red_image_list=None, nimages_total=None, \
nimages_good=None, goodbad_flags=None, nstars=None, coords_input_files=None, \
coords_input_type=None, photpars=None, fitskypars=None, centerpars=None, \
datapars=None, dat_files=None, absphot_file=None, relphot_file=None ):
"""
Set photom object parameters.
"""
if analysis_dir!=None: self.analysis_dir = analysis_dir.replace( '~', homestr )
if self.analysis_dir=='': self.analysis_dir = os.getcwd()
if image_list!=None:
if (os.path.dirname(image_list)==''):
self.image_list = str(self.analysis_dir+'/'+image_list).replace('//','/')
else:
self.image_list = image_list
if red_image_list!=None:
if (os.path.dirname(red_image_list)==''):
self.red_image_list = str(self.analysis_dir+'/'+red_image_list).replace('//','/')
else:
self.red_image_list = red_image_list
if bias_list!=None:
if (os.path.dirname(bias_list)==''):
self.bias_list = str(self.analysis_dir+'/'+bias_list).replace('//','/')
else:
self.bias_list = bias_list
if dark_list!=None:
if (os.path.dirname(dark_list)==''):
self.dark_list = str(self.analysis_dir+'/'+dark_list).replace('//','/')
else:
self.dark_list = dark_list
if flat_list!=None:
if (os.path.dirname(flat_list)==''):
self.flat_list = str(self.analysis_dir+'/'+flat_list).replace('//','/')
else:
self.flat_list = flat_list
if coords_input_files!=None:
        if np.ndim(coords_input_files)==0:
self.coords_input_files = [str(self.analysis_dir+'/'+coords_input_files).replace('//','/')]
else:
self.coords_input_files = []
for coords_input in coords_input_files:
if os.path.dirname(coords_input)=='':
coords_input_full = str(self.analysis_dir+'/'+coords_input).replace('//','/')
else:
coords_input_full = coords_input
self.coords_input_files = self.coords_input_files+[coords_input_full]
if coords_input_type!=None: self.coords_input_type = coords_input_type
if ccdproc_params!=None: self.ccdproc_params = ccdproc_params
if ap_params!=None: self.ap_params = ap_params
if master_bias!=None: self.master_bias = master_bias
if master_dark!=None: self.master_dark = master_dark
if master_flat!=None: self.master_flat = master_flat
if red_image_list!=None: self.red_image_list = red_image_list
        if goodbad_flags is not None: self.goodbad_flags = goodbad_flags
if nimages_total!=None: self.nimages_total = nimages_total
if nimages_good!=None: self.nimages_good = nimages_good
if nstars!=None: self.nstars = nstars
if photpars!=None: self.photpars = photpars
if fitskypars!=None: self.fitskypars = fitskypars
if centerpars!=None: self.centerpars = centerpars
if datapars!=None: self.datapars = datapars
if dat_files!=None: self.dat_files = dat_files
if absphot_file!=None: self.absphot_file = absphot_file
if relphot_file!=None: self.relphot_file = relphot_file
self.pickle_obj()
return None
def inspect_images( self, obstime_kw=None, iraf_display_mode='display' ):
"""
"""
photom_inspect.Main( self, obstime_kw=obstime_kw, iraf_display_mode=iraf_display_mode )
self.pickle_obj()
return None
def reduce_images( self, use_previous=False, ccdproc_ccdtype='default', ccdproc_overscan='default', \
ccdproc_trim='default', ccdproc_fixpix='default', ccdproc_illumcor='default', \
ccdproc_fringecor='default', ccdproc_readcor='default', ccdproc_scancor='default', \
ccdproc_interactive='default', ccdproc_biassec='default', ccdproc_trimsec='default' ):
"""
"""
if self.ccdproc_params=='custom':
photom_reduce.custom_ccdproc_params( ccdproc_ccdtype=ccdproc_ccdtype, ccdproc_overscan=ccdproc_overscan, \
ccdproc_trim=ccdproc_trim, ccdproc_fixpix=ccdproc_fixpix, \
ccdproc_illumcor=ccdproc_illumcor, ccdproc_fringecor=ccdproc_fringecor, \
ccdproc_readcor=ccdproc_readcor, ccdproc_scancor=ccdproc_scancor, \
ccdproc_interactive=ccdproc_interactive, ccdproc_biassec=ccdproc_biassec, \
ccdproc_trimsec=ccdproc_trimsec )
elif self.ccdproc_params=='default':
photom_reduce.default_ccdproc_params(self)
if use_previous==False:
photom_reduce.Main(self)
else:
self.self_update()
self.pickle_obj()
return None
def optimise_aperture( self, ap_size_trials, sky_annulus_trials, sky_dannulus, gain_kw=None, readnoise_kw=None, \
exptime_kw=None, obstime_kw=None, airmass_kw=None, ix_target=None, ix_comparisons=None ):
"""
Searches a grid of aperture radii and sky annulus radii for the combination that
minimises the scatter of the relative photometry.
"""
scatter_array = photom_optimise.Main( self, ap_size_trials, sky_annulus_trials, sky_dannulus, datapars_gain=gain_kw, \
datapars_readnoise=readnoise_kw, datapars_exposure=exptime_kw, \
datapars_obstime=obstime_kw, datapars_airmass=airmass_kw, ix_target=ix_target, \
ix_comparisons=ix_comparisons )
return scatter_array
def do_absphot( self, photpars_apertures='default', fitskypars_annulus='default', fitskypars_dannulus='default', \
fitskypars_salgorithm='default', centerpars_maxshift='default', centerpars_cbox='default', \
centerpars_minsnratio='default', datapars_gain='default', datapars_readnoise='default', \
datapars_exposure='default', datapars_obstime='default', datapars_airmass='default', make_plots=True ):
"""
Does absolute photometry for one or more stars given a list of images.
Output is generated in the form of two types of file:
1. starX_absphot.dat for X=0,1,2,... files contain columns with the
more detailed output for each of the stars, with each line
corresponding to a different image.
2. absolute.phot file containing the important numerical columns for
each of the stars; it's supposed to be the most convenient output
for use with numpy and for generating relative photometry.
Summary plots are also generated by default:
Figure 1:
** Top left = traces of xy drift for each of the stars
** Bottom left = airmass versus time
** Top right = absolute flux versus time for each star
** Bottom right = sky annulus value as a function of time for each star
Figure 2:
?? Plots image number versus measured scatter divided by the calculated Poisson noise ??
"""
if self.photpars=='custom':
photom_absolute.custom_photpars( self, photpars_apertures=photpars_apertures )
elif self.photpars=='default':
photom_absolute.default_photpars( self )
if self.fitskypars=='custom':
photom_absolute.custom_fitskypars( self, fitskypars_annulus=fitskypars_annulus, fitskypars_dannulus=fitskypars_dannulus, \
fitskypars_salgorithm=fitskypars_salgorithm )
elif self.fitskypars=='default':
photom_absolute.default_fitskypars( self )
if self.centerpars=='custom':
photom_absolute.custom_centerpars( self, centerpars_maxshift=centerpars_maxshift, centerpars_cbox=centerpars_cbox, \
centerpars_minsnratio=centerpars_minsnratio )
elif self.centerpars=='default':
photom_absolute.default_centerpars( self )
if self.datapars=='custom':
photom_absolute.custom_datapars( self, datapars_gain=datapars_gain, datapars_readnoise=datapars_readnoise, \
datapars_exposure=datapars_exposure, datapars_obstime=datapars_obstime, \
datapars_airmass=datapars_airmass )
elif self.datapars=='default':
photom_absolute.default_datapars( self )
photom_absolute.Main( self, make_plots=make_plots )
self.pickle_obj()
return None
def do_relphot( self, ix_target=None, ix_comparisons=None, make_plots=True ):
"""
Calculates relative fluxes using absolute photometry already stored in the photom object.
Must specify indices for the target star and comparison stars to be used, using the format
0,1,2,... etc where 0 is the first star.
"""
photom_relative.Main( self, ix_target=ix_target, ix_comparisons=ix_comparisons, make_plots=make_plots )
self.pickle_obj()
return None
def check_relphot( self ):
"""
Does two basic checks of the relative photometry in an effort to identify
variable comparison stars. Output is in the form of plots that must be
visually inspected to identify variable stars.
The two types of checks are:
1. All possible pairs of stars that can be made up from the target
and comparisons are checked.
2. A leave-one-out approach is taken, where the relative photometry is
repeated multiple times, with a different comparison star excluded
each time.
"""
photom_checks.comparisons( self )
return None
def update_auxvars( self, headerkws=None ):
"""
Will update the auxiliary variables within the photom object. This is done
using the photometry already contained in the object to calculate the total
sum of all stellar fluxes, as well as extracting header information from
images stored in the red_images_list variable.
"""
photom_checks.auxiliary_variables( self, headerkws=headerkws )
return None
def self_update( self ):
"""
Routine used to generate default values for a few variables, eg. if
a certain analysis step, such as reduce_images(), has already been
performed and so does not need to be repeated.
!!NOTE: This routine is currently pretty ad-hoc and could possibly do with a
rethink plus the addition of various features that have been added to the
overall pipeline since I first wrote this particular routine a while back.
"""
# Count the total number of images:
try:
red_images = np.loadtxt(self.red_image_list, dtype='str')
self.nimages_total = len(red_images)
except:
pass
# Set the goodbad_flags to all be good:
        if self.goodbad_flags is None:
try:
self.goodbad_flags = np.ones(self.nimages_total)
except:
pass
# Count the number of good images:
self.nimages_good = int(np.sum(self.goodbad_flags))
self.pickle_obj()
return None
def pickle_obj( self, quiet=False ):
"""
Pickle the photom object in its current state. Saves the output as photom_object.pkl in the
analysis directory.
"""
outfile_name = str( self.analysis_dir + '/photom_object.pkl' ).replace( '//', '/' )
outfile_open = open( outfile_name, 'w' )
cPickle.dump( self, outfile_open )
outfile_open.close()
if quiet==False:
print '\nSaved %s\n' % outfile_name
self.pickled_output = outfile_name
return None
def backup_the_pickle( self ):
"""
Makes a backup of the current photom_object.pkl, saving the backed up version as photom_object.pkl.BACKUP
in the analysis directory.
"""
pkl_name = str( self.analysis_dir + '/photom_object.pkl' ).replace( '//', '/' )
shutil.copyfile( pkl_name, pkl_name + '.BACKUP' )
print '\nBacked up pickled photom object'
return None
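# Minimal usage sketch (illustrative only: file names below are hypothetical,
# and IRAF plus the photom_* helper modules must be importable):
#
# obj = photom()
# obj.set_attributes(analysis_dir='~/analysis', image_list='images.list',
#                    flat_list='flats.list', coords_input_files='stars.coords')
# obj.reduce_images()
# obj.do_absphot()
# obj.do_relphot(ix_target=0, ix_comparisons=[1, 2])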
| gpl-2.0 |
KECB/learn | computer_vision/12_rmv_salt_pepper_median_blur.py | 1 | 1464 | import numpy as np
import cv2
import matplotlib.pyplot as plt
# load in image and add Salt and pepper noise
moon = cv2.imread('images/moon.png', 0)
######################################################## ADD SALT & PEPPER NOISE
# salt and peppering manually (randomly assign coords as either white or black)
rows, cols = moon.shape
salt_vs_pepper_ratio = 0.5
amount = 0.01
moon_salted_and_peppered = moon.copy()
num_salt = np.ceil(amount * moon.size * salt_vs_pepper_ratio)
coords = [np.random.randint(0, i - 1, int(num_salt)) for i in moon.shape]
moon_salted_and_peppered[tuple(coords)] = 255
num_pepper = np.ceil(amount * moon.size * (1 - salt_vs_pepper_ratio))
coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in moon.shape]
moon_salted_and_peppered[tuple(coords)] = 0
############################################ APPLY MEDIAN FILTER TO REMOVE NOISE
# The second argument is the aperture linear size; it must be odd and greater
# than 1, for example: 3, 5, 7
moon_median = cv2.medianBlur(moon, 3)
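# A mean/Gaussian blur would smear each impulse into its neighbours; the
# median simply discards them, since a lone 0 or 255 almost never wins the
# vote within a 3x3 neighbourhood.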
# show all three images using Matplotlib
plt.figure(figsize=(15, 6))
plt.subplot(1, 3, 1)
plt.imshow(moon, cmap='gray'), plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(1, 3, 2)
plt.imshow(moon_salted_and_peppered, cmap='gray')
plt.title('Salted & Peppered'), plt.xticks([]), plt.yticks([])
plt.subplot(1, 3, 3)
plt.imshow(moon_median, cmap='gray'), plt.title('Median Blur on S&P')
plt.xticks([]), plt.yticks([])
plt.tight_layout()
plt.show()
| mit |
mop/LTPTextDetector | scripts/pw_analyze/svmdelme.py | 1 | 5600 | import numpy as np
from sklearn.cross_validation import cross_val_score, ShuffleSplit
from sklearn.svm import LinearSVC, SVC
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import precision_recall_fscore_support
import matplotlib.pyplot as plt
data = np.genfromtxt('dists_cleaned.csv', delimiter=',')
#samples = np.bitwise_or(np.bitwise_and(data[:,0] == -1, data[:,1] <= 2), data[:,0]==1)
#data = data[samples,:]
#data = data[data[:,7]>=5,:]
pos = data[data[:,0]==1,:]
neg = data[data[:,0]==-1,:]
a = 0.33974138
b = 0.47850904
c = -0.56307525
xrg = np.linspace(0,5,100)
yrg = np.linspace(0,5,100)
X,Y = np.meshgrid(xrg,yrg)
Z = a * X + b * Y + c
print(Z)
plt.scatter(neg[:,3], neg[:,4], color='r')
plt.scatter(pos[:,3], pos[:,4], color='b')
plt.contour(X,Y,Z)
plt.xlabel('medians')
plt.ylabel('heights')
plt.show()
#data[data[:,0]==1,0] = 2
#data[data[:,0]==-1,0] = 1
#data[data[:,0]==2,0] = -1
#print data.shape
#data = data[data[:,1]>0,:]
#data = data[data[:,2]>0,:]
#print data.shape
cv = ShuffleSplit(data.shape[0], n_iter=10, random_state=4)
min_dists = [1,2,3,4,5,100]
c1_grid = np.logspace(0,2,10)
c2_grid = np.logspace(0,4,15)
class_weights = {1:5, -1:1}
beta = 1.0
#best_params = {}
#best_fscore = 0
#for d in min_dists:
# for C1 in c1_grid:
# for C2 in c2_grid:
# precisions = []
# recalls = []
# fscores = []
# accs = []
# for (train_idx, test_idx) in cv:
# X = data[train_idx,3:5]
# y = data[train_idx,0]
#
# svm1 = LinearSVC(random_state=42, C=C1, class_weight=class_weights)
# svm1.fit(X,y)
#
# X_simple = data[train_idx, 4]
# X_simple = X_simple.reshape((train_idx.shape[0], 1))
# svm2 = LinearSVC(random_state=42, C=C2, class_weight=class_weights)
# svm2.fit(X_simple, y)
#
# #ys = svm2.predict(data[test_idx, 4].reshape((test_idx.shape[0], 1)))
# #accs.append(svm2.score(data[test_idx, 4].reshape((test_idx.shape[0], 1)), data[test_idx,0]))
#
# ys = np.zeros((test_idx.shape[0],))
# for i,idx in enumerate(test_idx):
# if data[idx,-1] >= d:
# ys[i] = svm1.predict(data[idx, 3:5].reshape((1,2)))
# else:
# ys[i] = svm2.predict(data[idx, 4].reshape((1,1)))
#
# acc = np.sum(data[test_idx,0] == ys) / float(test_idx.shape[0])
# accs.append(acc)
# ps, rs, fs, ss = precision_recall_fscore_support(data[test_idx,0], ys, beta=beta)
# precisions.append(ps[1])
# recalls.append(rs[1])
# fscores.append(fs[1])
# print 'C1: %f, C2: %f, d: %f, prec: %f, recall: %f, f-score: %f, acc: %f' % (C1, C2, d, np.mean(precisions), np.mean(recalls), np.mean(fscores), np.mean(accs))
# if np.mean(fscores) > best_fscore:
# print '*'
# best_fscore = np.mean(fscores)
# best_params = {
# 'C1': C1,
# 'C2': C2,
# 'd': d
# }
#
#best_params = {}
#best_fscore = 0
#print data.shape
#for C1 in c1_grid:
# precisions = []
# recalls = []
# fscores = []
# accs = []
# for (train_idx, test_idx) in cv:
# X = data[train_idx,3:5]
# y = data[train_idx,0]
#
# svm1 = LinearSVC(random_state=4,C=C1, class_weight=class_weights)
# svm1.fit(X,y)
#
# ys = np.zeros((test_idx.shape[0],))
# for i,idx in enumerate(test_idx):
# ys[i] = svm1.predict(data[idx, 3:5].reshape((1,2)))
#
# acc = np.sum(data[test_idx,0] == ys) / float(test_idx.shape[0])
# accs.append(acc)
# ps, rs, fs, ss = precision_recall_fscore_support(data[test_idx,0], ys, beta=beta)
# precisions.append(ps[1])
# recalls.append(rs[1])
# fscores.append(fs[1])
# svm = LinearSVC(random_state=4,C=C1, class_weight=class_weights)
# svm.fit(data[:,3:5], data[:,0])
# print 'C1: %f, prec: %f, recall: %f, f-score: %f, acc: %f' % (C1, np.mean(precisions), np.mean(recalls), np.mean(fscores), np.mean(accs))
# print svm.coef_
# print svm.intercept_
# if np.mean(fscores) > best_fscore:
# print '*'
# best_fscore = np.mean(fscores)
# best_params = {
# 'C1': C1
# }
#print 'best:'
#print best_params
#svm = SVC(kernel='linear', C=best_params['C1'], class_weight=class_weights)
#svm.fit(data[:,1:3], data[:,0])
#print svm.coef_
#print svm.intercept_
#svm = SVC(kernel='linear', C=best_params['C1'], class_weight=class_weights)
#svm.fit(data[:,3:5], data[:,0])
#print svm.coef_
#print svm.intercept_
#svm = SVC()
#search = GridSearchCV(svm, {
# 'kernel': ('rbf',), 'C': [1,10,100,1000],
# 'gamma': [0.05, 0.1, 0.25, 0.128, 0.5, 1.0, 1.5],
# 'class_weight': ({1:1,-1:1},),
# }, cv=cv)
svm = LinearSVC()
search = GridSearchCV(svm, {
'C': np.logspace(0,4,15).tolist(),
'class_weight': (
        {1:1,-1:1},
{1:1,-1:2},
{1:1,-1:3},
{1:1,-1:4},
{-1:1,1:2},
{-1:1,1:3},
{-1:1,1:5},{1:4,-1:1},{1:5,-1:1})
}, cv=cv, refit=False)
search.fit(data[:,3:5], data[:,0])
print(search)
print(search.best_score_)
print(search.best_params_)
svm = LinearSVC(random_state=42,**search.best_params_)
svm.fit(data[:,1:3],data[:,0])
print(svm.score(data[:,1:3],data[:,0]))
print(svm.coef_)
print(svm.intercept_)
| gpl-3.0 |
vorasagar7/sp17-i524 | project/S17-IR-P001/code/ansible/ansible-node/files/visualization/FinalScript.py | 4 | 15339 | #Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
import pandas as pd
from textblob import TextBlob
from time import strptime
import numpy as np
import re
import time
import zipcode
import sys, errno
from nltk.corpus import stopwords
from itertools import combinations
from collections import Counter
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import nltk
nltk.download('stopwords')
runCount=0
#Variables that contains the user credentials to access Twitter API
access_token = ""
access_token_secret = ""
consumer_key = ""
consumer_secret = ""
tweets_data = []
stop = stopwords.words('english') + ['and']
emoticons_str = r"""
(?:
[:=;] # Eyes
[oO\-]? # Nose (optional)
[D\)\]\(\]/\\OpP] # Mouth
)"""
regex_str = [
emoticons_str,
r'<[^>]+>', # HTML tags
r'(?:@[\w_]+)', # @-mentions
r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags
r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs
r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers
r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and '
r'(?:[\w_]+)', # other words
r'(?:\S)' # anything else
]
tokens_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)
emoticon_re = re.compile(r'^'+emoticons_str+'$', re.VERBOSE | re.IGNORECASE)
Count=0
stop = stopwords.words('english')
def create_dataframe(tweets_data):
tweets = pd.DataFrame(index=range(len(tweets_data)),
columns=['text','created_at','location','state','sentiment','sentiment_cat','country_code','hour'])
for i in range(len(tweets_data)):
try:
tweets['text'][i] = tweets_data[i]['text']
except:
tweets['text'][i] = ""
try:
tweets['location'][i]=tweets_data[i]['user']['location']
except:
tweets['location'][i]='NA'
try:
tweets['country_code'][i]=tweets_data[i]['place']['country_code']
except:
tweets['country_code'][i]=''
try:
lon=tweets_data[i]['place']['bounding_box']['coordinates'][0][0][0]
except:
lon='NA'
try:
lat=tweets_data[i]['place']['bounding_box']['coordinates'][0][0][1]
except:
lat='NA'
#print (lat,lon)
try:
tweets['created_at'][i]=tweets_data[i]['created_at']
except:
tweets['created_at'][i]='NA'
try:
tweets['hour'][i]=tweets['created_at'][i][11:13]
except:
tweets['hour'][i]='NA'
try:
stateFromData=tweets['location'][i].split(',')[1]
except:
stateFromData=''
if len(stateFromData)==2:
tweets['state'][i]=stateFromData
else:
if lat!='NA':
radius=10
incre=10
zips=zipcode.isinradius((lat,lon),radius)
while len(zips)==0:
radius=radius+incre
zips=zipcode.isinradius((lat,lon),radius)
incre=incre+10
myzip = zipcode.isequal(str(zips[0].zip))
tweets['state'][i]=myzip.state
else:
tweets['state'][i]='NA'
blob = TextBlob(tweets['text'][i])
try:
sentence=blob.sentences[0]
tweets['sentiment'][i]=float(sentence.sentiment.polarity)
except:
tweets['sentiment'][i]=0
        if tweets['sentiment'][i] < 0:
            tweets['sentiment_cat'][i] = 'Neg'
        elif tweets['sentiment'][i] > 0:
            tweets['sentiment_cat'][i] = 'Pos'
        else:
            tweets['sentiment_cat'][i] = 'Neu'
print (tweets.head())
return tweets
def state_senti(newFolder,usStateSentiOld,tweetsFinal):
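    # Count tweets per (state, sentiment category), pivot to one row per state,
    # then fold those counts into the running totals from previous batches.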
output2=pd.DataFrame({'value' : tweetsFinal.groupby( [ "State","sentiment_cat"] ).size()}).reset_index()
outData=pd.pivot_table(output2,values='value', index=['State'], columns=['sentiment_cat'], aggfunc=np.sum)
outData=outData.fillna(0)
outData['State']=outData.index
#outData.reset_index()
print (outData.columns.values)
outData = pd.merge(usStateSentiOld, outData, how='left', left_on='State', right_on = 'State')
outData=outData.fillna(0)
outData['Pos']=outData['Pos_x']+outData['Pos_y']
del outData['Pos_x']
del outData['Pos_y']
outData['Neg']=outData['Neg_x']+outData['Neg_y']
del outData['Neg_x']
del outData['Neg_y']
outData['Neu']=outData['Neu_x']+outData['Neu_y']
del outData['Neu_x']
del outData['Neu_y']
outData.to_csv(newFolder+"usStates-SentiCount.csv",index=False)
#-------------------------------------------
try:
outData['sum']=outData[['Neg', 'Neu', 'Pos']].sum(axis=1)
outData['max']=outData['maxFinal']=outData[['Neg', 'Neu', 'Pos']].idxmax(axis=1)
except:
outData['sum']=outData[['Neu', 'Pos']].sum(axis=1)
outData['max']=outData['maxFinal']=outData[[ 'Neu', 'Pos']].idxmax(axis=1)
#-------------------------------------------
for i in range(len(outData)):
        if outData['max'][i] == "Pos":
            outData['maxFinal'][i] = '1'
        elif outData['max'][i] == "Neu":
            outData['maxFinal'][i] = '-1'
        else:
            outData['maxFinal'][i] = '2'
del outData['max']
d="var data =[\n"
for i in range(len(outData)):
row=outData.ix[i]
#print (row)
d += "[\'"+row['State']+"\',"+",".join([str(i) for i in row[:5]])+"],\n"
return d+']'
def create_timechart(newFolder,oldtimedata,tweets):
td1 = pd.DataFrame({'value' : tweets.groupby( [ "created_at"] ).size()}).reset_index()
td1['created_at'] = td1['created_at'].astype('str')
mask = (td1['created_at'].str.len() > 2)
td1=td1.loc[mask]
timedata = td1[td1.created_at != 'NA']
timedata=oldtimedata.append(timedata, ignore_index=True)
timedata.to_csv(newFolder+"timeseries.csv",index=False)
data1 ={}
data = ["var data=["]
for i in range(0,len(timedata)):
year = timedata['created_at'][i][-4:]
if (timedata['created_at'][i][4:7] == 'Jan'):
mon = '1'
else:
if (timedata['created_at'][i][4:7] == 'Feb'):
mon = '2'
else:
if (timedata['created_at'][i][4:7] == 'Mar'):
mon = '3'
else:
if (timedata['created_at'][i][4:7] == 'Apr'):
mon = '4'
else:
if (timedata['created_at'][i][4:7] == 'May'):
mon = '5'
else:
if (timedata['created_at'][i][4:7] == 'Jun'):
mon = '6'
else:
if (timedata['created_at'][i][4:7] == 'Jul'):
mon = '7'
else:
if (timedata['created_at'][i][4:7] == 'Aug'):
mon = '8'
else:
if (timedata['created_at'][i][4:7] == 'Sep'):
mon = '9'
else:
if (timedata['created_at'][i][4:7] == 'Oct'):
mon = '10'
else:
if (timedata['created_at'][i][4:7] == 'Nov'):
mon = '11'
else:
mon = '12'
date = timedata['created_at'][i][7:10]
hour = timedata['created_at'][i][10:13]
minu = timedata['created_at'][i][14:16]
sec = timedata['created_at'][i][17:20]
value = timedata['value'][i]
data1 = ("[Date.UTC("+str(year)+","+str(mon)+","+str(date)+","+str(hour)+","+str(minu)+","+str(sec)+"),"+str(value)+"]")
        data.append(data1)
data = ",\n".join(data)+"\n]"
data = data.replace("[,","[")
return data
def tokenize(s):
tokens=tokens_re.findall(s)
return [ x for x in tokens if 'http' not in x and len(x)>1 and x.lower() not in stop]
def preprocess(s, lowercase=True):
tokens = tokenize(s)
if lowercase:
tokens = [token if emoticon_re.search(token) else token.lower() for token in tokens]
return tokens
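# Illustrative behaviour on a hypothetical tweet:
# preprocess("Loving the #storm! http://t.co/x") -> ['loving', '#storm']
# (URLs, stopwords and one-character tokens are dropped, the rest lower-cased).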
def collect_pairs(lines):
pair_counter = Counter()
for line in lines:
unique_tokens = sorted(set(line)) # exclude duplicates in same line and sort to ensure one word is always before other
combos = combinations(unique_tokens, 2)
pair_counter += Counter(combos)
return pair_counter
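# For example, two (hypothetical) tokenized tweets
# [['storm', 'rain'], ['storm', 'rain', 'wind']]
# yield Counter({('rain', 'storm'): 2, ('rain', 'wind'): 1, ('storm', 'wind'): 1}).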
#Co-occurrence:
def co_occur(tweets):
t2 = []
t1 =tweets['text']
for t in range(len(t1)):
t2.append(preprocess(t1[t]))
pairs = collect_pairs(t2)
top_pairs = pairs.most_common(200)
nodes={}
links=["\"links\":["]
count =0
len_top=len(top_pairs)
nptp = np.array(top_pairs)
maxtp = np.max(nptp[:,1])
for p in range(len(top_pairs)):
for i in range(2):
if top_pairs[p][0][i] not in nodes:
nodes[top_pairs[p][0][i]] = count
count+=1
link="{ \"source\":"+str(nodes[top_pairs[p][0][0]])+",\"target\":"+str(nodes[top_pairs[p][0][1]])+",\"value\":"+str(round(top_pairs[p][1]*10/maxtp))+"}"
links.append(link)
links=",\n".join(links)+"\n]"
links=links.replace("[,","[")
nodes = sorted(nodes.items(), key=lambda x: x[1])
nodes1=["\"nodes\":["]
for p in range(len(nodes)):
nodes1.append("{ \"name\":\""+nodes[p][0]+"\",\"group\":"+"0}")
nodes1=",\n".join(nodes1)+"\n]"
nodes1=nodes1.replace("[,","[")
return nodes1,links
def heatworldgrid(newFolder,worldOld,tweets):
contdata=pd.read_csv("continents.txt")
contdat=contdata.fillna("NA")
tweets['sentiment']=tweets['sentiment'].apply(pd.to_numeric)
#print (tweets.dtypes)
Countryhour=pd.DataFrame({'sentiment' : tweets.groupby( ["country_code","hour"] )['sentiment'].mean()}).reset_index()
final=pd.merge(Countryhour, contdata, how='left',left_on="country_code",right_on="country")
print (final.columns.values)
del final['country']
#del final['Unnamed: 0']
del final['country_code']
Conthour=pd.DataFrame({'sentiment' : final.groupby( ["continent","hour"] )['sentiment'].mean()}).reset_index()
Conthour = pd.merge(worldOld, Conthour, how='left', left_on=["continent","hour"] , right_on = ["continent","hour"] )
Conthour=Conthour.fillna(0)
Conthour['sentiment']=(Conthour['sentiment_x']*1000000+Conthour['sentiment_y']*10000)/(1010000)
del Conthour['sentiment_x']
del Conthour['sentiment_y']
Conthour.to_csv(newFolder+"Continent-hour-senti.csv",index=False)
minVal=min(Conthour['sentiment'])
maxVal=max(Conthour['sentiment'])
outputStr=""
uniqueCont= list(np.unique(Conthour['continent']))
outputStr+="var continent =["+",".join(["'"+i+"'" for i in uniqueCont])+"];\n"
numCont=len(uniqueCont)
numHour=24
outputStr+="var hour =["+",".join(["'"+str(i)+"'" for i in range(numHour)])+"];\n"
outMatrix=np.zeros(shape=(numCont,numHour))
outputStr+="var data=["
datastr=[]
for i in range(len(Conthour)):
continent=Conthour['continent'][i]
hour=Conthour['hour'][i]
contIndex=uniqueCont.index(continent)
outMatrix[contIndex][int(hour)]=Conthour['sentiment'][i]
for i in range(numCont):
for j in range(numHour):
datastr.append("["+str(j)+","+str(i)+","+str(int(outMatrix[i][j]))+"]")
outputStr+=",".join(datastr)+"]; var minval = "+str(minVal)+";\n var maxval = "+str(maxVal)+";"
return outputStr
def createwordcloud(tweets):
# Read the whole text.
#text = open(path.join(d, 'constitution.txt')).read()
textpos = tweets[tweets.sentiment_cat == 'Pos']
textneg = tweets[tweets.sentiment_cat == 'Neg']
postweets=""
for i in textpos.index.values:
postweets+=textpos['text'][i]+" "
negtweets=""
for i in textneg.index.values:
negtweets+=textneg['text'][i]+" "
textp = preprocess(postweets)
textp=" ".join(textp)
textn = preprocess(negtweets)
textn=" ".join(textn)
wordcloudp = WordCloud( stopwords=stop,background_color='white',width=1200,height=1000).generate(textp)
wordcloudn = WordCloud( stopwords=stop,background_color='white', width=1200,height=1000).generate(textn)
image1 = wordcloudp.to_image()
image2= wordcloudn.to_image()
image1.save("wordcloup.png")
image2.save("wordcloudn.png")
def analyze(tweets_data):
oldFolder="Data\\"
outputFolder="OutputJS\\"
newFolder="NewData\\"
#Dataframe is created from the list of json tweets, sentiment is also calculated
tweets=create_dataframe(tweets_data)
statedata=pd.read_csv(oldFolder+"states.csv")
tweetsFinal=pd.merge(tweets, statedata, how='left',left_on="state",right_on="Abbreviation")
#UsStatewise Tweets
usStateOld=pd.read_csv(oldFolder+"usStatesCount.csv")
usState=pd.DataFrame({'value' : tweetsFinal.groupby( [ "State"] ).size()}).reset_index()
usState_new = pd.merge(usStateOld, usState, how='left', left_on='State', right_on = 'State')
usState_new=usState_new.fillna(0)
usState_new['value']=usState_new['value_x']+usState_new['value_y']
del usState_new['value_x']
del usState_new['value_y']
usState_new.to_csv(newFolder+"usStatesCount.csv",index=False)
print (usState_new.head())
usStateJson=usState_new.to_json(orient = "records")
usStateJsonfinalOutput=usStateJson[33:len(usStateJson)-1].upper().replace("\"STATE\"","ucName").replace("\"VALUE\"","value")
with open('Final\\US_heat_count\\data.js', 'w') as outfile:
outfile.write(usStateJsonfinalOutput)
#UsStatewise Sentiment
usStateSentiOld=pd.read_csv(oldFolder+"usStates-SentiCount.csv")
statesentiout=state_senti(newFolder,usStateSentiOld,tweetsFinal)
with open('Final\\map-pies\\data.js', 'w') as outfile:
outfile.write(statesentiout)
#TimeSeries Chart
timeOld=pd.read_csv(oldFolder+"timeseries.csv")
timedata=create_timechart(newFolder,timeOld,tweets)
with open('Final\\dynamic-master-detail\\time_series.js', 'w') as outfile:
outfile.write(timedata)
#Co-occur Chart
nodes1,links=co_occur(tweets)
with open(outputFolder+'cooccur_word-1.json', 'w') as outfile:
outfile.write("{\n"+nodes1+",\n"+links+"}\n")
#Heat World Grid
worldOld=pd.read_csv(oldFolder+"Continent-hour-senti.csv")
heatjson=heatworldgrid(newFolder,worldOld,tweets)
with open('Final\\heatmap\\heatchart_data-1.js', 'w') as outfile:
outfile.write(heatjson)
#WordCloud
createwordcloud(tweets)
#This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
def on_data(self, data):
global Count,tweets_data
Count+=1
#TweetCount+=1
if Count%1000==0:
print ("Analyze data started")
x=time.time()
analyze(tweets_data)
print ("Analyze data Completed in ", time.time()-x)
sys.exit(errno.EACCES)
tweets_data=[]
Count=0
tweet = json.loads(data)
tweets_data.append(tweet)
return True
def on_error(self, status):
print (status)
if __name__ == '__main__':
#This handles Twitter authetification and the connection to Twitter Streaming API
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
#This line filter Twitter Streams to capture data by the keywords: 'python', 'javascript', 'ruby'
stream.filter(languages=["en"],track=['a', 'e', 'i','o','u','#']) | apache-2.0 |
mikekestemont/ruzicka | code/04latin_test_o2.py | 1 | 3340 | from __future__ import print_function
import os
import time
import json
import pickle
import sys
from itertools import product, combinations
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from ruzicka.utilities import binarize
from ruzicka.vectorization import Vectorizer
from ruzicka.utilities import load_pan_dataset, train_dev_split, get_vocab_size
from sklearn.cross_validation import train_test_split
from ruzicka.score_shifting import ScoreShifter
from ruzicka.evaluation import pan_metrics
from ruzicka.Order2Verifier import Order2Verifier as Verifier
import ruzicka.art as art
# run script for top-5 metrics
ngram_type = 'word'
ngram_size = 1
base = 'profile'
vector_space = 'tf_std'
metric = 'cosine'
nb_bootstrap_iter = 100
rnd_prop = 0.5
nb_imposters = 30
mfi = sys.maxsize
min_df = 2
# get imposter data:
train_data, _ = load_pan_dataset('../data/latin/dev') # ignore unknown documents
train_labels, train_documents = zip(*train_data)
# get test data:
test_data, _ = load_pan_dataset('../data/latin/test') # ignore unknown documents
test_labels, test_documents = zip(*test_data)
# fit encoder for author labels:
label_encoder = LabelEncoder()
label_encoder.fit(train_labels+test_labels)
train_ints = label_encoder.transform(train_labels)
test_ints = label_encoder.transform(test_labels)
# fit vectorizer:
vectorizer = Vectorizer(mfi = mfi,
vector_space = vector_space,
ngram_type = ngram_type,
ngram_size = ngram_size)
vectorizer.fit(train_documents+test_documents)
train_X = vectorizer.transform(train_documents).toarray()
test_X = vectorizer.transform(test_documents).toarray()
cols = ['label']
for test_author in sorted(set(test_ints)):
auth_label = label_encoder.inverse_transform([test_author])[0]
cols.append(auth_label)
proba_df = pd.DataFrame(columns=cols)
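# Leave-one-out attribution: each test document is scored in turn against every
# candidate author, with the remaining test documents folded into the
# training/imposter pool.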
for idx in range(len(test_documents)):
target_auth = test_ints[idx]
target_docu = test_X[idx]
non_target_test_ints = np.array([test_ints[i] for i in range(len(test_ints)) if i != idx])
non_target_test_X = np.array([test_X[i] for i in range(len(test_ints)) if i != idx])
tmp_train_X = np.vstack((train_X, non_target_test_X))
tmp_train_y = np.hstack((train_ints, non_target_test_ints))
tmp_test_X, tmp_test_y = [], []
for t_auth in sorted(set(test_ints)):
tmp_test_X.append(target_docu)
tmp_test_y.append(t_auth)
# fit the verifier:
verifier = Verifier(metric = metric,
base = base,
nb_bootstrap_iter = nb_bootstrap_iter,
rnd_prop = rnd_prop)
verifier.fit(tmp_train_X, tmp_train_y)
probas = verifier.predict_proba(test_X = tmp_test_X,
test_y = tmp_test_y,
nb_imposters = nb_imposters)
row = [label_encoder.inverse_transform([target_auth])[0]] # author label
row += list(probas)
print(row)
proba_df.loc[len(proba_df)] = row
proba_df = proba_df.set_index('label')
# write away score tables:
table_dir = '../output/tables/'
if not os.path.isdir(table_dir):
os.mkdir(table_dir)
proba_df.to_csv(table_dir+'lat_proba_'+metric+'_'+vector_space+'.csv')
| mit |
yasirkhan380/Tutorials | notebooks/fig_code/svm_gui.py | 47 | 11549 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
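        # np.c_ flattens the grid into an (n_points, 2) sample matrix so the
        # decision function can be evaluated at every node, then reshaped back
        # onto the grid for contouring.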
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('key_press_event', self.onkeypress)
canvas.mpl_connect('key_release_event', self.onkeyrelease)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.shift_down = False
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onkeypress(self, event):
if event.key == "shift":
self.shift_down = True
def onkeyrelease(self, event):
if event.key == "shift":
self.shift_down = False
def onclick(self, event):
if event.xdata and event.ydata:
if self.shift_down or event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
elif event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
maxlikely/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 44 | 7031 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
"""Test partial dependence for classifier """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
"""Test partial dependence for multi-class classifier """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
"""Test partial dependence for regressor """
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_input():
"""Test input validation of partial dependence. """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
"""Test partial dependence plot function. """
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
"""Test partial dependence plot function input checks. """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
"""Test partial dependence plot function on multi-class input. """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
    assert all(ax.has_data() for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
    assert all(ax.has_data() for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
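# Running these tests (added note; the exact runner depends on the
# scikit-learn version of this checkout):
#   nosetests sklearn/ensemble/tests/test_partial_dependence.py
# or, with newer tooling:
#   pytest sklearn/ensemble/tests/test_partial_dependence.py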
| bsd-3-clause |
hrjn/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 75 | 1647 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1]; the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is completely irrelevant.
The code below plots the dependence of y against individual x_i and normalized
values of univariate F-tests statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
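# Follow-on sketch (not in the original example): the same scores can drive
# feature selection directly, e.g. keeping the two best features by mutual
# information, which drops the irrelevant x_3.
# from sklearn.feature_selection import SelectKBest
# X_new = SelectKBest(mutual_info_regression, k=2).fit_transform(X, y)
# print(X_new.shape)  # expected: (1000, 2)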
| bsd-3-clause |
OpenTrading/OpenTrader | setup.py | 1 | 2533 | #!/usr/bin/env python
import codecs
import os
import sys
import glob
from setuptools import setup, find_packages
try:
# http://stackoverflow.com/questions/21698004/python-behave-integration-in-setuptools-setup-py
from setuptools_behave import behave_test
except ImportError:
behave_test = None
dirname = os.path.dirname(__file__)
long_description = (
codecs.open(os.path.join(dirname, "README.creole"), encoding="utf-8").read() + "\n"
)
# Dependencies are automatically detected, but it might need fine tuning.
build_exe_options = {"packages": ["zmq"], "excludes": ["tkinter"]}
setup(
name="OpenTrader",
description="OpenTrader",
long_description=long_description,
author="Open Trading",
license="LGPL2 license",
url="https://www.github.com/OpenTrading/OpenTrader",
version='1.0',
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: LGPL2 License",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS :: MacOS X",
"Topic :: Office/Business :: Financial :: Investment",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 2",
] + [("Programming Language :: Python :: %s" % x) for x in "2.6 2.7".split()],
install_requires=[
"configobj",
"pandas",
"pyparsing",
# we'll make zmq default now
"zmq",
],
extras_require={'plotting': ["matplotlib"],
'pybacktest': ["pybacktest"],
'rabbit': ["pyrabbit"],
'doc': ["python-creole", "invoke"],
# we'll make zmq default now
# 'zmq': ["zmq"],
'amqp': ["pika"],
},
data_files=[('', ['README.creole']),
('OpenTrader', glob.glob('OpenTrader/*.ini')),
('OpenTrader/Omlettes', glob.glob('OpenTrader/Omlettes/*.ini'))],
options = {"build_exe": build_exe_options},
entry_points={
"console_scripts": [
"OTCmd2 = OpenTrader.OTCmd2:iMain",
"OTBackTest = OpenTrader.OTBackTest:iMain",
"OTPpnAmgc = OpenTrader.OTPpnAmgc:iMain",
]
},
tests_require=["behave>=1.2.5"],
cmdclass=behave_test and {"behave_test": behave_test,} or {},
packages=find_packages(),
include_package_data=True,
zip_safe=False,
)
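# Installation sketch (added commentary; run from a checkout containing this
# setup.py):
#   pip install .               # core package
#   pip install ".[plotting]"   # with the optional matplotlib extra
# The extras listed in extras_require above are selected with this bracket
# syntax.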
| lgpl-3.0 |
alekz112/statsmodels | docs/source/plots/graphics_gofplots_qqplot.py | 38 | 1911 | # -*- coding: utf-8 -*-
"""
Created on Sun May 06 05:32:15 2012
Author: Josef Perktold
edited by: Paul Hobson (2012-08-19)
"""
from scipy import stats
from matplotlib import pyplot as plt
import statsmodels.api as sm
#example from docstring
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog, prepend=True)
mod_fit = sm.OLS(data.endog, data.exog).fit()
res = mod_fit.resid
left = -1.8 #x coordinate for text insert
fig = plt.figure()
ax = fig.add_subplot(2, 2, 1)
sm.graphics.qqplot(res, ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, 'no keywords', verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 2)
sm.graphics.qqplot(res, line='s', ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='s'", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 3)
sm.graphics.qqplot(res, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='45', \nfit=True", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 4)
sm.graphics.qqplot(res, dist=stats.t, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "dist=stats.t, \nline='45', \nfit=True",
verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
fig.tight_layout()
plt.gcf()
# example with the new ProbPlot class
import numpy as np
x = np.random.normal(loc=8.25, scale=3.5, size=37)
y = np.random.normal(loc=8.00, scale=3.25, size=37)
pp_x = sm.ProbPlot(x, fit=True)
pp_y = sm.ProbPlot(y, fit=True)
# probability of exceedance
fig2 = pp_x.probplot(exceed=True)
# compare x quantiles to y quantiles
fig3 = pp_x.qqplot(other=pp_y, line='45')
# same as above with probabilities/percentiles
fig4 = pp_x.ppplot(other=pp_y, line='45')
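# minimal follow-on sketch (not in the original script): without `other`, a
# ProbPlot compares its sample against the fitted theoretical distribution
# fig5 = pp_x.qqplot(line='45')
# plt.show()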
| bsd-3-clause |
Djabbz/scikit-learn | benchmarks/bench_plot_nmf.py | 90 | 5742 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random'):
'''
    W, H = alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random')
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
init : string
Method used to initialize the procedure.
Returns
-------
    W : 2-ndarray, [n_samples, r]
        Component part of the factorization
    H : 2-ndarray, [r, n_features]
        Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
W, H = _initialize_nmf(V, r, init, random_state=0)
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
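# For reference (added commentary): the loop above implements the Lee & Seung
# multiplicative updates for the Frobenius objective ||V - W H||_F:
#     H <- H * (W^T V) / (W^T W H + eps)
#     W <- W * (V H^T) / (W H H^T + eps)
# eps guards against division by zero, and the loop exits early once the
# largest elementwise update factor is within tol of 1.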
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init='random', max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, init='random', tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
        fig = plt.figure('scikit-learn Non-Negative Matrix Factorization '
                         'benchmark results')
ax = fig.gca(projection='3d')
        for c, (label, timings) in zip('rbgcm', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
            # dummy point plot to stick the legend to since surface plots do
            # not support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
raghavrv/scikit-learn | examples/linear_model/plot_logistic.py | 73 | 1568 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# generate a toy dataset: a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='red', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(range(-5, 10))
plt.yticks([0, 0.5, 1])
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.legend(('Logistic Regression Model', 'Linear Regression Model'),
loc="lower right", fontsize='small')
plt.show()
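# Equivalence sketch (added commentary, not part of the original example):
# the red logistic curve above matches the classifier's own probability
# estimates.
# proba = clf.predict_proba(X_test[:, np.newaxis])[:, 1]
# assert np.allclose(proba, loss)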
| bsd-3-clause |
perryjohnson/biplaneblade | sandia_blade_lib/prep_stn32_mesh.py | 1 | 10860 | """Write initial TrueGrid files for one Sandia blade station.
Usage
-----
start an IPython (qt)console with the pylab flag:
$ ipython qtconsole --pylab
or
$ ipython --pylab
Then, from the prompt, run this script:
|> %run sandia_blade_lib/prep_stnXX_mesh.py
or
|> import sandia_blade_lib/prep_stnXX_mesh
Author: Perry Roth-Johnson
Last updated: April 10, 2014
"""
import matplotlib.pyplot as plt
import lib.blade as bl
import lib.poly_utils as pu
from shapely.geometry import Polygon
# SET THESE PARAMETERS -----------------
station_num = 32
# --------------------------------------
plt.close('all')
# load the Sandia blade
m = bl.MonoplaneBlade('Sandia blade SNL100-00', 'sandia_blade')
# pre-process the station dimensions
station = m.list_of_stations[station_num-1]
station.airfoil.create_polygon()
station.structure.create_all_layers()
station.structure.save_all_layer_edges()
station.structure.write_all_part_polygons()
# plot the parts
station.plot_parts()
# access the structure for this station
st = station.structure
# upper spar cap -----------------------------------------------------------
label = 'upper spar cap'
# create the bounding polygon
usc = st.spar_cap.layer['upper']
is1 = st.internal_surface_1.layer['resin']
points_usc = [
tuple(usc.left[0]), # SparCap_upper.txt
(usc.left[0][0], 0.1),
is1.polygon.interiors[0].coords[0], # InternalSurface1_resin.txt
tuple(usc.right[1]), # SparCap_upper.txt
(usc.right[1][0], 0.25),
(usc.left[0][0], 0.25)
]
bounding_polygon = Polygon(points_usc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
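# NOTE (added commentary): each region below repeats the recipe used for the
# upper spar cap above: (1) collect corner points bordering the part,
# (2) build a shapely Polygon from them, (3) plot it for visual inspection,
# and (4) cut every overlapping layer with cut_plot_and_write_alt_layer.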
# lower spar cap -----------------------------------------------------------
label = 'lower spar cap'
# create the bounding polygon
lsc = st.spar_cap.layer['lower']
points_lsc = [
tuple(lsc.left[1]),
(lsc.left[1][0], 0.0),
is1.polygon.interiors[0].coords[292-222], # InternalSurface1_resin.txt
tuple(lsc.right[0]), # SparCap_lower.txt
(lsc.right[0][0], -0.15),
(lsc.left[1][0], -0.15)
]
bounding_polygon = Polygon(points_lsc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# TE reinforcement, upper 1 ------------------------------------------------
label = 'TE reinforcement, upper 1'
# create the bounding polygon
ter = st.TE_reinforcement.layer['foam']
points_teu1 = [
(ter.top[0][0], 0.25), # TE_Reinforcement_foam.txt
tuple(ter.top[0]), # TE_Reinforcement_foam.txt
(0.47, 0.12),
is1.polygon.interiors[0].coords[457-222], # InternalSurface1_resin.txt
(is1.polygon.interiors[0].coords[457-222][0], 0.25) # InternalSurface1_resin.txt
]
bounding_polygon = Polygon(points_teu1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 1 ------------------------------------------------
label = 'TE reinforcement, lower 1'
# create the bounding polygon
points_tel1 = [
(ter.bottom[0][0], -0.15), # TE_Reinforcement_foam.txt
tuple(ter.bottom[1]), # TE_Reinforcement_foam.txt
(0.47, -0.01),
(0.7, 0.05),
points_teu1[-2], # InternalSurface1_resin.txt
(points_teu1[-1][0], -0.15) # InternalSurface1_resin.txt
]
bounding_polygon = Polygon(points_tel1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, upper 2 ------------------------------------------------
label = 'TE reinforcement, upper 2'
# create the bounding polygon
is1t = st.internal_surface_1.layer['triax']
points_teu2 = [
points_teu1[-1],
points_teu1[-2],
is1t.polygon.interiors[0].coords[364-176], # InternalSurface1_triax.txt
is1t.polygon.exterior.coords[24-3], # InternalSurface1_triax.txt
(is1t.polygon.exterior.coords[24-3][0], 0.25) # InternalSurface1_triax.txt
]
bounding_polygon = Polygon(points_teu2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 2 ------------------------------------------------
label = 'TE reinforcement, lower 2'
# create the bounding polygon
points_tel2 = [
(points_teu2[0][0], -0.1),
points_teu2[1],
points_teu2[2],
points_teu2[3],
(points_teu2[3][0], -0.1)
]
bounding_polygon = Polygon(points_tel2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, upper 3 ------------------------------------------------
label = 'TE reinforcement, upper 3'
# create the bounding polygon
teru = st.TE_reinforcement.layer['uniax']
est = st.external_surface.layer['triax']
esg = st.external_surface.layer['gelcoat']
points_teu3 = [
points_teu2[-1],
points_teu2[-2],
ter.polygon.exterior.coords[0],
teru.polygon.exterior.coords[0],
(est.polygon.exterior.coords[-1][0], 0.002),
est.polygon.exterior.coords[-2],
esg.polygon.exterior.coords[-2],
(esg.polygon.exterior.coords[-2][0], 0.25)
]
bounding_polygon = Polygon(points_teu3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 3 ------------------------------------------------
label = 'TE reinforcement, lower 3'
# create the bounding polygon
points_tel3 = [
(points_teu3[0][0], -0.1),
points_teu3[1],
points_teu3[2],
points_teu3[3],
points_teu3[4],
est.polygon.exterior.coords[-1],
esg.polygon.exterior.coords[-1],
(points_teu3[4][0], -0.1)
]
bounding_polygon = Polygon(points_tel3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# LE panel -----------------------------------------------------------------
label = 'LE panel'
# create the bounding polygon
lep = st.LE_panel.layer['foam']
is1 = st.internal_surface_1.layer['resin']
points_le = [
(-0.7,-0.1),
(lep.bottom[0][0],-0.1),
(lep.bottom[0][0],0.25),
(-0.7, 0.25)
]
bounding_polygon = Polygon(points_le)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# show the plot
plt.show()
# write the TrueGrid input file for mesh generation ---------------------
st.write_truegrid_inputfile(
interrupt_flag=True,
additional_layers=[
st.spar_cap.layer['upper'],
st.spar_cap.layer['lower'],
st.LE_panel.layer['foam']
],
alt_TE_reinforcement=True,
soft_warning=False)
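# Usage note (added commentary): to regenerate TrueGrid input for a different
# station, change station_num in the parameters block above and re-run this
# script as described in the module docstring.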
| gpl-3.0 |
kivy-garden/garden.matplotlib | backend_kivy.py | 1 | 50958 | '''
Backend Kivy
============
.. image:: images/backend_kivy_example.jpg
:align: right
The :class:`FigureCanvasKivy` widget is used to create a matplotlib graph.
This widget has the same properties as
:class:`kivy.ext.mpl.backend_kivyagg.FigureCanvasKivyAgg`. FigureCanvasKivy
instead of rendering a static image, uses the kivy graphics instructions
:class:`kivy.graphics.Line` and :class:`kivy.graphics.Mesh` to render on the
canvas.
Installation
------------
The matplotlib backend for kivy can be used via the garden extension in
kivy following this .. _link: http://kivy.org/docs/api-kivy.garden.html ::
garden install matplotlib
Or if you want to include it directly on your application ::
cd myapp
garden install --app matplotlib
Initialization
--------------
A backend can be initialized in two ways. The first one is using pure pyplot
as explained
.. _here: http://matplotlib.org/faq/usage_faq.html#what-is-a-backend::
import matplotlib
matplotlib.use('module://kivy.garden.matplotlib.backend_kivy')
Once this is done, any figure instantiated after will be wrapped by a
:class:`FigureCanvasKivy` ready to use. From here there are two options to
continue with the development.
1. Use the :class:`FigureCanvasKivy` attribute defined as canvas from Figure,
to embed your matplotlib graph in your own Kivy application as can be seen in
the first example in the following section.
.. warning::
One can create a matplotlib widget by importing FigureCanvas::
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvas
or
from kivy.garden.matplotlib.backend_kivy import FigureCanvas
and then instantiate an object::
fig, ax = plt.subplots()
my_mpl_kivy_widget = FigureCanvas(fig)
which will certainly work but a problem will arise if events were connected
before the FigureCanvas is instantiated. Consider the following, where an
event is connected before the matplotlib kivy widget object is generated ::
fig, ax = plt.subplots()
fig.canvas.mpl_connect('button_press_event', callback_handler)
my_mpl_kivy_widget = FigureCanvas(fig)
In this scenario button_press_event won't be connected with the object
being created in line 3, because it will be connected to the default canvas
set by matplotlib. If this approach is taken, be sure to connect the
events after instantiation, like the following: ::
fig, ax = plt.subplots()
my_mpl_kivy_widget = FigureCanvas(fig)
fig.canvas.mpl_connect('button_press_event', callback_handler)
2. Use pyplot to write the application following matplotlib syntax, as can be
seen in the second example below. In this case a Kivy application will be
created automatically from the matplotlib instructions and a NavigationToolbar
will be added to the main canvas.
Examples
--------
1. Example of a simple Hello world matplotlib App::
fig, ax = plt.subplots()
ax.text(0.6, 0.5, "hello", size=50, rotation=30.,
ha="center", va="center",
bbox=dict(boxstyle="round",
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
)
)
ax.text(0.5, 0.4, "world", size=50, rotation=-30.,
ha="right", va="top",
bbox=dict(boxstyle="square",
ec=(1., 0.5, 0.5),
fc=(1., 0.8, 0.8),
)
)
canvas = fig.canvas
The object canvas can be added as a widget into the kivy widget tree.
If a change is done on the figure an update can be performed using
:meth:`~kivy.ext.mpl.backend_kivyagg.FigureCanvasKivyAgg.draw`.::
# update graph
canvas.draw()
The plot can be exported to png with
:meth:`~kivy.ext.mpl.backend_kivyagg.FigureCanvasKivyAgg.print_png`, which
receives the `filename` as an argument. ::
# export to png
canvas.print_png("my_plot.png")
2. Example of a pyplot application using matplotlib instructions::
import numpy as np
import matplotlib.pyplot as plt
N = 5
menMeans = (20, 35, 30, 35, 27)
menStd = (2, 3, 4, 1, 2)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
figure, ax = plt.subplots()
rects1 = ax.bar(ind, menMeans, width, color='r', yerr=menStd)
womenMeans = (25, 32, 34, 20, 25)
womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind + width, womenMeans, width, color='y', yerr=womenStd)
ax.set_ylabel('----------------------Scores------------------')
ax.set_title('Scores by group and gender')
ax.set_xticks(ind + width)
ax.set_yticklabels(('Ahh', '--G1--', 'G2', 'G3', 'G4', 'G5', 'G5',
'G5', 'G5'), rotation=90)
ax.legend((rects1[0], rects2[0]), ('Men', 'Women'))
plt.draw()
plt.savefig("test.png")
plt.show()
Navigation Toolbar
------------------
If initialized by the first step a :class:`NavigationToolbar2Kivy` widget can
be created as well by instantiating an object with a :class:`FigureCanvasKivy`
as parameter. The actual widget is stored in its actionbar attribute.
This can be seen in the test_backend.py example ::
bl = BoxLayout(orientation="vertical")
my_mpl_kivy_widget1 = FigureCanvasKivy(fig1)
my_mpl_kivy_widget2 = FigureCanvasKivy(fig2)
nav1 = NavigationToolbar2Kivy(my_mpl_kivy_widget1)
nav2 = NavigationToolbar2Kivy(my_mpl_kivy_widget2)
bl.add_widget(nav1.actionbar)
bl.add_widget(my_mpl_kivy_widget1)
bl.add_widget(nav2.actionbar)
bl.add_widget(my_mpl_kivy_widget2)
Connecting Matplotlib events to Kivy Events
-------------------------------------------
All matplotlib events are available: `button_press_event` which is raised
on a mouse button clicked or on touch down, `button_release_event` which is
raised when a click button is released or on touch up, `key_press_event` which
is raised when a key is pressed, `key_release_event` which is raised when a key
is released, `motion_notify_event` which is raised when the mouse is on motion,
`resize_event` which is raised when the dimensions of the widget change,
`scroll_event` which is raised when the mouse scroll wheel is rolled,
`figure_enter_event` which is raised when mouse enters a new figure,
`figure_leave_event` which is raised when mouse leaves a figure,
`close_event` which is raised when the window is closed,
`draw_event` which is raised on canvas draw,
`pick_event` which is raised when an object is selected,
`idle_event` (deprecated),
`axes_enter_event` which is fired when mouse enters axes,
`axes_leave_event` which is fired when mouse leaves axes.::
def press(event):
print('press released from test', event.x, event.y, event.button)
def release(event):
print('release released from test', event.x, event.y, event.button)
def keypress(event):
print('key down', event.key)
def keyup(event):
print('key up', event.key)
def motionnotify(event):
print('mouse move to ', event.x, event.y)
def resize(event):
print('resize from mpl ', event)
def scroll(event):
print('scroll event from mpl ', event.x, event.y, event.step)
def figure_enter(event):
print('figure enter mpl')
def figure_leave(event):
print('figure leaving mpl')
def close(event):
print('closing figure')
fig.canvas.mpl_connect('button_press_event', press)
fig.canvas.mpl_connect('button_release_event', release)
fig.canvas.mpl_connect('key_press_event', keypress)
fig.canvas.mpl_connect('key_release_event', keyup)
fig.canvas.mpl_connect('motion_notify_event', motionnotify)
fig.canvas.mpl_connect('resize_event', resize)
fig.canvas.mpl_connect('scroll_event', scroll)
fig.canvas.mpl_connect('figure_enter_event', figure_enter)
fig.canvas.mpl_connect('figure_leave_event', figure_leave)
fig.canvas.mpl_connect('close_event', close)
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import matplotlib
import matplotlib.transforms as transforms
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, TimerBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox, Affine2D
from matplotlib.backend_bases import ShowBase, Event
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.mathtext import MathTextParser
from matplotlib import rcParams
from hashlib import md5
from matplotlib import _png
from matplotlib import _path
try:
import kivy
except ImportError:
raise ImportError("this backend requires Kivy to be installed.")
from kivy.app import App
from kivy.graphics.texture import Texture
from kivy.graphics import Rectangle
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.behaviors import FocusBehavior
from kivy.uix.actionbar import ActionBar, ActionView, \
ActionButton, ActionToggleButton, \
ActionPrevious, ActionOverflow, ActionSeparator
from kivy.base import EventLoop
from kivy.core.text import Label as CoreLabel
from kivy.core.image import Image
from kivy.graphics import Color, Line
from kivy.graphics import Rotate, Translate
from kivy.graphics.instructions import InstructionGroup
from kivy.graphics.tesselator import Tesselator
from kivy.graphics.context_instructions import PopMatrix, PushMatrix
from kivy.graphics import StencilPush, StencilPop, StencilUse,\
StencilUnUse
from kivy.logger import Logger
from kivy.graphics import Mesh
from kivy.resources import resource_find
from kivy.uix.stencilview import StencilView
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.popup import Popup
from kivy.properties import ObjectProperty
from kivy.uix.textinput import TextInput
from kivy.lang import Builder
from kivy.clock import Clock
from distutils.version import LooseVersion
_mpl_ge_1_5 = LooseVersion(matplotlib.__version__) >= LooseVersion('1.5.0')
_mpl_ge_2_0 = LooseVersion(matplotlib.__version__) >= LooseVersion('2.0.0')
import numpy as np
import io
import textwrap
import uuid
import numbers
from functools import partial
from math import cos, sin, pi
kivy.require('1.9.1')
toolbar = None
my_canvas = None
class SaveDialog(FloatLayout):
save = ObjectProperty(None)
text_input = ObjectProperty(None)
cancel = ObjectProperty(None)
class MPLKivyApp(App):
'''Creates the App initializing a FloatLayout with a figure and toolbar
widget.
'''
figure = ObjectProperty(None)
toolbar = ObjectProperty(None)
def build(self):
EventLoop.ensure_window()
layout = FloatLayout()
if self.figure:
self.figure.size_hint_y = 0.9
layout.add_widget(self.figure)
if self.toolbar:
self.toolbar.size_hint_y = 0.1
layout.add_widget(self.toolbar)
return layout
def draw_if_interactive():
'''Handle whether or not the backend is in interactive mode or not.
'''
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager:
figManager.canvas.draw_idle()
class Show(ShowBase):
'''mainloop needs to be overwritten to define the show() behavior for kivy
framework.
'''
def mainloop(self):
app = App.get_running_app()
if app is None:
app = MPLKivyApp(figure=my_canvas, toolbar=toolbar)
app.run()
show = Show()
def new_figure_manager(num, *args, **kwargs):
'''Create a new figure manager instance for the figure given.
'''
# if a main-level app must be created, this (and
# new_figure_manager_given_figure) is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
'''Create a new figure manager instance for the given figure.
'''
canvas = FigureCanvasKivy(figure)
manager = FigureManagerKivy(canvas, num)
global my_canvas
global toolbar
toolbar = manager.toolbar.actionbar if manager.toolbar else None
my_canvas = canvas
return manager
class RendererKivy(RendererBase):
'''The kivy renderer handles drawing/rendering operations. A RendererKivy
should be initialized with a FigureCanvasKivy widget. On initialization
a MathTextParser is instantiated to generate math text inside a
FigureCanvasKivy widget. Additionally a list to store clip_rectangles
is defined for elements that need to be clipped inside a rectangle such
as axes. The rest of the render is performed using kivy graphics
instructions.
'''
def __init__(self, widget):
super(RendererKivy, self).__init__()
self.widget = widget
self.dpi = widget.figure.dpi
self._markers = {}
# Can be enhanced by using TextToPath matplotlib, textpath.py
self.mathtext_parser = MathTextParser("Bitmap")
self.list_goraud_triangles = []
self.clip_rectangles = []
self.labels_inside_plot = []
def contains(self, widget, x, y):
'''Returns whether or not a point is inside the widget. The value
of the point is defined in x, y as kivy coordinates.
'''
left = widget.x
bottom = widget.y
top = widget.y + widget.height
right = widget.x + widget.width
return (left <= x <= right and
bottom <= y <= top)
def handle_clip_rectangle(self, gc, x, y):
'''It checks whether the point (x,y) collides with any already
existent stencil. If so it returns the index position of the
        stencil it collides with. If the new clip rectangle bounds are
        None it draws in the canvas; otherwise it finds the corresponding
        stencil or creates a new one for the new graphics instructions.
The point x,y is given in matplotlib coordinates.
'''
x = self.widget.x + x
y = self.widget.y + y
collides = self.collides_with_existent_stencil(x, y)
if collides > -1:
return collides
new_bounds = gc.get_clip_rectangle()
if new_bounds:
x = self.widget.x + int(new_bounds.bounds[0])
y = self.widget.y + int(new_bounds.bounds[1])
w = int(new_bounds.bounds[2])
h = int(new_bounds.bounds[3])
collides = self.collides_with_existent_stencil(x, y)
if collides == -1:
cliparea = StencilView(pos=(x, y), size=(w, h))
self.clip_rectangles.append(cliparea)
self.widget.add_widget(cliparea)
return len(self.clip_rectangles) - 1
else:
return collides
else:
return -2
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
'''Draws a collection of paths selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before being
applied. *offset_position* may be either "screen" or "data"
depending on the space that the offsets are in.
'''
len_path = len(paths[0].vertices) if len(paths) > 0 else 0
uses_per_path = self._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
# check whether an optimization is needed by calculating the cost of
# generating and use a path with the cost of emitting a path in-line.
should_do_optimization = \
len_path + uses_per_path + 5 < len_path * uses_per_path
if not should_do_optimization:
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
# Generate an array of unique paths with the respective transformations
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
transform = Affine2D(transform.get_matrix()).scale(1.0, -1.0)
if _mpl_ge_2_0:
polygons = path.to_polygons(transform, closed_only=False)
else:
polygons = path.to_polygons(transform)
path_codes.append(polygons)
# Apply the styles and rgbFace to each one of the raw paths from
# the list. Additionally a transformation is being applied to
# translate each independent path
for xo, yo, path_poly, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
list_canvas_instruction = self.get_path_instructions(gc0, path_poly,
closed=True, rgbFace=rgbFace)
for widget, instructions in list_canvas_instruction:
widget.canvas.add(PushMatrix())
widget.canvas.add(Translate(xo, yo))
widget.canvas.add(instructions)
widget.canvas.add(PopMatrix())
def collides_with_existent_stencil(self, x, y):
        '''Check all the clip areas and return the index of the clip area that
        contains this point. The point x, y is given in kivy coordinates.
'''
idx = -1
for cliparea in self.clip_rectangles:
idx += 1
if self.contains(cliparea, x, y):
return idx
return -1
def get_path_instructions(self, gc, polygons, closed=False, rgbFace=None):
'''With a graphics context and a set of polygons it returns a list
of InstructionGroups required to render the path.
'''
instructions_list = []
points_line = []
for polygon in polygons:
for x, y in polygon:
x = x + self.widget.x
y = y + self.widget.y
points_line += [float(x), float(y), ]
tess = Tesselator()
tess.add_contour(points_line)
if not tess.tesselate():
Logger.warning("Tesselator didn't work :(")
return
newclip = self.handle_clip_rectangle(gc, x, y)
if newclip > -1:
instructions_list.append((self.clip_rectangles[newclip],
self.get_graphics(gc, tess, points_line, rgbFace,
closed=closed)))
else:
instructions_list.append((self.widget,
self.get_graphics(gc, tess, points_line, rgbFace,
closed=closed)))
return instructions_list
def get_graphics(self, gc, polygons, points_line, rgbFace, closed=False):
'''Return an instruction group which contains the necessary graphics
instructions to draw the respective graphics.
'''
instruction_group = InstructionGroup()
if isinstance(gc.line['dash_list'], tuple):
gc.line['dash_list'] = list(gc.line['dash_list'])
if rgbFace is not None:
if len(polygons.meshes) != 0:
instruction_group.add(Color(*rgbFace))
for vertices, indices in polygons.meshes:
instruction_group.add(Mesh(
vertices=vertices,
indices=indices,
mode=str("triangle_fan")
))
instruction_group.add(Color(*gc.get_rgb()))
if _mpl_ge_1_5 and (not _mpl_ge_2_0) and closed:
points_poly_line = points_line[:-2]
else:
points_poly_line = points_line
if gc.line['width'] > 0:
instruction_group.add(Line(points=points_poly_line,
width=int(gc.line['width'] / 2),
dash_length=gc.line['dash_length'],
dash_offset=gc.line['dash_offset'],
dash_joint=gc.line['join_style'],
dash_list=gc.line['dash_list']))
return instruction_group
def draw_image(self, gc, x, y, im):
'''Render images that can be displayed on a matplotlib figure.
These images are generally called using imshow method from pyplot.
A Texture is applied to the FigureCanvas. The position x, y is
given in matplotlib coordinates.
'''
# Clip path to define an area to mask.
clippath, clippath_trans = gc.get_clip_path()
# Normal coordinates calculated and image added.
x = self.widget.x + x
y = self.widget.y + y
bbox = gc.get_clip_rectangle()
if bbox is not None:
l, b, w, h = bbox.bounds
else:
l = 0
b = 0
w = self.widget.width
h = self.widget.height
h, w = im.get_size_out()
rows, cols, image_str = im.as_rgba_str()
texture = Texture.create(size=(w, h))
texture.blit_buffer(image_str, colorfmt='rgba', bufferfmt='ubyte')
if clippath is None:
with self.widget.canvas:
Color(1.0, 1.0, 1.0, 1.0)
Rectangle(texture=texture, pos=(x, y), size=(w, h))
else:
if _mpl_ge_2_0:
polygons = clippath.to_polygons(clippath_trans, closed_only=False)
else:
polygons = clippath.to_polygons(clippath_trans)
list_canvas_instruction = self.get_path_instructions(gc, polygons,
rgbFace=(1.0, 1.0, 1.0, 1.0))
for widget, instructions in list_canvas_instruction:
widget.canvas.add(StencilPush())
widget.canvas.add(instructions)
widget.canvas.add(StencilUse())
widget.canvas.add(Color(1.0, 1.0, 1.0, 1.0))
widget.canvas.add(Rectangle(texture=texture,
pos=(x, y), size=(w, h)))
widget.canvas.add(StencilUnUse())
widget.canvas.add(StencilPop())
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
'''Render text that is displayed in the canvas. The position x, y is
given in matplotlib coordinates. A `GraphicsContextKivy` is given
to render according to the text properties such as color, size, etc.
An angle is given to change the orientation of the text when needed.
If the text is a math expression it will be rendered using a
MathText parser.
'''
if mtext:
transform = mtext.get_transform()
ax, ay = transform.transform_point(mtext.get_position())
angle_rad = mtext.get_rotation() * np.pi / 180.
dir_vert = np.array([np.sin(angle_rad), np.cos(angle_rad)])
if mtext.get_rotation_mode() == "anchor":
# if anchor mode, rotation is undone first
v_offset = np.dot(dir_vert, [(x - ax), (y - ay)])
ax = ax + v_offset * dir_vert[0]
ay = ay + v_offset * dir_vert[1]
w, h, d = self.get_text_width_height_descent(s, prop, ismath)
ha, va = mtext.get_ha(), mtext.get_va()
if ha == "center":
ax -= w / 2
elif ha == "right":
ax -= w
if va == "top":
ay -= h
elif va == "center":
ay -= h / 2
if mtext.get_rotation_mode() != "anchor":
# if not anchor mode, rotation is undone last
v_offset = np.dot(dir_vert, [(x - ax), (y - ay)])
ax = ax + v_offset * dir_vert[0]
ay = ay + v_offset * dir_vert[1]
x, y = ax, ay
x += self.widget.x
y += self.widget.y
if ismath:
self.draw_mathtext(gc, x, y, s, prop, angle)
else:
font = resource_find(prop.get_name() + ".ttf")
color = gc.get_rgb()
if font is None:
plot_text = CoreLabel(font_size=prop.get_size_in_points(), color=color)
else:
plot_text = CoreLabel(font_size=prop.get_size_in_points(),
font_name=prop.get_name(), color=color)
plot_text.text = six.text_type("{}".format(s))
if prop.get_style() == 'italic':
plot_text.italic = True
if self.weight_as_number(prop.get_weight()) > 500:
plot_text.bold = True
plot_text.refresh()
with self.widget.canvas:
if isinstance(angle, float):
PushMatrix()
Rotate(angle=angle, origin=(int(x), int(y)))
Rectangle(pos=(int(x), int(y)), texture=plot_text.texture,
size=plot_text.texture.size)
PopMatrix()
else:
Rectangle(pos=(int(x), int(y)), texture=plot_text.texture,
size=plot_text.texture.size)
def draw_mathtext(self, gc, x, y, s, prop, angle):
'''Draw the math text using matplotlib.mathtext. The position
x,y is given in Kivy coordinates.
'''
ftimage, depth = self.mathtext_parser.parse(s, self.dpi, prop)
w = ftimage.get_width()
h = ftimage.get_height()
texture = Texture.create(size=(w, h))
if _mpl_ge_1_5:
texture.blit_buffer(ftimage.as_rgba_str()[0][0], colorfmt='rgba',
bufferfmt='ubyte')
else:
texture.blit_buffer(ftimage.as_rgba_str(), colorfmt='rgba',
bufferfmt='ubyte')
texture.flip_vertical()
with self.widget.canvas:
Rectangle(texture=texture, pos=(x, y), size=(w, h))
def draw_path(self, gc, path, transform, rgbFace=None):
'''Produce the rendering of the graphics elements using
:class:`kivy.graphics.Line` and :class:`kivy.graphics.Mesh` kivy
graphics instructions. The paths are converted into polygons and
assigned either to a clip rectangle or to the same canvas for
rendering. Paths are received in matplotlib coordinates. The
aesthetics is defined by the `GraphicsContextKivy` gc.
'''
if _mpl_ge_2_0:
polygons = path.to_polygons(transform, self.widget.width,
self.widget.height, closed_only=False)
else:
polygons = path.to_polygons(transform, self.widget.width,
self.widget.height)
list_canvas_instruction = self.get_path_instructions(gc, polygons,
closed=True, rgbFace=rgbFace)
for widget, instructions in list_canvas_instruction:
widget.canvas.add(instructions)
def draw_markers(self, gc, marker_path, marker_trans, path,
trans, rgbFace=None):
'''Markers graphics instructions are stored on a dictionary and
hashed through graphics context and rgbFace values. If a marker_path
with the corresponding graphics context exist then the instructions
are pulled from the markers dictionary.
'''
if not len(path.vertices):
return
# get a string representation of the path
path_data = self._convert_path(
marker_path,
marker_trans + Affine2D().scale(1.0, -1.0),
simplify=False)
# get a string representation of the graphics context and rgbFace.
style = str(gc._get_style_dict(rgbFace))
dictkey = (path_data, str(style))
# check whether this marker has been created before.
list_instructions = self._markers.get(dictkey)
# creating a list of instructions for the specific marker.
if list_instructions is None:
if _mpl_ge_2_0:
polygons = marker_path.to_polygons(marker_trans, closed_only=False)
else:
polygons = marker_path.to_polygons(marker_trans)
self._markers[dictkey] = self.get_path_instructions(gc,
polygons, rgbFace=rgbFace)
# Traversing all the positions where a marker should be rendered
for vertices, codes in path.iter_segments(trans, simplify=False):
if len(vertices):
x, y = vertices[-2:]
for widget, instructions in self._markers[dictkey]:
widget.canvas.add(PushMatrix())
widget.canvas.add(Translate(x, y))
widget.canvas.add(instructions)
widget.canvas.add(PopMatrix())
def flipy(self):
return False
def _convert_path(self, path, transform=None, clip=None, simplify=None,
sketch=None):
if clip:
clip = (0.0, 0.0, self.width, self.height)
else:
clip = None
if _mpl_ge_1_5:
return _path.convert_to_string(
path, transform, clip, simplify, sketch, 6,
[b'M', b'L', b'Q', b'C', b'z'], False).decode('ascii')
else:
return _path.convert_to_svg(path, transform, clip, simplify, 6)
def get_canvas_width_height(self):
'''Get the actual width and height of the widget.
'''
return self.widget.width, self.widget.height
def get_text_width_height_descent(self, s, prop, ismath):
'''This method is needed specifically to calculate text positioning
in the canvas. Matplotlib needs the size to calculate the points
according to their layout
'''
if ismath:
ftimage, depth = self.mathtext_parser.parse(s, self.dpi, prop)
w = ftimage.get_width()
h = ftimage.get_height()
return w, h, depth
font = resource_find(prop.get_name() + ".ttf")
if font is None:
plot_text = CoreLabel(font_size=prop.get_size_in_points())
else:
plot_text = CoreLabel(font_size=prop.get_size_in_points(),
font_name=prop.get_name())
plot_text.text = six.text_type("{}".format(s))
plot_text.refresh()
return plot_text.texture.size[0], plot_text.texture.size[1], 1
def new_gc(self):
'''Instantiate a GraphicsContextKivy object
'''
return GraphicsContextKivy(self.widget)
def points_to_pixels(self, points):
return points / 72.0 * self.dpi
def weight_as_number(self, weight):
''' Replaces the deprecated matplotlib function of the same name
'''
        # return numeric weights unchanged
        if isinstance(weight, numbers.Number):
            return weight
        # else use the name-to-number mapping of matplotlib 2.2
        weight_map = {
            'ultralight': 100,
            'light': 200,
            'normal': 400,
            'regular': 400,
            'book': 500,
            'medium': 500,
            'roman': 500,
            'semibold': 600,
            'demibold': 600,
            'demi': 600,
            'bold': 700,
            'heavy': 800,
            'extra bold': 800,
            'black': 900,
        }
        try:
            return weight_map[weight]
        except KeyError:
            raise ValueError('weight ' + weight + ' not valid')
class NavigationToolbar2Kivy(NavigationToolbar2):
    '''This class extends from the matplotlib class NavigationToolbar2 and
    creates an action bar which is added to the main app to allow the
    following operations on the figures.
Home: Resets the plot axes to the initial state.
Left: Undo an operation performed.
Right: Redo an operation performed.
Pan: Allows to drag the plot.
Zoom: Allows to define a rectangular area to zoom in.
Configure: Loads a pop up for repositioning elements.
Save: Loads a Save Dialog to generate an image.
'''
def __init__(self, canvas, **kwargs):
self.actionbar = ActionBar(pos_hint={'top': 1.0})
super(NavigationToolbar2Kivy, self).__init__(canvas)
self.rubberband_color = (1.0, 0.0, 0.0, 1.0)
self.lastrect = None
self.save_dialog = Builder.load_string(textwrap.dedent('''\
<SaveDialog>:
text_input: text_input
BoxLayout:
size: root.size
pos: root.pos
orientation: "vertical"
FileChooserListView:
id: filechooser
on_selection: text_input.text = self.selection and\
self.selection[0] or ''
TextInput:
id: text_input
size_hint_y: None
height: 30
multiline: False
BoxLayout:
size_hint_y: None
height: 30
Button:
text: "Cancel"
on_release: root.cancel()
Button:
text: "Save"
on_release: root.save(filechooser.path,\
text_input.text)
'''))
def _init_toolbar(self):
'''A Toolbar is created with an ActionBar widget in which buttons are
        added with a specific behavior given by a callback. The button
        properties are given by matplotlib.
'''
basedir = os.path.join(rcParams['datapath'], 'images')
actionview = ActionView()
actionprevious = ActionPrevious(title="Navigation", with_previous=False)
actionoverflow = ActionOverflow()
actionview.add_widget(actionprevious)
actionview.add_widget(actionoverflow)
actionview.use_separator = True
self.actionbar.add_widget(actionview)
id_group = uuid.uuid4()
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
actionview.add_widget(ActionSeparator())
continue
fname = os.path.join(basedir, image_file + '.png')
if text in ['Pan', 'Zoom']:
action_button = ActionToggleButton(text=text, icon=fname,
group=id_group)
else:
action_button = ActionButton(text=text, icon=fname)
action_button.bind(on_press=getattr(self, callback))
actionview.add_widget(action_button)
def configure_subplots(self, *largs):
'''It will be implemented later.'''
pass
def dismiss_popup(self):
self._popup.dismiss()
def show_save(self):
'''Displays a popup widget to perform a save operation.'''
content = SaveDialog(save=self.save, cancel=self.dismiss_popup)
self._popup = Popup(title="Save file", content=content,
size_hint=(0.9, 0.9))
self._popup.open()
def save(self, path, filename):
self.canvas.export_to_png(os.path.join(path, filename))
self.dismiss_popup()
def save_figure(self, *args):
self.show_save()
def draw_rubberband(self, event, x0, y0, x1, y1):
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1) + self.canvas.x,
                                     min(y0, y1) + self.canvas.y, w, h)]
if self.lastrect is None:
self.canvas.canvas.add(Color(*self.rubberband_color))
else:
self.canvas.canvas.remove(self.lastrect)
self.lastrect = InstructionGroup()
self.lastrect.add(Line(rectangle=rect, width=1.0, dash_length=5.0,
dash_offset=5.0))
self.lastrect.add(Color(1.0, 0.0, 0.0, 0.2))
self.lastrect.add(Rectangle(pos=(rect[0], rect[1]),
size=(rect[2], rect[3])))
self.canvas.canvas.add(self.lastrect)
def release_zoom(self, event):
self.lastrect = None
return super(NavigationToolbar2Kivy, self).release_zoom(event)
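# Minimal usage sketch (hypothetical wiring, not part of the backend):
# assumes an existing Kivy BoxLayout named 'layout' and a matplotlib
# figure named 'fig'.
#
#   canvas = FigureCanvasKivy(fig)
#   nav = NavigationToolbar2Kivy(canvas)
#   layout.add_widget(nav.actionbar)
#   layout.add_widget(canvas)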
class GraphicsContextKivy(GraphicsContextBase, object):
    '''The graphics context provides the color, line styles, etc. All the
       mapping between matplotlib and Kivy styling is done here.
       The GraphicsContextKivy stores colors as an RGB tuple on the unit
       interval, e.g., (0.5, 0.0, 1.0), as in the Kivy framework.
       Line properties and styles are set according to the Kivy framework
       definition for Line.
    '''
_capd = {
'butt': 'square',
'projecting': 'square',
'round': 'round',
}
    def __init__(self, renderer):
        super(GraphicsContextKivy, self).__init__()
        self.renderer = renderer
        # Use an instance dict (not a shared class attribute) so that
        # separate graphics contexts do not share line state.
        self.line = {}
        # Map the initial cap style through _capd, for consistency
        # with set_capstyle().
        self.line['cap_style'] = self._capd[self.get_capstyle()]
        self.line['join_style'] = self.get_joinstyle()
        self.line['dash_offset'] = None
        self.line['dash_length'] = None
        self.line['dash_list'] = []
def set_capstyle(self, cs):
'''Set the cap style based on the kivy framework cap styles.
'''
GraphicsContextBase.set_capstyle(self, cs)
self.line['cap_style'] = self._capd[self._capstyle]
def set_joinstyle(self, js):
'''Set the join style based on the kivy framework join styles.
'''
GraphicsContextBase.set_joinstyle(self, js)
self.line['join_style'] = js
def set_dashes(self, dash_offset, dash_list):
GraphicsContextBase.set_dashes(self, dash_offset, dash_list)
# dash_list is a list with numbers denoting the number of points
# in a dash and if it is on or off.
if dash_list is not None:
self.line['dash_list'] = dash_list
if dash_offset is not None:
self.line['dash_offset'] = int(dash_offset)
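        # Illustration of the matplotlib convention: set_dashes(0, [4, 2])
        # requests dashes of 4 points on followed by 2 points off, with
        # no initial offset.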
def set_linewidth(self, w):
GraphicsContextBase.set_linewidth(self, w)
self.line['width'] = w
def _get_style_dict(self, rgbFace):
'''Return the style string. style is generated from the
GraphicsContext and rgbFace
'''
attrib = {}
forced_alpha = self.get_forced_alpha()
if rgbFace is None:
attrib['fill'] = 'none'
else:
if tuple(rgbFace[:3]) != (0, 0, 0):
attrib['fill'] = str(rgbFace)
if len(rgbFace) == 4 and rgbFace[3] != 1.0 and not forced_alpha:
attrib['fill-opacity'] = str(rgbFace[3])
if forced_alpha and self.get_alpha() != 1.0:
attrib['opacity'] = str(self.get_alpha())
offset, seq = self.get_dashes()
if seq is not None:
attrib['line-dasharray'] = ','.join(['%f' % val for val in seq])
attrib['line-dashoffset'] = six.text_type(float(offset))
linewidth = self.get_linewidth()
if linewidth:
rgb = self.get_rgb()
attrib['line'] = str(rgb)
if not forced_alpha and rgb[3] != 1.0:
attrib['line-opacity'] = str(rgb[3])
if linewidth != 1.0:
attrib['line-width'] = str(linewidth)
if self.get_joinstyle() != 'round':
attrib['line-linejoin'] = self.get_joinstyle()
        if self.get_capstyle() != 'butt':
            attrib['line-linecap'] = self._capd[self.get_capstyle()]
return attrib
class TimerKivy(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses Kivy for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = Clock.schedule_interval(self._on_timer, self._interval / 1000.0)
def _timer_stop(self):
if self._timer is not None:
Clock.unschedule(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self, dt):
super(TimerKivy, self)._on_timer()
class FigureCanvasKivy(FocusBehavior, Widget, FigureCanvasBase):
'''FigureCanvasKivy class. See module documentation for more information.
'''
def __init__(self, figure, **kwargs):
Window.bind(mouse_pos=self._on_mouse_pos)
self.bind(size=self._on_size_changed)
self.bind(pos=self._on_pos_changed)
self.entered_figure = True
self.figure = figure
super(FigureCanvasKivy, self).__init__(figure=self.figure, **kwargs)
def draw(self):
'''Draw the figure using the KivyRenderer
'''
self.clear_widgets()
self.canvas.clear()
self._renderer = RendererKivy(self)
self.figure.draw(self._renderer)
def on_touch_down(self, touch):
'''Kivy Event to trigger the following matplotlib events:
`motion_notify_event`, `scroll_event`, `button_press_event`,
`enter_notify_event` and `leave_notify_event`
'''
newcoord = self.to_widget(touch.x, touch.y, relative=True)
x = newcoord[0]
y = newcoord[1]
if super(FigureCanvasKivy, self).on_touch_down(touch):
return True
if self.collide_point(*touch.pos):
self.motion_notify_event(x, y, guiEvent=None)
touch.grab(self)
            if 'button' in touch.profile and touch.button in ("scrollup", "scrolldown",):
                # matplotlib expects a signed step (positive scrolls up);
                # the sign mapping from Kivy's button names is assumed here.
                step = 5 if touch.button == "scrollup" else -5
                self.scroll_event(x, y, step, guiEvent=None)
else:
self.button_press_event(x, y, self.get_mouse_button(touch),
dblclick=False, guiEvent=None)
if self.entered_figure:
self.enter_notify_event(guiEvent=None, xy=None)
else:
if not self.entered_figure:
self.leave_notify_event(guiEvent=None)
return False
def on_touch_move(self, touch):
'''Kivy Event to trigger the following matplotlib events:
`motion_notify_event`, `enter_notify_event` and `leave_notify_event`
'''
newcoord = self.to_widget(touch.x, touch.y, relative=True)
x = newcoord[0]
y = newcoord[1]
inside = self.collide_point(touch.x, touch.y)
if inside:
self.motion_notify_event(x, y, guiEvent=None)
if not inside and not self.entered_figure:
self.leave_notify_event(guiEvent=None)
self.entered_figure = True
elif inside and self.entered_figure:
self.enter_notify_event(guiEvent=None, xy=(x, y))
self.entered_figure = False
return False
def get_mouse_button(self, touch):
'''Translate kivy convention for left, right and middle click button
into matplotlib int values: 1 for left, 2 for middle and 3 for
right.
'''
if 'button' in touch.profile:
if touch.button == "left":
return 1
elif touch.button == "middle":
return 2
elif touch.button == "right":
return 3
return -1
def on_touch_up(self, touch):
'''Kivy Event to trigger the following matplotlib events:
`scroll_event` and `button_release_event`.
'''
newcoord = self.to_widget(touch.x, touch.y, relative=True)
x = newcoord[0]
y = newcoord[1]
if touch.grab_current is self:
            if 'button' in touch.profile and touch.button in ("scrollup", "scrolldown",):
                # signed scroll step, as in on_touch_down (assumed mapping)
                step = 5 if touch.button == "scrollup" else -5
                self.scroll_event(x, y, step, guiEvent=None)
else:
self.button_release_event(x, y, self.get_mouse_button(touch), guiEvent=None)
touch.ungrab(self)
else:
return super(FigureCanvasKivy, self).on_touch_up(touch)
return False
def keyboard_on_key_down(self, window, keycode, text, modifiers):
'''Kivy event to trigger matplotlib `key_press_event`.
'''
self.key_press_event(keycode[1], guiEvent=None)
return super(FigureCanvasKivy, self).keyboard_on_key_down(window,
keycode, text, modifiers)
def keyboard_on_key_up(self, window, keycode):
'''Kivy event to trigger matplotlib `key_release_event`.
'''
self.key_release_event(keycode[1], guiEvent=None)
return super(FigureCanvasKivy, self).keyboard_on_key_up(window, keycode)
def _on_mouse_pos(self, *args):
'''Kivy Event to trigger the following matplotlib events:
`motion_notify_event`, `leave_notify_event` and
`enter_notify_event`.
'''
pos = args[1]
newcoord = self.to_widget(pos[0], pos[1], relative=True)
x = newcoord[0]
y = newcoord[1]
inside = self.collide_point(*pos)
if inside:
self.motion_notify_event(x, y, guiEvent=None)
if not inside and not self.entered_figure:
self.leave_notify_event(guiEvent=None)
self.entered_figure = True
elif inside and self.entered_figure:
self.enter_notify_event(guiEvent=None, xy=(pos[0], pos[1]))
self.entered_figure = False
def enter_notify_event(self, guiEvent=None, xy=None):
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def leave_notify_event(self, guiEvent=None):
event = Event('figure_leave_event', self, guiEvent)
self.callbacks.process('figure_leave_event', event)
def _on_pos_changed(self, *args):
self.draw()
def _on_size_changed(self, *args):
'''Changes the size of the matplotlib figure based on the size of the
widget. The widget will change size according to the parent Layout
size.
'''
w, h = self.size
dpival = self.figure.dpi
winch = float(w) / dpival
hinch = float(h) / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
self.resize_event()
self.draw()
def callback(self, *largs):
self.draw()
def blit(self, bbox=None):
'''If bbox is None, blit the entire canvas to the widget. Otherwise
blit only the area defined by the bbox.
'''
self.blitbox = bbox
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['png'] = 'Portable Network Graphics'
    def print_png(self, filename, *args, **kwargs):
        '''Render the figure with the Agg backend and save the result to
           a png file.
        '''
        agg_canvas = FigureCanvasAgg(self.figure)
        agg_canvas.draw()
        l, b, w, h = self.figure.bbox.bounds
        texture = Texture.create(size=(int(w), int(h)))
        texture.blit_buffer(bytes(agg_canvas.get_renderer().buffer_rgba()),
                            colorfmt='rgba', bufferfmt='ubyte')
texture.flip_vertical()
img = Image(texture)
img.save(filename)
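    # Usage sketch (hypothetical call, assuming a FigureCanvasKivy
    # instance named 'canvas'):
    #   canvas.print_png('figure.png')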
def get_default_filetype(self):
return 'png'
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerKivy(*args, **kwargs)
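        # Usage sketch (hypothetical names, assuming a canvas instance):
        #   timer = canvas.new_timer(interval=500)  # fire every 500 ms
        #   timer.add_callback(canvas.draw)         # redraw on each tick
        #   timer.start()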
class FigureManagerKivy(FigureManagerBase):
    '''The FigureManager's main function is to instantiate the backend
       navigation toolbar and to call show() to instantiate the App.
    '''
def __init__(self, canvas, num):
super(FigureManagerKivy, self).__init__(canvas, num)
self.canvas = canvas
self.toolbar = self._get_toolbar()
def show(self):
pass
def get_window_title(self):
return Window.title
def set_window_title(self, title):
Window.title = title
def resize(self, w, h):
if (w > 0) and (h > 0):
Window.size = w, h
def _get_toolbar(self):
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2Kivy(self.canvas)
else:
toolbar = None
return toolbar
'''Now just provide the standard names that backend.__init__ is expecting
'''
FigureCanvas = FigureCanvasKivy
FigureManager = FigureManagerKivy
NavigationToolbar = NavigationToolbar2Kivy
| mit |
buckiracer/data-science-from-scratch | dataScienceFromScratch/DataScienceFromScratch/visualizing_data.py | 58 | 5116 | import matplotlib.pyplot as plt
from collections import Counter
def make_chart_simple_line_chart(plt):
years = [1950, 1960, 1970, 1980, 1990, 2000, 2010]
gdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]
# create a line chart, years on x-axis, gdp on y-axis
plt.plot(years, gdp, color='green', marker='o', linestyle='solid')
# add a title
plt.title("Nominal GDP")
# add a label to the y-axis
plt.ylabel("Billions of $")
plt.show()
def make_chart_simple_bar_chart(plt):
movies = ["Annie Hall", "Ben-Hur", "Casablanca", "Gandhi", "West Side Story"]
num_oscars = [5, 11, 3, 8, 10]
# bars are by default width 0.8, so we'll add 0.1 to the left coordinates
# so that each bar is centered
xs = [i + 0.1 for i, _ in enumerate(movies)]
# plot bars with left x-coordinates [xs], heights [num_oscars]
plt.bar(xs, num_oscars)
plt.ylabel("# of Academy Awards")
plt.title("My Favorite Movies")
# label x-axis with movie names at bar centers
plt.xticks([i + 0.5 for i, _ in enumerate(movies)], movies)
plt.show()
def make_chart_histogram(plt):
grades = [83,95,91,87,70,0,85,82,100,67,73,77,0]
decile = lambda grade: grade // 10 * 10
histogram = Counter(decile(grade) for grade in grades)
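    # With the grades above, this yields (worked example):
    #   Counter({80: 4, 70: 3, 0: 2, 90: 2, 100: 1, 60: 1})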
plt.bar([x - 4 for x in histogram.keys()], # shift each bar to the left by 4
histogram.values(), # give each bar its correct height
8) # give each bar a width of 8
plt.axis([-5, 105, 0, 5]) # x-axis from -5 to 105,
# y-axis from 0 to 5
plt.xticks([10 * i for i in range(11)]) # x-axis labels at 0, 10, ..., 100
plt.xlabel("Decile")
plt.ylabel("# of Students")
plt.title("Distribution of Exam 1 Grades")
plt.show()
def make_chart_misleading_y_axis(plt, mislead=True):
mentions = [500, 505]
years = [2013, 2014]
plt.bar([2012.6, 2013.6], mentions, 0.8)
plt.xticks(years)
plt.ylabel("# of times I heard someone say 'data science'")
# if you don't do this, matplotlib will label the x-axis 0, 1
# and then add a +2.013e3 off in the corner (bad matplotlib!)
plt.ticklabel_format(useOffset=False)
if mislead:
# misleading y-axis only shows the part above 500
plt.axis([2012.5,2014.5,499,506])
plt.title("Look at the 'Huge' Increase!")
else:
plt.axis([2012.5,2014.5,0,550])
plt.title("Not So Huge Anymore.")
plt.show()
def make_chart_several_line_charts(plt):
variance = [1,2,4,8,16,32,64,128,256]
bias_squared = [256,128,64,32,16,8,4,2,1]
total_error = [x + y for x, y in zip(variance, bias_squared)]
xs = range(len(variance))
# we can make multiple calls to plt.plot
# to show multiple series on the same chart
plt.plot(xs, variance, 'g-', label='variance') # green solid line
plt.plot(xs, bias_squared, 'r-.', label='bias^2') # red dot-dashed line
plt.plot(xs, total_error, 'b:', label='total error') # blue dotted line
# because we've assigned labels to each series
# we can get a legend for free
# loc=9 means "top center"
plt.legend(loc=9)
plt.xlabel("model complexity")
plt.title("The Bias-Variance Tradeoff")
plt.show()
def make_chart_scatter_plot(plt):
friends = [ 70, 65, 72, 63, 71, 64, 60, 64, 67]
minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
plt.scatter(friends, minutes)
# label each point
for label, friend_count, minute_count in zip(labels, friends, minutes):
plt.annotate(label,
xy=(friend_count, minute_count), # put the label with its point
xytext=(5, -5), # but slightly offset
textcoords='offset points')
plt.title("Daily Minutes vs. Number of Friends")
plt.xlabel("# of friends")
plt.ylabel("daily minutes spent on the site")
plt.show()
def make_chart_scatterplot_axes(plt, equal_axes=False):
test_1_grades = [ 99, 90, 85, 97, 80]
test_2_grades = [100, 85, 60, 90, 70]
plt.scatter(test_1_grades, test_2_grades)
plt.xlabel("test 1 grade")
plt.ylabel("test 2 grade")
if equal_axes:
plt.title("Axes Are Comparable")
plt.axis("equal")
else:
plt.title("Axes Aren't Comparable")
plt.show()
def make_chart_pie_chart(plt):
plt.pie([0.95, 0.05], labels=["Uses pie charts", "Knows better"])
# make sure pie is a circle and not an oval
plt.axis("equal")
plt.show()
if __name__ == "__main__":
make_chart_simple_line_chart(plt)
make_chart_simple_bar_chart(plt)
make_chart_histogram(plt)
make_chart_misleading_y_axis(plt, mislead=True)
make_chart_misleading_y_axis(plt, mislead=False)
make_chart_several_line_charts(plt)
make_chart_scatterplot_axes(plt, equal_axes=False)
make_chart_scatterplot_axes(plt, equal_axes=True)
make_chart_pie_chart(plt)
| unlicense |
stargaser/astropy | examples/io/split-jpeg-to-fits.py | 3 | 2472 | # -*- coding: utf-8 -*-
"""
=====================================================
Convert a 3-color image (JPG) to separate FITS images
=====================================================
This example opens an RGB JPEG image and writes out each channel as a separate
FITS (image) file.
This example uses `pillow <http://python-pillow.org>`_ to read the image,
`matplotlib.pyplot` to display the image, and `astropy.io.fits` to save FITS files.
*By: Erik Bray, Adrian Price-Whelan*
*License: BSD*
"""
import numpy as np
from PIL import Image
from astropy.io import fits
##############################################################################
# Set up matplotlib and use a nicer set of plot parameters
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Load and display the original 3-color jpeg image:
image = Image.open('Hs-2009-14-a-web.jpg')
xsize, ysize = image.size
print("Image size: {} x {}".format(xsize, ysize))
plt.imshow(image)
##############################################################################
# Split the three channels (RGB) and get the data as Numpy arrays. The arrays
# are flattened, so they are 1-dimensional:
r, g, b = image.split()
r_data = np.array(r.getdata()) # data is now an array of length ysize*xsize
g_data = np.array(g.getdata())
b_data = np.array(b.getdata())
print(r_data.shape)
##############################################################################
# Reshape the image arrays to be 2-dimensional:
r_data = r_data.reshape(ysize, xsize)
g_data = g_data.reshape(ysize, xsize)
b_data = b_data.reshape(ysize, xsize)
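# After the reshape, each channel is a 2-D array, e.g.
# r_data.shape == (ysize, xsize).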
##############################################################################
# Write out the channels as separate FITS images
red = fits.PrimaryHDU(data=r_data)
red.header['LATOBS'] = "32:11:56" # add spurious header info
red.header['LONGOBS'] = "110:56"
red.writeto('red.fits')
green = fits.PrimaryHDU(data=g_data)
green.header['LATOBS'] = "32:11:56"
green.header['LONGOBS'] = "110:56"
green.writeto('green.fits')
blue = fits.PrimaryHDU(data=b_data)
blue.header['LATOBS'] = "32:11:56"
blue.header['LONGOBS'] = "110:56"
blue.writeto('blue.fits')
##############################################################################
# Delete the files created
import os
os.remove('red.fits')
os.remove('green.fits')
os.remove('blue.fits')
| bsd-3-clause |
peckhams/topoflow | topoflow/components/met_base.py | 1 | 111479 |
## Does "land_surface_air__latent_heat_flux" make sense? (2/5/13)
# Copyright (c) 2001-2014, Scott D. Peckham
#
# Sep 2014. Fixed sign error in update_bulk_richardson_number().
# Ability to compute separate P_snow and P_rain.
# Aug 2014. New CSDMS Standard Names and clean up.
# Nov 2013. Converted TopoFlow to a Python package.
#
# Jan 2013. Revised handling of input/output names.
# Oct 2012. CSDMS Standard Names (version 0.7.9) and BMI.
# May 2012. P is now a 1D array with one element and mutable,
# so any comp with ref to it can see it change.
# Jun 2010. update_net_shortwave_radiation(), etc.
# May 2010. Changes to initialize() and read_cfg_file().
# Aug 2009
# Jan 2009. Converted from IDL.
#
#-----------------------------------------------------------------------
# NOTES: This file defines a "base class" for meteorology
# components as well as any functions used by most or
# all meteorology methods. The methods of this class
# should be over-ridden as necessary for different
# methods of modeling meteorology.
#-----------------------------------------------------------------------
# Notes: Do we ever need to distinguish between a surface
# temperature and snow temperature (in the snow) ?
# Recall that a separate T_soil_x variable is used
# to compute Qc.
#
# Cp_snow is from NCAR CSM Flux Coupler web page
#
# rho_H2O is currently not adjustable with GUI. (still true?)
#
#-----------------------------------------------------------------------
#
# class met_component (inherits from BMI_base.py)
#
# get_component_name()
# get_attribute() # (10/26/11)
# get_input_var_names() # (5/15/12)
# get_output_var_names() # (5/15/12)
# get_var_name() # (5/15/12)
# get_var_units() # (5/15/12)
# ---------------------
# set_constants()
# initialize()
# update()
# finalize()
# ----------------------------
# set_computed_input_vars()
# initialize_computed_vars()
# ----------------------------
# update_P_integral()
# update_P_max()
# update_P_rain() # (9/14/14, new method)
# update_P_snow() # (9/14/14, new method)
# ------------------------------------
# update_bulk_richardson_number()
# update_bulk_aero_conductance()
# update_sensible_heat_flux()
# update_saturation_vapor_pressure()
# update_vapor_pressure()
# update_dew_point() # (7/6/10)
# update_precipitable_water_content() # (7/6/10)
# ------------------------------------
# update_latent_heat_flux()
# update_conduction_heat_flux()
# update_advection_heat_flux()
# ------------------------------------
# update_julian_day() # (7/1/10)
# update_net_shortwave_radiation() # (7/1/10)
# update_em_air() # (7/1/10)
# update_net_longwave_radiation() # (7/1/10)
# update_net_energy_flux() # ("Q_sum")
# ------------------------------------
# open_input_files()
# read_input_files()
# close_input_files()
# ------------------------------------
# update_outfile_names()
# open_output_files()
# write_output_files()
# close_output_files()
# save_grids()
# save_pixel_values()
#
# Functions:
# compare_em_air_methods()
#
#-----------------------------------------------------------------------
import numpy as np
import os
from topoflow.components import solar_funcs as solar
from topoflow.utils import BMI_base
from topoflow.utils import model_input
from topoflow.utils import model_output
from topoflow.utils import rtg_files
#-----------------------------------------------------------------------
class met_component( BMI_base.BMI_component ):
#-------------------------------------------------------------------
_att_map = {
'model_name': 'TopoFlow_Meteorology',
'version': '3.1',
'author_name': 'Scott D. Peckham',
'grid_type': 'uniform',
'time_step_type': 'fixed',
'step_method': 'explicit',
#-------------------------------------------------------------
'comp_name': 'Meteorology',
'model_family': 'TopoFlow',
'cfg_template_file': 'Meteorology.cfg.in',
'cfg_extension': '_meteorology.cfg',
'cmt_var_prefix': '/Meteorology/Input/Var/',
'gui_xml_file': '/home/csdms/cca/topoflow/3.1/src/share/cmt/gui/Meteorology.xml',
'dialog_title': 'Meteorology: Method 1 Parameters',
'time_units': 'seconds' }
#---------------------------------------------------------
# Note that SWE = "snow water equivalent", but it really
# just means "liquid_equivalent".
#---------------------------------------------------------
_input_var_names = [
'snowpack__z_mean_of_mass-per-volume_density', # rho_snow
'snowpack__depth', # h_snow
'snowpack__liquid-equivalent_depth', # h_swe
'snowpack__melt_volume_flux' ] # SM (MR used for ice?)
#-----------------------------------------------------------
# albedo, emissivity and transmittance are dimensionless.
#-----------------------------------------------------------
# "atmosphere_aerosol_dust__reduction_of_transmittance" vs.
# This TF parameter comes from Dingman, App. E, p. 604.
#-----------------------------------------------------------
# There is an Optical_Air_Mass function in solar_funcs.py.
# However, this quantity is not saved in comp state.
#
# "optical_path_length_ratio" vs. "optical_air_mass" OR
# "airmass_factor" OR "relative_airmass" OR
# "relative_optical_path_length"
#-----------------------------------------------------------
# Our term "liquid_equivalent_precipitation" is widely
# used on the Internet, with 374,000 Google hits.
#--------------------------------------------------------------
# Note: "bulk exchange coefficient" has 2460 Google hits.
# It is closely related to a "transfer coefficient"
# for mass, momentum or heat. There are no CF
# Standard Names with "bulk", "exchange" or "transfer".
#
# Zhang et al. (2000) use "bulk exchange coefficient" in a
# nonstandard way, with units of velocity vs. unitless.
#
# Dn = bulk exchange coeff for the conditions of
# neutral atmospheric stability [m/s]
# Dh = bulk exchange coeff for heat [m/s]
# De = bulk exchange coeff for vapor [m/s]
#---------------------------------------------------------------
# Now this component uses T_air to break the liquid-equivalent
# precip rate into separate P_rain and P_snow components.
# P_rain is used by channel_base.update_R()
# P_snow is used by snow_base.update_depth()
#---------------------------------------------------------------
_output_var_names = [
# 'atmosphere__optical_path_length_ratio', # M_opt [1] (in solar_funcs.py)
# 'atmosphere__von_karman_constant', # kappa
'atmosphere_aerosol_dust__reduction_of_transmittance', # dust_atten ##### (from GUI)
'atmosphere_air-column_water-vapor__liquid-equivalent_depth', # W_p ("precipitable depth")
'atmosphere_bottom_air__brutsaert_emissivity_canopy_factor', # canopy_factor
'atmosphere_bottom_air__brutsaert_emissivity_cloud_factor', # cloud_factor
'atmosphere_bottom_air__bulk_latent_heat_aerodynamic_conductance', # De [m s-1], latent
'atmosphere_bottom_air__bulk_sensible_heat_aerodynamic_conductance', # Dh [m s-1], sensible
'atmosphere_bottom_air__emissivity', # em_air
'atmosphere_bottom_air__mass-per-volume_density', # rho_air
'atmosphere_bottom_air__mass-specific_isobaric_heat_capacity', # Cp_air
        'atmosphere_bottom_air__neutral_bulk_heat_aerodynamic_conductance', # Dn [m s-1], neutral
'atmosphere_bottom_air__pressure', # p0
'atmosphere_bottom_air__temperature', # T_air
'atmosphere_bottom_air_flow__bulk_richardson_number', # Ri [1]
'atmosphere_bottom_air_flow__log_law_roughness_length', # z0_air
'atmosphere_bottom_air_flow__reference-height_speed', # uz
'atmosphere_bottom_air_flow__speed_reference_height', # z
'atmosphere_bottom_air_land_net-latent-heat__energy_flux', # Qe [W m-2]
'atmosphere_bottom_air_land_net-sensible-heat__energy_flux', # Qh [W m-2]
'atmosphere_bottom_air_water-vapor__dew_point_temperature', # T_dew
'atmosphere_bottom_air_water-vapor__partial_pressure', # e_air # (insert "reference_height" ??)
'atmosphere_bottom_air_water-vapor__relative_saturation', # RH
'atmosphere_bottom_air_water-vapor__saturated_partial_pressure', # e_sat_air
'atmosphere_water__domain_time_integral_of_precipitation_leq-volume_flux', # vol_P
'atmosphere_water__domain_time_max_of_precipitation_leq-volume_flux', # P_max
'atmosphere_water__precipitation_leq-volume_flux', # P [m s-1]
'atmosphere_water__rainfall_volume_flux', # P_rain [m s-1] (liquid)
'atmosphere_water__snowfall_leq-volume_flux', # P_snow [m s-1]
'earth__standard_gravity_constant', # g [m s-2]
'land_surface__albedo', # albedo
'land_surface__aspect_angle', # alpha (from GUI)
'land_surface__emissivity', # em_surf
'land_surface__latitude', # lat_deg [degrees]
'land_surface__longitude', # lon_deg [degrees]
'land_surface__slope_angle', # beta (from GUI)
'land_surface__temperature', # T_surf ### OR JUST "land__temperature"?
# 'land_surface_air__temperature', # T_air
'land_surface_air_water-vapor__partial_pressure', # e_surf # (insert "reference_height" ??)
'land_surface_air_water-vapor__saturated_partial_pressure', # e_sat_surf
'land_surface_net-longwave-radiation__energy_flux', # Qn_LW [W m-2]
'land_surface_net-shortwave-radiation__energy_flux', # Qn_SW [W m-2]
'land_surface_net-total-energy__energy_flux', # Q_sum [W w-2]
'model__time_step', # dt
'physics__stefan_boltzmann_constant', # sigma [W m-2 K-4]
'physics__von_karman_constant', # kappa [1]
'water__mass-specific_latent_fusion_heat', # Lf [J kg-1]
'water__mass-specific_latent_vaporization_heat', # Lv [J kg-1]
'water-liquid__mass-per-volume_density' ] # rho_H2O
#-----------------------------------------
# These are used only in solar_funcs.py
# Later, create a Radiation component.
#---------------------------------------------
# Should we allow "day" as a base quantity ?
# "day_length" is confusing. Think about "date" also.
# Maybe something like:
#
# "earth__mean_solar_rotation_period"
# "earth__sidereal_rotation_period"
# "earth__stellar_rotation_period" (relative to "fixed stars")
# maybe: "earth__complete_rotation_period" ??
#
# OR:
# "earth_mean_solar_day__duration"
# "earth_sidereal_day__duration"
# "earth_stellar_day__duration"
#
# OR perhaps:
# "earth_mean_solar_day__rotation_period"
# "earth_sidereal_day__rotation_period"
# "earth_stellar_day__rotation_period"
#
# "stellar rotation period" gives 84,500 Google hits.
# "solar_rotation_period" gives 41,100 Google hits.
# "sidereal_roation_period" gives 86,000 Google hits.
# "stellar day" gives 136,000 Google hits (but many unrelated).
#
# NB! "stellar_rotation_period" is ambiguous since it is also
# used for the rotation period of a star.
#
# "earth_mean_solar_day__hour_count" ("standard_day" ?)
# "earth_sidereal_day__hour_count"
# "earth_sidereal_day__duration"
# "earth__rotation_period" = "sidereal_day"
#
# "earth_stellar_day__period" ??
# "earth_stellar_day__duration" ??
#
#------------------------------------------------------------------
# For "earth__rotation_rate", it seems this should be based on
# the sidereal day (23.93 hours) instead of the mean solar day.
#------------------------------------------------------------------
# There are at least a few online sources that use both terms:
# "equivalent latitude" and "equivalent longitude". See:
# "The Construction and Application of a Martian Snowpack Model".
#------------------------------------------------------------------
# Adopt the little-used term: "topographic_sunrise" ?
# Or maybe "illuminated_topography", or "local_sunrise" ??
#------------------------------------------------------------------
# For angle relations between the earth and the sun, should we
# just use the adjective "solar" in the quantity name or include
# sun in the object name? We could also use terms like:
# earth_to_sun__declination_angle
# earth_to_sun__right_ascension_angle
#
#------------------------------------------------------------------
# The adjective "local" in "earth_local_apparent_noon__time"
# may be helpful in other contexts such as:
# 'earth__local_longitude' and 'land_surface__local_elevation'.
#------------------------------------------------------------------
# 'earth__autumnal_equinox_date',
# 'earth__autumnal_equinox_time',
# 'earth_axis__ecliptic_tilt_angle', # tilt_angle
# 'earth__julian_day_number', ########
# 'earth__julian_day_angle',
# 'earth__local_apparent_noon_time'
# 'earth__mean_radius',
# 'earth__mean_solar_day_duration', # (exactly 24 hours)
# 'earth_orbit__eccentricity',
# 'earth_orbit__period', # (one year)
# 'earth__perihelion_julian_day', ######
# 'earth__rotation_period', ######
# 'earth__rotation_rate', # Omega ###### What about Angular Velocity ?
# 'earth__sidereal_day_duration', # (one rotation = 23.934470 hours)
# 'earth__solar_declination_angle',
# 'earth__solar_hour_angle',
# 'earth__solar_irradiation_constant', ## (or "insolation_constant" ??)
# 'earth__solar_right_ascension_angle',
# 'earth__solar_vertical_angle', (complement of zenith angle)
# 'earth__solar_zenith_angle',
# 'earth__stellar_day_duration', # (relative to the "fixed stars")
# 'earth__summer_solstice_date',
# 'earth__summer_solstice_time',
# 'earth__topographic_sunrise_equivalent_latitude',
# 'earth__topographic_sunrise_equivalent_longitude', (flat_lon + offset)
# 'earth__topographic_sunrise_equivalent_longitude_offset',
# 'earth__topographic_sunrise_time',
# 'earth__topographic_sunset_time',
# 'earth_true_solar_noon___time', #####
# 'earth_clock__true_solar_noon_time'
# 'earth__vernal_equinox_date',
# 'earth__vernal_equinox_time',
# 'earth__winter_solstice_date',
# 'earth__winter_solstice_time',
#
# What about a "slope_corrected" or "topographic" version of K_dir ?
#
# 'land_surface__backscattered_shortwave_irradiation_flux', # K_bs
# 'land_surface__diffuse_shortwave_irradiation_flux', # K_dif
# 'land_surface__direct_shortwave_irradiation_flux', # K_dir
# 'land_surface__global_shortwave_irradiation_flux', # K_glob = K_dif + K_dir
#------------------------------------------------------------------
#------------------------------------------------------------------
# Maybe we should rename "z" to "z_ref" and "uz" to "uz_ref" ?
#------------------------------------------------------------------
_var_name_map = {
'snowpack__z_mean_of_mass-per-volume_density': 'rho_snow',
'snowpack__depth': 'h_snow',
'snowpack__liquid-equivalent_depth': 'h_swe',
'snowpack__melt_volume_flux': 'SM', # (MR is used for ice)
#-----------------------------------------------------------------
#'atmosphere__optical_path_length_ratio': 'M_opt', # (in solar_funcs.py)
# 'atmosphere__von_karman_constant': 'kappa',
'atmosphere_aerosol_dust__reduction_of_transmittance': 'dust_atten',
'atmosphere_air-column_water-vapor__liquid-equivalent_depth': 'W_p', #########
'atmosphere_bottom_air__brutsaert_emissivity_canopy_factor': 'canopy_factor',
'atmosphere_bottom_air__brutsaert_emissivity_cloud_factor': 'cloud_factor',
'atmosphere_bottom_air__bulk_latent_heat_aerodynamic_conductance': 'De',
'atmosphere_bottom_air__bulk_sensible_heat_aerodynamic_conductance': 'Dh',
'atmosphere_bottom_air__emissivity': 'em_air',
'atmosphere_bottom_air__mass-per-volume_density': 'rho_air',
'atmosphere_bottom_air__mass-specific_isobaric_heat_capacity': 'Cp_air',
'atmosphere_bottom_air__neutral_bulk_heat_aerodynamic_conductance': 'Dn',
'atmosphere_bottom_air__pressure': 'p0',
'atmosphere_bottom_air__temperature': 'T_air',
'atmosphere_bottom_air_flow__bulk_richardson_number': 'Ri',
'atmosphere_bottom_air_flow__log_law_roughness_length': 'z0_air', ## (not "z0")
'atmosphere_bottom_air_flow__reference-height_speed': 'uz',
'atmosphere_bottom_air_flow__speed_reference_height': 'z',
'atmosphere_bottom_air_land_net-latent-heat__energy_flux': 'Qe',
'atmosphere_bottom_air_land_net-sensible-heat__energy_flux': 'Qh',
'atmosphere_bottom_air_water-vapor__dew_point_temperature': 'T_dew',
'atmosphere_bottom_air_water-vapor__partial_pressure': 'e_air',
'atmosphere_bottom_air_water-vapor__relative_saturation': 'RH',
'atmosphere_bottom_air_water-vapor__saturated_partial_pressure': 'e_sat_air',
'atmosphere_water__domain_time_integral_of_precipitation_leq-volume_flux': 'vol_P',
'atmosphere_water__domain_time_max_of_precipitation_leq-volume_flux': 'P_max',
'atmosphere_water__precipitation_leq-volume_flux': 'P',
'atmosphere_water__rainfall_volume_flux': 'P_rain',
'atmosphere_water__snowfall_leq-volume_flux': 'P_snow',
'earth__standard_gravity_constant': 'g',
'land_surface__albedo': 'albedo',
'land_surface__aspect_angle': 'alpha',
'land_surface__emissivity': 'em_surf',
'land_surface__latitude': 'lat_deg',
'land_surface__longitude': 'lon_deg',
'land_surface__slope_angle': 'beta',
'land_surface__temperature': 'T_surf',
# 'land_surface_air__temperature': 'T_surf',
'land_surface_air_water-vapor__partial_pressure': 'e_surf',
'land_surface_air_water-vapor__saturated_partial_pressure': 'e_sat_surf',
'land_surface_net-longwave-radiation__energy_flux': 'Qn_LW',
'land_surface_net-shortwave-radiation__energy_flux': 'Qn_SW',
'land_surface_net-total-energy__energy_flux': 'Q_sum',
'model__time_step': 'dt',
'physics__stefan_boltzmann_constant': 'sigma',
'physics__von_karman_constant': 'kappa',
'water__mass-specific_latent_fusion_heat': 'Lf',
'water__mass-specific_latent_vaporization_heat': 'Lv',
'water-liquid__mass-per-volume_density': 'rho_H2O' }
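    #------------------------------------------------------------------
    # Example: self.get_var_name(
    #     'atmosphere_water__precipitation_leq-volume_flux')
    # returns the short internal name 'P'.
    #------------------------------------------------------------------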
#-----------------------------------------------------------------
# Note: The "update()" function calls several functions with the
# MBAR keyword set to get units of "mbar" vs. "kPa".
#-----------------------------------------------------------------
# Note: We need to be careful with whether units are C or K,
# for all "thermal" quantities (e.g. thermal_capacity).
#-----------------------------------------------------------------
# Note: ARHYTHM had 3 "bulk exchange coefficients" that are all
# equal and therefore have the same units of [m s-1].
# Double-check that this is what is intended. ##########
#-----------------------------------------------------------------
# Note: "atmosphere_column_water__liquid_equivalent_depth" has
# units of "cm", as in Dingman's book. Make sure it gets
# used correctly in equations.
#-----------------------------------------------------------------
# Note: slope_angle and aspect_angle have units of RADIANS.
# aspect_angle is measured CW from north.
# RT files ending in "_mf-angle.rtg" and "fd-aspect.rtg"
# contain aspect values. The former are in [0, 2 Pi]
# while the latter are in [-Pi, Pi] and both measure
# CCW from due east. They are converted for use here.
#-----------------------------------------------------------------
_var_units_map = {
'snowpack__z_mean_of_mass-per-volume_density': 'kg m-3',
'snowpack__depth': 'm',
'snowpack__liquid-equivalent_depth': 'm',
'snowpack__melt_volume_flux': 'm s-1',
#-------------------------------------------------------------
# 'atmosphere__optical_path_length_ratio': '1',
# 'atmosphere__von_karman_constant': '1',
'atmosphere_aerosol_dust__reduction_of_transmittance': '1',
'atmosphere_air-column_water-vapor__liquid-equivalent_depth': 'cm', # (see Notes above)
'atmosphere_bottom_air__brutsaert_emissivity_canopy_factor': '1',
'atmosphere_bottom_air__brutsaert_emissivity_cloud_factor': '1',
'atmosphere_bottom_air__bulk_latent_heat_aerodynamic_conductance': 'm s-1', # (see Notes above)
'atmosphere_bottom_air__bulk_sensible_heat_aerodynamic_conductance': 'm s-1', # (see Notes above)
'atmosphere_bottom_air__emissivity': '1',
'atmosphere_bottom_air__mass-per-volume_density': 'kg m-3',
'atmosphere_bottom_air__mass-specific_isobaric_heat_capacity': 'J kg-1 K-1', # (see Notes above)
'atmosphere_bottom_air__neutral_bulk_heat_aerodynamic_conductance': 'm s-1', # (see Notes above)
'atmosphere_bottom_air__pressure': 'mbar',
'atmosphere_bottom_air__temperature': 'deg_C', # (see Notes above)
'atmosphere_bottom_air_flow__bulk_richardson_number': '1',
'atmosphere_bottom_air_flow__log_law_roughness_length': 'm',
'atmosphere_bottom_air_flow__reference-height_speed': 'm s-1',
'atmosphere_bottom_air_flow__speed_reference_height': 'm',
'atmosphere_bottom_air_land_net-latent-heat__energy_flux': 'W m-2',
'atmosphere_bottom_air_land_net-sensible-heat__energy_flux': 'W m-2',
'atmosphere_bottom_air_water-vapor__dew_point_temperature': 'deg_C',
'atmosphere_bottom_air_water-vapor__partial_pressure': 'mbar', # (see Notes above)
'atmosphere_bottom_air_water-vapor__relative_saturation': '1',
'atmosphere_bottom_air_water-vapor__saturated_partial_pressure': 'mbar', # (see Notes above)
'atmosphere_water__domain_time_integral_of_precipitation_leq-volume_flux': 'm3',
'atmosphere_water__domain_time_max_of_precipitation_leq-volume_flux': 'm s-1',
'atmosphere_water__precipitation_leq-volume_flux': 'm s-1',
'atmosphere_water__rainfall_volume_flux': 'm s-1', # (see Notes above)
'atmosphere_water__snowfall_leq-volume_flux': 'm s-1', # (see Notes above)
'earth__standard_gravity_constant': 'm s-2',
'land_surface__albedo': '1',
'land_surface__aspect_angle': 'radians', # (see Notes above)
'land_surface__emissivity': '1',
'land_surface__latitude': 'degrees',
'land_surface__longitude': 'degrees',
'land_surface__slope_angle': 'radians',
'land_surface__temperature': 'deg_C',
# 'land_surface_air__temperature': 'deg_C',
'land_surface_air_water-vapor__partial_pressure': 'mbar',
'land_surface_air_water-vapor__saturated_partial_pressure': 'mbar',
'land_surface_net-longwave-radiation__energy_flux': 'W m-2',
'land_surface_net-shortwave-radiation__energy_flux': 'W m-2',
'land_surface_net-total-energy__energy_flux': 'W m-2',
'model__time_step': 's',
'physics__stefan_boltzmann_constant': 'W m-2 K-4',
'physics__von_karman_constant': '1',
'water__mass-specific_latent_fusion_heat': 'J kg-1',
'water__mass-specific_latent_vaporization_heat': 'J kg-1',
'water-liquid__mass-per-volume_density': 'kg m-3' }
#------------------------------------------------
# Return NumPy string arrays vs. Python lists ?
#------------------------------------------------
## _input_var_names = np.array( _input_var_names )
## _output_var_names = np.array( _output_var_names )
#-------------------------------------------------------------------
def get_component_name(self):
return 'TopoFlow_Meteorology'
# get_component_name()
#-------------------------------------------------------------------
def get_attribute(self, att_name):
try:
return self._att_map[ att_name.lower() ]
        except KeyError:
print '###################################################'
print ' ERROR: Could not find attribute: ' + att_name
print '###################################################'
print ' '
# get_attribute()
#-------------------------------------------------------------------
def get_input_var_names(self):
#--------------------------------------------------------
# Note: These are currently variables needed from other
# components vs. those read from files or GUI.
#--------------------------------------------------------
return self._input_var_names
# get_input_var_names()
#-------------------------------------------------------------------
def get_output_var_names(self):
return self._output_var_names
# get_output_var_names()
#-------------------------------------------------------------------
def get_var_name(self, long_var_name):
return self._var_name_map[ long_var_name ]
# get_var_name()
#-------------------------------------------------------------------
def get_var_units(self, long_var_name):
return self._var_units_map[ long_var_name ]
# get_var_units()
#-------------------------------------------------------------------
## def get_var_type(self, long_var_name):
##
## #---------------------------------------
## # So far, all vars have type "double",
## # but use the one in BMI_base instead.
## #---------------------------------------
## return 'float64'
##
## # get_var_type()
#-------------------------------------------------------------------
def set_constants(self):
#---------------------------------
# Define some physical constants
#---------------------------------
self.g = np.float64(9.81) # [m s-2, gravity]
self.kappa = np.float64(0.408) # [1] (von Karman)
self.rho_H2O = np.float64(1000) # [kg m-3]
self.rho_air = np.float64(1.2614) # [kg m-3]
self.Cp_air = np.float64(1005.7) # [J kg-1 K-1]
self.Lv = np.float64(2500000) # [J kg-1] Latent heat of vaporiz.
self.Lf = np.float64(334000) # [J kg-1 = W s kg-1], Latent heat of fusion
self.sigma = np.float64(5.67E-8) # [W m-2 K-4] (Stefan-Boltzman constant)
self.C_to_K = np.float64(273.15) # (add to convert deg C to K)
self.twopi = np.float64(2) * np.pi
self.one_seventh = np.float64(1) / 7
self.hours_per_day = np.float64(24)
self.secs_per_day = np.float64(3600) * self.hours_per_day
#---------------------------
# See update_latent_heat()
#-----------------------------------------------------------
# According to Dingman (2002, p. 273), constant should
# be 0.622 instead of 0.662 (Zhang et al., 2000, p. 1002).
# Is this constant actually the dimensionless ratio of
# the molecular weight of water to that of dry air ?
#-----------------------------------------------------------
## self.latent_heat_constant = np.float64(0.622)
self.latent_heat_constant = np.float64(0.662)
#----------------------------------------
# Constants related to precip (9/24/09)
#----------------------------------------
self.mmph_to_mps = (np.float64(1) / np.float64(3600000))
self.mps_to_mmph = np.float64(3600000)
self.forever = np.float64(999999999) # [minutes]
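        # Worked example: a rainrate of 36 [mmph] converts to
        # 36 * self.mmph_to_mps = 1e-5 [m s-1].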
#------------------------------------------------
# Only needed for method 1, where all rates and
# durations are read as 1D arrays from GUI.
# Method 1 may be removed in a future version.
#------------------------------------------------
## self.method1_rates = None
## self.method1_durations = None
## self.method1_n_rates = 0
# set_constants()
#-------------------------------------------------------------------
def initialize(self, cfg_file=None, mode="nondriver",
SILENT=False):
if not(SILENT):
print ' '
print 'Meteorology component: Initializing...'
self.status = 'initializing' # (OpenMI 2.0 convention)
self.mode = mode
self.cfg_file = cfg_file
#-----------------------------------------------
# Load component parameters from a config file
#-----------------------------------------------
self.set_constants()
self.initialize_config_vars()
## print ' Calling read_grid_info()...'
self.read_grid_info()
## print ' Calling initialize_basin_vars()...'
self.initialize_basin_vars() # (5/14/10)
#----------------------------------------------------
# NB! This read_input_files() uses self.time_index.
# Also needs to be before "Disabled" test.
#----------------------------------------------------
## print ' Calling initialize_time_vars()...'
self.initialize_time_vars()
#-------------------------------------------------
# (5/19/12) This makes P "mutable", which allows
# its updated values to be seen by any component
# that has a reference to it.
# Note that P will typically be read from a file.
#-------------------------------------------------
self.P = self.initialize_var( self.P_type )
self.P_rain = self.initialize_var( self.P_type )
self.P_snow = self.initialize_var( self.P_type )
#------------------------------------------------------------
# If "Enabled", will call initialize_computed_vars() below.
#------------------------------------------------------------
if (self.comp_status == 'Disabled'):
if not(SILENT):
print 'Meteorology component: Disabled in CFG file.'
self.e_air = self.initialize_scalar(0, dtype='float64')
self.e_surf = self.initialize_scalar(0, dtype='float64')
self.em_air = self.initialize_scalar(0, dtype='float64')
self.Qn_SW = self.initialize_scalar(0, dtype='float64')
self.Qn_LW = self.initialize_scalar(0, dtype='float64')
self.Q_sum = self.initialize_scalar(0, dtype='float64')
self.Qc = self.initialize_scalar(0, dtype='float64')
self.Qa = self.initialize_scalar(0, dtype='float64')
self.DONE = True
self.status = 'initialized'
return
#-----------------------------------------------
# Read from files as needed to initialize vars
#-----------------------------------------------
self.open_input_files()
self.read_input_files() # (initializes P)
## self.check_input_types() # (not needed so far)
#-----------------------
# Initialize variables
#-----------------------
## print ' Calling initialize_computed_vars()...'
self.initialize_computed_vars() # (after read_input_files)
if not(self.PRECIP_ONLY):
self.open_output_files()
self.status = 'initialized' # (OpenMI 2.0 convention)
# initialize()
#-------------------------------------------------------------------
## def update(self, dt=-1.0, time_seconds=None):
def update(self, dt=-1.0):
#----------------------------------------------------------
# Note: The read_input_files() method is first called by
# the initialize() method. Then, the update()
# method is called one or more times, and it calls
# other update_*() methods to compute additional
# variables using input data that was last read.
# Based on this pattern, read_input_files() should
# be called at end of update() method as done here.
# If the input files don't contain any additional
# data, the last data read persists by default.
#----------------------------------------------------------
if (self.comp_status == 'Disabled'): return
self.status = 'updating' # (OpenMI 2.0 convention)
#-------------------------------------------
# Update computed values related to precip
#-------------------------------------------
self.update_P_integral()
self.update_P_max()
self.update_P_rain()
self.update_P_snow()
#-------------------------
# Update computed values
#-------------------------
if not(self.PRECIP_ONLY):
self.update_bulk_richardson_number()
self.update_bulk_aero_conductance()
self.update_sensible_heat_flux()
self.update_saturation_vapor_pressure(MBAR=True)
self.update_saturation_vapor_pressure(MBAR=True, SURFACE=True) ########
self.update_vapor_pressure(MBAR=True)
self.update_dew_point() ###
self.update_precipitable_water_content() ###
self.update_vapor_pressure(MBAR=True, SURFACE=True) ########
self.update_latent_heat_flux() # (uses e_air and e_surf)
self.update_conduction_heat_flux()
self.update_advection_heat_flux()
self.update_julian_day()
self.update_net_shortwave_radiation()
self.update_em_air()
self.update_net_longwave_radiation()
self.update_net_energy_flux() # (at the end)
#----------------------------------------
# Read next met vars from input files ?
#-------------------------------------------
# Note that read_input_files() is called
# by initialize() and these values must be
# used for "update" calls before reading
# new ones.
#-------------------------------------------
if (self.time_index > 0):
self.read_input_files()
#----------------------------------------------
# Write user-specified data to output files ?
#----------------------------------------------
# Components use own self.time_sec by default.
#-----------------------------------------------
if not(self.PRECIP_ONLY):
self.write_output_files()
## self.write_output_files( time_seconds )
#-----------------------------
# Update internal clock
# after write_output_files()
#-----------------------------
self.update_time( dt )
self.status = 'updated' # (OpenMI)
# update()
#-------------------------------------------------------------------
def finalize(self):
self.status = 'finalizing' # (OpenMI)
if (self.comp_status == 'Enabled'):
self.close_input_files() ## TopoFlow input "data streams"
if not(self.PRECIP_ONLY):
self.close_output_files()
self.status = 'finalized' # (OpenMI)
self.print_final_report(comp_name='Meteorology component')
# finalize()
#-------------------------------------------------------------------
def set_computed_input_vars(self):
#-----------------------------------------------
# Convert precip rate units from mm/h to m/s ?
#-----------------------------------------------
# NB! read_input_files() does this for files.
#-----------------------------------------------
if (self.P_type == 'Scalar'):
## print '######## self.P_type =', self.P_type
## print '######## type(self.P) =', type(self.P)
## print '######## self.P =', self.P
## print '######## Converting scalar P from MMPH to MPS.'
#-----------------------------------------------------
# (2/7/13) Must use "*=" here to preserve reference.
#-----------------------------------------------------
            self.P *= self.mmph_to_mps
            ## self.P = self.P * self.mmph_to_mps
            print 'Scalar rainrate set to:', self.P, ' [m s-1]'
#---------------------------------
# Process the PRECIP_ONLY toggle
#---------------------------------
if not(hasattr(self, 'PRECIP_ONLY')):
self.PRECIP_ONLY = False
elif (self.PRECIP_ONLY.lower() == 'yes'):
self.PRECIP_ONLY = True
else:
self.PRECIP_ONLY = False
#---------------------------------------
# Print info message about PRECIP_ONLY
#---------------------------------------
if (self.PRECIP_ONLY):
print '-----------------------------------------'
print ' NOTE: Since PRECIP_ONLY = True, output'
print ' variables will not be computed'
print ' or saved to files.'
print '-----------------------------------------'
            print ' '
#----------------------------------------------------
# Toggle to use SATTERLUND or BRUTSAERT methods
# for computing e_air and em_air. (Not in GUI yet.)
#----------------------------------------------------
if not(hasattr(self, 'SATTERLUND')):
self.SATTERLUND = False
#---------------------------------------------
# Convert GMT_offset from string to int
# because GUI can't use ints in droplist yet
#---------------------------------------------
self.GMT_offset = np.int16( self.GMT_offset )
#------------------------------------------------
# Convert start_month from string to integer
# January should be 1. See solar.Julian_Day().
#------------------------------------------------
month_list = ['January', 'February', 'March', 'April',
'May', 'June', 'July', 'August', 'September',
'October', 'November', 'December']
self.start_month = month_list.index( self.start_month ) + 1
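        # e.g. 'March' -> month_list.index('March') + 1 == 3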
#-------------------------------
# Initialize some more toggles
#-------------------------------
if not(hasattr(self, 'SAVE_QSW_GRIDS')):
self.SAVE_QSW_GRIDS = False
if not(hasattr(self, 'SAVE_QLW_GRIDS')):
self.SAVE_QLW_GRIDS = False
#-------------------------------------------
if not(hasattr(self, 'SAVE_QSW_PIXELS')):
self.SAVE_QSW_PIXELS = False
if not(hasattr(self, 'SAVE_QLW_PIXELS')):
self.SAVE_QLW_PIXELS = False
#---------------------------------------------------------
# Make sure that all "save_dts" are larger or equal to
# the specified process dt. There is no point in saving
# results more often than they change.
# Issue a message to this effect if any are smaller ??
#---------------------------------------------------------
self.save_grid_dt = np.maximum(self.save_grid_dt, self.dt)
self.save_pixels_dt = np.maximum(self.save_pixels_dt, self.dt)
# set_computed_input_vars()
#-------------------------------------------------------------------
def initialize_computed_vars(self):
#------------------------------------------------------
# Note: Some of these require "self.rti", which is
# only stored by read_grid_info() after the
# set_computed_input_vars() function is called.
# So these parts can't go there.
#------------------------------------------------------
#---------------------------------------
# Add self.in_directory to:
# slope_grid_file & aspect_grid_file
#---------------------------------------
self.slope_grid_file = (self.in_directory + self.slope_grid_file)
self.aspect_grid_file = (self.in_directory + self.aspect_grid_file)
#-------------------------------------------------
# Read slope grid & convert to slope angle, beta
# NB! RT slope grids have NaNs on edges.
#-------------------------------------------------
slopes = rtg_files.read_grid( self.slope_grid_file, self.rti,
RTG_type='FLOAT' )
beta = np.arctan( slopes )
beta = (self.twopi + beta) % self.twopi
#---------------------------------------------
w_nan = np.where( np.logical_not(np.isfinite(beta)) )
n_nan = np.size(w_nan[0])
if (n_nan != 0):
beta[ w_nan ] = np.float64(0)
#------------------------------------------------------------------
w_bad = np.where( np.logical_or( (beta < 0), (beta > np.pi / 2) ) )
n_bad = np.size(w_bad[0])
if (n_bad != 0):
            msg = ['ERROR: Some slope angles are out of range.', ' ']
            for line in msg:
                print line
## result = GUI_Message(msg, INFO=True, TITLE='ERROR MESSAGE')
return
self.beta = beta ######
#------------------------------------------------------
# Read aspect grid. Alpha must be CW from north.
# NB! RT aspect grids have NaNs on edges.
#---------------------------------------------------------
# RT files ending in "_mf-angle.rtg" and "fd-aspect.rtg"
# contain aspect values. The former are in [0, 2 Pi]
# while the latter are in [-Pi, Pi] and both measure
# CCW from due east.
#---------------------------------------------------------
aspects = rtg_files.read_grid( self.aspect_grid_file, self.rti,
RTG_type='FLOAT' )
alpha = (np.pi / 2) - aspects
alpha = (self.twopi + alpha) % self.twopi
#-----------------------------------------------
w_nan = np.where( np.logical_not( np.isfinite(alpha) ) )
n_nan = np.size( w_nan[0] )
if (n_nan != 0):
alpha[ w_nan ] = np.float64(0)
self.alpha = alpha ######
#---------------------------
# Create lon and lat grids
#---------------------------
if (self.rti.pixel_geom == 0):
self.lon_deg = solar.Longitude_Grid( self.rti )
self.lat_deg = solar.Latitude_Grid( self.rti )
## print 'Lon grid ='
## print self.lon_deg
## print 'Lat grid ='
## print self.lat_deg
#-----------------------------
# Write grids to RTG files ?
#-----------------------------
## lon_file = (self.out_directory + self.site_prefix + '_lons.bin')
## rtg_files.write_grid( self.lon_deg, lon_file, self.rti )
## lat_file = (self.out_directory + self.site_prefix + '_lats.bin')
## rtg_files.write_grid( self.lat_deg, lat_file, self.rti )
else:
print 'SORRY: Cannot yet create lon and lat grids for'
print ' this DEM because it uses UTM coordinates.'
print ' Will use lat/lon for Denver, Colorado.'
print ' '
#--------------------------------------------
# For now, use scalar values for Denver, CO
#--------------------------------------------
self.lon_deg = np.float64( -104.9841667 )
self.lat_deg = np.float64( 39.7391667 )
## return
#-------------------------------------------------
# Initialize max precip rate with the first rate
#------------------------------------------------
# Note: Need this here because rate may be
# zero at the end of update_precip_rate()
#------------------------------------------------
# vol_P is used for mass balance check.
#------------------------------------------------
P_max = self.P.max() # (after read_input_files)
## self.P_max = self.P.max()
self.P_max = self.initialize_scalar( P_max, dtype='float64')
self.vol_P = self.initialize_scalar( 0, dtype='float64')
#----------------------------------------------------------
# For using new framework which embeds references from
# meteorology to snow, etc., these need to be defined
# in the initialize step. However, they will most likely
# change from scalar to grid during update, so we need to
# check that the reference isn't broken when the dtype
# changes. (5/17/12)
#----------------------------------------------------------
# These depend on grids alpha and beta, so will be grids.
#----------------------------------------------------------
self.Qn_SW = np.zeros([self.ny, self.nx], dtype='float64')
self.Qn_LW = np.zeros([self.ny, self.nx], dtype='float64')
self.Qn_tot = np.zeros([self.ny, self.nx], dtype='float64')
self.Q_sum = np.zeros([self.ny, self.nx], dtype='float64')
#----------------------------------------------------------
# self.Qn_SW = self.initialize_scalar( 0, dtype='float64')
# self.Qn_LW = self.initialize_scalar( 0, dtype='float64')
# self.Qn_tot = self.initialize_scalar( 0, dtype='float64')
# self.Q_sum = self.initialize_scalar( 0, dtype='float64')
#----------------------------------------------------------
# These may be scalars or grids.
#---------------------------------
self.Qe = self.initialize_scalar( 0, dtype='float64')
self.e_air = self.initialize_scalar( 0, dtype='float64')
self.e_surf = self.initialize_scalar( 0, dtype='float64')
self.em_air = self.initialize_scalar( 0, dtype='float64')
self.Qc = self.initialize_scalar( 0, dtype='float64')
self.Qa = self.initialize_scalar( 0, dtype='float64')
#------------------------------------
# Initialize the decimal Julian day
#------------------------------------
self.julian_day = solar.Julian_Day( self.start_month,
self.start_day,
self.start_hour )
## print ' julian_day =', self.julian_day
# initialize_computed_vars()
#-------------------------------------------------------------------
def update_P_integral(self):
#---------------------------------------------------
# Notes: This can be used for mass balance checks,
# such as now done by update_mass_totals()
# in topoflow.py. The "dt" here should be
# TopoFlow's "main dt" vs. the process dt.
# dV[i] = P[i] * da[i] * dt, dV = sum(dV[i])
#---------------------------------------------------
if (self.DEBUG):
print 'Calling update_P_integral()...'
#------------------------------------------------
# Update mass total for P, sum over all pixels
#------------------------------------------------
volume = np.double(self.P * self.da * self.dt) # [m^3]
if (np.size(volume) == 1):
self.vol_P += (volume * self.rti.n_pixels)
else:
self.vol_P += np.sum(volume)
# update_P_integral()
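    #-------------------------------------------------------------------
    # Example (hedged, standalone sketch): for a uniform rain rate of
    # 10 mmph over one 100 m x 100 m pixel and dt = 60 s, the volume
    # added by update_P_integral() would be about 1.67 m^3:
    #
    ## P  = 10.0 / (1000.0 * 3600)   # [mmph -> m/s] ~ 2.78e-6
    ## da = 100.0 * 100.0            # pixel area [m^2]
    ## dt = 60.0                     # [s]
    ## dV = P * da * dt              # ~ 1.67 [m^3]
    #-------------------------------------------------------------------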
#-------------------------------------------------------------------
def update_P_max(self):
if (self.DEBUG):
print 'Calling update_P_max()...'
#-----------------------------------------
# Save the maximum precip. rate in [m/s]
#-------------------------------------------
# Must use "fill()" to preserve reference.
#-------------------------------------------
self.P_max.fill( np.maximum(self.P_max, self.P.max()) )
## self.P_max = np.maximum(self.P_max, self.P.max())
#---------------
# For testing
#--------------
## print '##### P =', self.P * 1000 * 3600 # (mmph)
## print '##### P_max =', self.P_max * 1000 * 3600 # (mmph)
# update_P_max()
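    #-------------------------------------------------------------------
    # Example (hedged, standalone sketch): why "fill()" is needed for
    # a 0-d "mutable scalar".  Assignment rebinds the name and breaks
    # references held by other components:
    #
    ## import numpy as np
    ## a = np.array( 1.0, dtype='float64' )  # 0-d array, shared by ref
    ## b = a                                 # another component's ref
    ## a.fill( 2.0 )                         # b also sees 2.0
    ## # a = np.float64( 2.0 )               # would leave b at 1.0
    #-------------------------------------------------------------------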
#-------------------------------------------------------------------
def update_P_rain(self):
#-----------------------------------------------------------
# Note: This routine is written so that it doesn't matter
# whether P and T_air are grids or scalars.
# For scalars: 1.5 * True = 1.5, 1.5 * False = 0.
# Here are the possible combinations for checking.
#-----------------------------------------------------------
# P T_air P_rain
#----------------------------
# scalar scalar scalar
# scalar grid grid
# grid scalar grid
# grid grid grid
#----------------------------
if (self.DEBUG):
print 'Calling update_P_rain()...'
#-------------------------------------------------
# P_rain is the precip that falls as liquid that
# can contribute to runoff production.
#-------------------------------------------------
# P_rain is used by channel_base.update_R.
#-------------------------------------------------
P_rain = self.P * (self.T_air > 0)
self.update_var( 'P_rain', P_rain ) ## (2/14/17)
# if (np.ndim( self.P_rain ) == 0):
# self.P_rain.fill( P_rain ) #### (mutable scalar)
# else:
# self.P_rain[:] = P_rain
if (self.DEBUG):
if (self.P_rain.max() > 0):
print ' >> Rain is falling...'
#--------------
# For testing
#--------------
## print 'shape(P) =', shape(self.P)
## print 'shape(T_air) =', shape(self.T_air)
## print 'shape(P_rain) =', shape(self.P_rain)
## print 'T_air =', self.T_air
#########################################
#### Old note, to remember for later.
#--------------------------------------------------
# (2/7/13) We must use "*=" to preserve reference
# if P is a "mutable scalar".
#--------------------------------------------------
# update_P_rain()
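    #-------------------------------------------------------------------
    # Example (hedged, standalone sketch): P * (T_air > 0) handles all
    # four scalar/grid combinations via NumPy broadcasting, e.g.:
    #
    ## import numpy as np
    ## P      = np.float64( 2e-6 )        # scalar rate [m/s]
    ## T_air  = np.array([[-2.0, 5.0]])   # grid [deg C]
    ## P_rain = P * (T_air > 0)           # [[0.0, 2e-6]], a grid
    #-------------------------------------------------------------------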
#-------------------------------------------------------------------
def update_P_snow(self):
#----------------------------------------------------
# Notes: Rain and snow may fall simultaneously at
# different grid cells in the model domain.
#----------------------------------------------------
if (self.DEBUG):
print 'Calling update_P_snow()...'
#-------------------------------------------------
# P_snow is the precip that falls as snow or ice
# that contributes to the snow depth. This snow
# may melt to contribute to runoff later on.
#-------------------------------------------------
# P_snow is used by snow_base.update_depth.
#-------------------------------------------------
P_snow = self.P * (self.T_air <= 0)
self.update_var( 'P_snow', P_snow ) ## (2/14/17)
# if (np.ndim( self.P_snow ) == 0):
# self.P_snow.fill( P_snow ) #### (mutable scalar)
# else:
# self.P_snow[:] = P_snow
if (self.DEBUG):
if (self.P_snow.max() > 0):
print ' >> Snow is falling...'
# update_P_snow()
#-------------------------------------------------------------------
def update_bulk_richardson_number(self):
if (self.DEBUG):
print 'Calling update_bulk_richardson_number()...'
#---------------------------------------------------------------
# (9/6/14) Found a typo in the Zhang et al. (2000) paper,
# in the definition of Ri. Also see Price and Dunne (1976).
# We should have (Ri > 0) and (T_surf > T_air) when STABLE.
# This also removes problems/singularities in the corrections
# for the stable and unstable cases in the next function.
#---------------------------------------------------------------
# Notes: Other definitions are possible, such as the one given
# by Dingman (2002, p. 599). However, this one is the
# one given by Zhang et al. (2000) and is meant for use
# with the stability criterion also given there.
#---------------------------------------------------------------
#### top = self.g * self.z * (self.T_air - self.T_surf) # BUG.
top = self.g * self.z * (self.T_surf - self.T_air)
bot = (self.uz)**2.0 * (self.T_air + np.float64(273.15))
self.Ri = (top / bot)
# update_bulk_richardson_number()
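    #-------------------------------------------------------------------
    # Example (hedged, standalone sketch): a case with the surface
    # warmer than the air gives Ri > 0 (STABLE, per the note above):
    #
    ## g, z, uz = 9.81, 10.0, 3.0    # [m s-2], [m], [m s-1]
    ## T_air    = 5.0                # [deg C]
    ## T_surf   = 8.0                # [deg C]
    ## Ri = (g * z * (T_surf - T_air)) / (uz**2.0 * (T_air + 273.15))
    ## # Ri ~ 0.118 > 0   => STABLE
    #-------------------------------------------------------------------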
#-------------------------------------------------------------------
def update_bulk_aero_conductance(self):
if (self.DEBUG):
print 'Calling update_bulk_aero_conductance()...'
#----------------------------------------------------------------
# Notes: Dn = bulk exchange coeff for the conditions of
# neutral atmospheric stability [m/s]
# Dh = bulk exchange coeff for heat [m/s]
# De = bulk exchange coeff for vapor [m/s]
# h_snow = snow depth [m]
# z0_air = surface roughness length scale [m]
# (includes vegetation not covered by snow)
# z = height that has wind speed uz [m]
# uz = wind speed at height z [m/s]
        #                 kappa    = 0.41 = von Karman's constant [unitless]
# RI = Richardson's number (see function)
#----------------------------------------------------------------
h_snow = self.h_snow # (ref from new framework)
#---------------------------------------------------
# Compute bulk exchange coeffs (neutral stability)
# using the logarithm "law of the wall".
#-----------------------------------------------------
# Note that "arg" = the drag coefficient (unitless).
#-----------------------------------------------------
arg = self.kappa / np.log((self.z - h_snow) / self.z0_air)
Dn = self.uz * (arg)**2.0
#-----------------------------------------------
# NB! Dn could be a scalar or a grid, so this
# must be written to handle both cases.
# Note that WHERE can be used on a scalar:
# IDL> a = 1
# IDL> print, size(a)
# IDL> w = where(a ge 1, nw)
# IDL> print, nw
# IDL> a[w] = 2
# IDL> print, a
# IDL> print, size(a)
#-----------------------------------------------
###########################################################
# NB! If T_air and T_surf are both scalars, then next
# few lines won't work because we can't index the
# resulting empty "w" (even if T_air == T_surf).
###########################################################
## w = np.where(self.T_air != self.T_surf)
## nw = np.size(w[0])
## ## nw = np.size(w,0) # (doesn't work if 2 equal scalars)
#----------------------------------------------------------
T_AIR_SCALAR = (np.ndim( self.T_air ) == 0)
T_SURF_SCALAR = (np.ndim( self.T_surf ) == 0)
if (T_AIR_SCALAR and T_SURF_SCALAR):
            #--------------------------------------------------
            # Count non-neutral "pixels", as in the grid case.
            #--------------------------------------------------
            if (self.T_air != self.T_surf): nw=1
            else: nw=0
else:
w = np.where(self.T_air != self.T_surf)
nw = np.size(w[0])
if (nw == 0):
#--------------------------------------------
# All pixels are neutral. Set Dh = De = Dn.
#--------------------------------------------
self.Dn = Dn
self.Dh = Dn
self.De = Dn
return
#-------------------------------------
# One or more pixels are not neutral
# so make a correction using RI
#---------------------------------------------
# NB! RI could be a grid when Dn is a
# scalar, and this will change Dn to a grid.
#---------------------------------------------
# Ri = Richardson_Number(z, uz, T_air, T_surf)
#--------------------------------------------
# Before 12/21/07. Has bug if RI is a grid
#--------------------------------------------
# w_stable = where(*T_air gt *T_surf, n_stable)
# if (n_stable ne 0) then begin
# Dn[w_stable] = Dn[w_stable]/(1d + (10d * RI))
# endif
# w_unstable = where(*T_air lt *T_surf, n_unstable)
# if (n_unstable ne 0) then begin
#----------------------------------------------
        # Multiplication and subtraction vs. opposites
# for the stable case. Zhang et al. (2000)
# Hopefully not just a typo.
#----------------------------------------------
# Dn[w_unstable] = Dn[w_unstable]*(1d - (10d * self.Ri))
# endif
#-----------------
# After 12/21/07
#------------------------------------------------------------
# If T_air, T_surf or uz is a grid, then Ri will be a grid.
        # This version makes only one call to WHERE, so it's faster.
#------------------------------------------------------------
        # Multiplication and subtraction vs. opposites for the
# stable case (Zhang et al., 2000); hopefully not a typo.
# It plots as a smooth curve through Ri=0.
#------------------------------------------------------------
# (9/7/14) Modified so that Dn is saved, but Dh = De.
#------------------------------------------------------------
Dh = Dn.copy() ### (9/7/14. Save Dn also.)
nD = np.size( Dh )
nR = np.size( self.Ri )
if (nR > 1):
#--------------------------
# Case where RI is a grid
#--------------------------
ws = np.where( self.Ri > 0 )
ns = np.size( ws[0] )
wu = np.where( np.invert(self.Ri > 0) )
nu = np.size( wu[0] )
if (nD == 1):
                #---------------------------------------------------
                # Dn is a scalar but Ri is a grid, so broadcast Dh
                # to a grid so the indexed corrections below work.
                #---------------------------------------------------
                Dh = Dh + np.zeros([self.ny, self.nx], dtype='float64')
if (ns != 0):
#----------------------------------------------------------
# If (Ri > 0), or (T_surf > T_air), then STABLE. (9/6/14)
#----------------------------------------------------------
Dh[ws] = Dh[ws] / (np.float64(1) + (np.float64(10) * self.Ri[ws]))
if (nu != 0):
Dh[wu] = Dh[wu] * (np.float64(1) - (np.float64(10) * self.Ri[wu]))
else:
#----------------------------
# Case where Ri is a scalar
#--------------------------------
# Works if Dh is grid or scalar
#--------------------------------
if (self.Ri > 0):
Dh = Dh / (np.float64(1) + (np.float64(10) * self.Ri))
else:
Dh = Dh * (np.float64(1) - (np.float64(10) * self.Ri))
#----------------------------------------------------
# NB! We currently assume that these are all equal.
#----------------------------------------------------
self.Dn = Dn
self.Dh = Dh
self.De = Dh ## (assumed equal)
# update_bulk_aero_conductance()
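    #-------------------------------------------------------------------
    # Example (hedged, standalone sketch): the neutral-stability bulk
    # exchange coefficient from the "law of the wall", for assumed
    # values kappa = 0.41, z = 10 m, h_snow = 0, z0_air = 0.02 m,
    # uz = 3 m/s:
    #
    ## import numpy as np
    ## arg = 0.41 / np.log( (10.0 - 0.0) / 0.02 )  # ~ 0.066 (unitless)
    ## Dn  = 3.0 * (arg)**2.0                      # ~ 0.013 [m/s]
    #-------------------------------------------------------------------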
#-------------------------------------------------------------------
def update_sensible_heat_flux(self):
#--------------------------------------------------------
# Notes: All the Q's have units of W/m^2 = J/(m^2 s).
# Dh is returned by Bulk_Exchange_Coeff function
# and is not a pointer.
#--------------------------------------------------------
if (self.DEBUG):
            print 'Calling update_sensible_heat_flux()...'
#---------------------
# Physical constants
#---------------------
# rho_air = 1.225d ;[kg m-3, at sea-level]
# Cp_air = 1005.7 ;[J kg-1 K-1]
#-----------------------------
# Compute sensible heat flux
#-----------------------------
delta_T = (self.T_air - self.T_surf)
self.Qh = (self.rho_air * self.Cp_air) * self.Dh * delta_T
# update_sensible_heat_flux()
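    #-------------------------------------------------------------------
    # Example (hedged, standalone sketch), using the constants noted
    # above (rho_air ~ 1.225 kg m-3, Cp_air ~ 1005.7 J kg-1 K-1) and
    # Dh = 0.013 m/s with T_air = 5, T_surf = 8 [deg C]:
    #
    ## Qh = (1.225 * 1005.7) * 0.013 * (5.0 - 8.0)   # ~ -48 [W m-2]
    ## # Qh < 0: the warmer surface loses sensible heat to the air.
    #-------------------------------------------------------------------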
#-------------------------------------------------------------------
def update_saturation_vapor_pressure(self, MBAR=False,
SURFACE=False):
if (self.DEBUG):
print 'Calling update_saturation_vapor_pressure()...'
#----------------------------------------------------------------
        # Notes: Saturation vapor pressure is a function of temperature.
        #        T is temperature in Celsius.  By default, the method
        #        of Brutsaert (1975) is used.  However, if the SATTERLUND
        #        keyword is set, then the method of Satterlund (1979) is
        #        used.  When plotted, they look almost identical.  See
# the Compare_em_air_Method routine in Qnet_file.pro.
# Dingman (2002) uses the Brutsaert method.
# Liston (1995, EnBal) uses the Satterlund method.
# By default, the result is returned with units of kPa.
# Set the MBAR keyword for units of millibars.
# 100 kPa = 1 bar = 1000 mbars
# => 1 kPa = 10 mbars
#----------------------------------------------------------------
#NB! Here, 237.3 is correct, and not a misprint of 273.2.
# See footnote on p. 586 in Dingman (Appendix D).
#----------------------------------------------------------------
if (SURFACE):
## if (self.T_surf_type in ['Scalar', 'Grid']):
## return
T = self.T_surf
else:
## if (self.T_air_type in ['Scalar', 'Grid']):
## return
T = self.T_air
if not(self.SATTERLUND):
#------------------------------
# Use Brutsaert (1975) method
#------------------------------
term1 = (np.float64(17.3) * T) / (T + np.float64(237.3))
e_sat = np.float64(0.611) * np.exp(term1) # [kPa]
else:
#-------------------------------
# Use Satterlund (1979) method ############ DOUBLE CHECK THIS (7/26/13)
#-------------------------------
term1 = np.float64(2353) / (T + np.float64(273.15))
e_sat = np.float64(10) ** (np.float64(11.4) - term1) # [Pa]
e_sat = (e_sat / np.float64(1000)) # [kPa]
#-----------------------------------
# Convert units from kPa to mbars?
#-----------------------------------
if (MBAR):
e_sat = (e_sat * np.float64(10)) # [mbar]
if (SURFACE):
self.e_sat_surf = e_sat
else:
self.e_sat_air = e_sat
# update_saturation_vapor_pressure()
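    #-------------------------------------------------------------------
    # Example (hedged, standalone sketch): Brutsaert (1975) saturation
    # vapor pressure at T = 20 deg C, with the kPa -> mbar conversion:
    #
    ## import numpy as np
    ## T = 20.0
    ## e_sat = 0.611 * np.exp( (17.3 * T) / (T + 237.3) )  # ~ 2.34 [kPa]
    ## e_sat = e_sat * 10.0                                # ~ 23.4 [mbar]
    #-------------------------------------------------------------------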
#-------------------------------------------------------------------
def update_vapor_pressure(self, MBAR=False,
SURFACE=False):
if (self.DEBUG):
print 'Calling update_vapor_pressure()...'
#---------------------------------------------------
# Notes: T is temperature in Celsius
# RH = relative humidity, in [0,1]
# by definition, it equals (e / e_sat)
# e has units of kPa.
#---------------------------------------------------
if (SURFACE):
## if (self.T_surf_type in ['Scalar', 'Grid']) and \
## (self.RH_type in ['Scalar', 'Grid']):
## return
e_sat = self.e_sat_surf
else:
## if (self.T_air_type in ['Scalar', 'Grid']) and \
## (self.RH_type in ['Scalar', 'Grid']):
## return
e_sat = self.e_sat_air
e = (self.RH * e_sat)
#-----------------------------------
# Convert units from kPa to mbars?
#-----------------------------------
if (MBAR):
e = (e * np.float64(10)) # [mbar]
if (SURFACE):
self.e_surf = e
else:
self.e_air = e
# update_vapor_pressure()
#-------------------------------------------------------------------
def update_dew_point(self):
if (self.DEBUG):
print 'Calling update_dew_point()...'
#-----------------------------------------------------------
# Notes: The dew point is a temperature in degrees C and
# is a function of the vapor pressure, e_air.
# Vapor pressure is a function of air temperature,
# T_air, and relative humidity, RH.
# The formula used here needs e_air in kPa units.
# See Dingman (2002, Appendix D, p. 587).
#-----------------------------------------------------------
e_air_kPa = self.e_air / np.float64(10) # [kPa]
log_vp = np.log( e_air_kPa )
top = log_vp + np.float64(0.4926)
bot = np.float64(0.0708) - (np.float64(0.00421) * log_vp)
self.T_dew = (top / bot) # [degrees C]
# update_dew_point()
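    #-------------------------------------------------------------------
    # Example (hedged, standalone sketch): for e_air = 12 mbar, the
    # formula above gives T_dew ~ 9.6 deg C, consistent with inverting
    # the Brutsaert e_sat formula at that vapor pressure:
    #
    ## import numpy as np
    ## log_vp = np.log( 12.0 / 10.0 )   # e_air in [kPa]
    ## T_dew  = (log_vp + 0.4926) / (0.0708 - (0.00421 * log_vp))
    ## # T_dew ~ 9.6 [deg C]
    #-------------------------------------------------------------------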
#-------------------------------------------------------------------
def update_precipitable_water_content(self):
if (self.DEBUG):
print 'Calling update_precipitable_water_content()...'
#------------------------------------------------------------
# Notes: W_p is precipitable water content in centimeters,
# which depends on air temp and relative humidity.
#------------------------------------------------------------
arg = np.float64( 0.0614 * self.T_dew )
self.W_p = np.float64(1.12) * np.exp( arg ) # [cm]
# update_precipitable_water_content()
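    #-------------------------------------------------------------------
    # Example (hedged, standalone sketch): with T_dew = 9.6 deg C from
    # the previous example:
    #
    ## import numpy as np
    ## W_p = 1.12 * np.exp( 0.0614 * 9.6 )   # ~ 2.0 [cm]
    #-------------------------------------------------------------------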
#-------------------------------------------------------------------
def update_latent_heat_flux(self):
if (self.DEBUG):
print 'Calling update_latent_heat_flux()...'
#--------------------------------------------------------
# Notes: Pressure units cancel out because e_air and
# e_surf (in numer) have same units (mbar) as
# p0 (in denom).
#--------------------------------------------------------
# According to Dingman (2002, p. 273), constant should
# be 0.622 instead of 0.662 (Zhang et al., 2000).
#--------------------------------------------------------
const = self.latent_heat_constant
factor = (self.rho_air * self.Lv * self.De)
delta_e = (self.e_air - self.e_surf)
self.Qe = factor * delta_e * (const / self.p0)
# update_latent_heat_flux()
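    #-------------------------------------------------------------------
    # Example (hedged, standalone sketch), with assumed values
    # Lv = 2.5e6 J/kg, De = 0.013 m/s, p0 = 1000 mbar, const = 0.622,
    # and a surface 3 mbar moister than the air:
    #
    ## Qe = (1.225 * 2.5e6 * 0.013) * (-3.0) * (0.622 / 1000.0)
    ## # Qe ~ -74 [W m-2]   (evaporative cooling of the surface)
    #-------------------------------------------------------------------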
#-------------------------------------------------------------------
def update_conduction_heat_flux(self):
if (self.DEBUG):
print 'Calling update_conduction_heat_flux()...'
#-----------------------------------------------------------------
# Notes: The conduction heat flux from snow to soil for computing
# snowmelt energy, Qm, is close to zero.
# However, the conduction heat flux from surface and sub-
# surface for computing Qet is given by Fourier's Law,
# namely Qc = Ks(Tx - Ts)/x.
# All the Q's have units of W/m^2 = J/(m^2 s).
#-----------------------------------------------------------------
pass # (initialized at start)
# update_conduction_heat_flux()
#-------------------------------------------------------------------
def update_advection_heat_flux(self):
if (self.DEBUG):
print 'Calling update_advection_heat_flux()...'
#------------------------------------------------------
# Notes: All the Q's have units of W/m^2 = J/(m^2 s).
#------------------------------------------------------
pass # (initialized at start)
# update_advection_heat_flux()
#-------------------------------------------------------------------
def update_julian_day(self):
if (self.DEBUG):
print 'Calling update_julian_day()...'
#----------------------------------
# Update the *decimal* Julian day
#----------------------------------
self.julian_day += (self.dt / self.secs_per_day) # [days]
#------------------------------------------
# Compute the offset from True Solar Noon
# clock_hour is in 24-hour military time
# but it can have a decimal part.
#------------------------------------------
dec_part = self.julian_day - np.int16(self.julian_day)
clock_hour = dec_part * self.hours_per_day
## print ' Computing solar_noon...'
solar_noon = solar.True_Solar_Noon( self.julian_day,
self.lon_deg,
self.GMT_offset )
## print ' Computing TSN_offset...'
self.TSN_offset = (clock_hour - solar_noon) # [hours]
# update_julian_day()
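    #-------------------------------------------------------------------
    # Example (hedged, standalone sketch): extracting the clock hour
    # from a decimal Julian day, as done above:
    #
    ## julian_day = 172.75              # decimal Julian day
    ## dec_part   = julian_day - 172    # 0.75 [days]
    ## clock_hour = dec_part * 24.0     # 18.0, i.e. 6 PM
    #-------------------------------------------------------------------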
#-------------------------------------------------------------------
def update_net_shortwave_radiation(self):
#---------------------------------------------------------
# Notes: If time is before local sunrise or after local
# sunset then Qn_SW should be zero.
#---------------------------------------------------------
if (self.DEBUG):
print 'Calling update_net_shortwave_radiation()...'
#---------------------------------------
# Compute Qn_SW for this time [W m-2]
#---------------------------------------
Qn_SW = solar.Clear_Sky_Radiation( self.lat_deg,
self.julian_day,
self.W_p,
self.TSN_offset,
self.alpha,
self.beta,
self.albedo,
self.dust_atten )
self.update_var( 'Qn_SW', Qn_SW ) ## (2/14/17)
# if (np.ndim( self.Qn_SW ) == 0):
# self.Qn_SW.fill( Qn_SW ) #### (mutable scalar)
# else:
# self.Qn_SW[:] = Qn_SW # [W m-2]
# update_net_shortwave_radiation()
#-------------------------------------------------------------------
def update_em_air(self):
if (self.DEBUG):
print 'Calling update_em_air()...'
#---------------------------------------------------------
# NB! The Brutsaert and Satterlund formulas for air
# emissivity as a function of air temperature are in
# close agreement; see compare_em_air_methods().
# However, we must pay close attention to whether
# equations require units of kPa, Pa, or mbar.
#
# 100 kPa = 1 bar = 1000 mbars
# => 1 kPa = 10 mbars
#---------------------------------------------------------
# NB! Temperatures are assumed to be given with units
# of degrees Celsius and are converted to Kelvin
# wherever necessary by adding C_to_K = 273.15.
#
# RH = relative humidity [unitless]
#---------------------------------------------------------
# NB! I'm not sure about how F is added at end because
# of how the equation is printed in Dingman (2002).
# But it reduces to other formulas as it should.
#---------------------------------------------------------
T_air_K = self.T_air + self.C_to_K
if not(self.SATTERLUND):
#-----------------------------------------------------
# Brutsaert (1975) method for computing emissivity
# of the air, em_air. This formula uses e_air with
# units of kPa. (From Dingman (2002, p. 196).)
# See notes for update_vapor_pressure().
#-----------------------------------------------------
e_air_kPa = self.e_air / np.float64(10) # [kPa]
F = self.canopy_factor
C = self.cloud_factor
term1 = (1.0 - F) * 1.72 * (e_air_kPa / T_air_K) ** self.one_seventh
term2 = (1.0 + (0.22 * C ** 2.0))
self.em_air = (term1 * term2) + F
else:
#--------------------------------------------------------
# Satterlund (1979) method for computing the emissivity
# of the air, em_air, that is intended to "correct
# apparent deficiencies in this formulation at air
# temperatures below 0 degrees C" (see G. Liston)
# Liston cites Aase and Idso(1978), Satterlund (1979)
#--------------------------------------------------------
e_air_mbar = self.e_air
eterm = np.exp(-1 * (e_air_mbar)**(T_air_K / 2016) )
self.em_air = 1.08 * (1.0 - eterm)
#--------------------------------------------------------------
        # Can't do this yet.  em_air is always initialized as a scalar
        # now but may change to a grid on assignment.  (9/23/14)
#--------------------------------------------------------------
# if (np.ndim( self.em_air ) == 0):
# self.em_air.fill( em_air ) #### (mutable scalar)
# else:
# self.em_air[:] = em_air
# update_em_air()
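    #-------------------------------------------------------------------
    # Example (hedged, standalone sketch): Brutsaert air emissivity
    # for T_air = 20 deg C, e_air = 1.2 kPa and F = C = 0:
    #
    ## em_air = 1.72 * (1.2 / (20.0 + 273.15)) ** (1.0 / 7)   # ~ 0.78
    #-------------------------------------------------------------------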
#-------------------------------------------------------------------
def update_net_longwave_radiation(self):
#----------------------------------------------------------------
# Notes: Net longwave radiation is computed using the
# Stefan-Boltzman law. All four data types
# should be allowed (scalar, time series, grid or
# grid stack).
#
# Qn_LW = (LW_in - LW_out)
# LW_in = em_air * sigma * (T_air + 273.15)^4
# LW_out = em_surf * sigma * (T_surf + 273.15)^4
#
# Temperatures in [deg_C] must be converted to
# [K]. Recall that absolute zero occurs at
# 0 [deg_K] or -273.15 [deg_C].
#
#----------------------------------------------------------------
# First, e_air is computed as:
# e_air = RH * 0.611 * exp[(17.3 * T_air) / (T_air + 237.3)]
# Then, em_air is computed as:
# em_air = (1 - F) * 1.72 * [e_air / (T_air + 273.15)]^(1/7) *
# (1 + 0.22 * C^2) + F
#----------------------------------------------------------------
if (self.DEBUG):
print 'Calling update_net_longwave_radiation()...'
#--------------------------------
# Compute Qn_LW for this time
#--------------------------------
T_air_K = self.T_air + self.C_to_K
T_surf_K = self.T_surf + self.C_to_K
LW_in = self.em_air * self.sigma * (T_air_K)** 4.0
LW_out = self.em_surf * self.sigma * (T_surf_K)** 4.0
LW_out = LW_out + ((1.0 - self.em_surf) * LW_in)
self.Qn_LW = (LW_in - LW_out) # [W m-2]
#--------------------------------------------------------------
        # Can't do this yet.  Qn_LW is always initialized as a grid
        # now but will often be created above as a scalar.  (9/23/14)
#--------------------------------------------------------------
# if (np.ndim( self.Qn_LW ) == 0):
# self.Qn_LW.fill( Qn_LW ) #### (mutable scalar)
# else:
# self.Qn_LW[:] = Qn_LW # [W m-2]
# update_net_longwave_radiation()
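    #-------------------------------------------------------------------
    # Example (hedged, standalone sketch): with sigma = 5.67e-8,
    # em_air = 0.78, em_surf = 0.98, T_air = 20 and T_surf = 15
    # [deg C], the fluxes above are roughly:
    #
    ## LW_in  = 0.78 * 5.67e-8 * (293.15)**4.0                  # ~ 327 [W m-2]
    ## LW_out = 0.98 * 5.67e-8 * (288.15)**4.0 + (0.02 * LW_in) # ~ 390 [W m-2]
    ## Qn_LW  = LW_in - LW_out                                  # ~ -63 [W m-2]
    #-------------------------------------------------------------------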
#-------------------------------------------------------------------
def update_net_total_radiation(self):
#-----------------------------------------------
# Notes: Added this on 9/11/14. Not used yet.
#------------------------------------------------------------
# Qn_SW = net shortwave radiation flux (solar)
# Qn_LW = net longwave radiation flux (air, surface)
#------------------------------------------------------------
if (self.DEBUG):
print 'Calling update_net_total_radiation()...'
Qn_tot = self.Qn_SW + self.Qn_LW # [W m-2]
self.update_var( 'Qn_tot', Qn_tot ) ## (2/14/17)
# if (np.ndim( self.Qn_tot ) == 0):
# self.Qn_tot.fill( Qn_tot ) #### (mutable scalar)
# else:
# self.Qn_tot[:] = Qn_tot # [W m-2]
# update_net_total_radiation()
#-------------------------------------------------------------------
def update_net_energy_flux(self):
if (self.DEBUG):
print 'Calling update_net_energy_flux()...'
#------------------------------------------------------
# Notes: Q_sum is used by "snow_energy_balance.py".
#------------------------------------------------------
# Qm = energy used to melt snowpack (if > 0)
# Qn_SW = net shortwave radiation flux (solar)
# Qn_LW = net longwave radiation flux (air, surface)
# Qh = sensible heat flux from turbulent convection
# between snow surface and air
# Qe = latent heat flux from evaporation, sublimation,
# and condensation
# Qa = energy advected by moving water (i.e. rainfall)
# (ARHYTHM assumes this to be negligible; Qa=0.)
# Qc = energy flux via conduction from snow to soil
# (ARHYTHM assumes this to be negligible; Qc=0.)
# Ecc = cold content of snowpack = amount of energy
# needed before snow can begin to melt [J m-2]
# All Q's here have units of [W m-2].
# Are they all treated as positive quantities ?
# rho_air = density of air [kg m-3]
# rho_snow = density of snow [kg m-3]
# Cp_air = specific heat of air [J kg-1 K-1]
        #        Cp_snow  = heat capacity (= specific heat)
        #                   of snow [J kg-1 K-1]
# Kh = eddy diffusivity for heat [m2 s-1]
# Ke = eddy diffusivity for water vapor [m2 s-1]
# Lv = latent heat of vaporization [J kg-1]
# Lf = latent heat of fusion [J kg-1]
# ------------------------------------------------------
# Dn = bulk exchange coeff for the conditions of
# neutral atmospheric stability [m/s]
# Dh = bulk exchange coeff for heat
# De = bulk exchange coeff for vapor
# ------------------------------------------------------
# T_air = air temperature [deg_C]
# T_surf = surface temperature [deg_C]
# T_snow = average snow temperature [deg_C]
# RH = relative humidity [unitless] (in [0,1])
# e_air = air vapor pressure at height z [mbar]
# e_surf = surface vapor pressure [mbar]
# ------------------------------------------------------
# h_snow = snow depth [m]
# z = height where wind speed is uz [m]
# uz = wind speed at height z [m/s]
# p0 = atmospheric pressure [mbar]
# T0 = snow temperature when isothermal [deg_C]
# (This is usually 0.)
# z0_air = surface roughness length scale [m]
# (includes vegetation not covered by snow)
# (Values from page 1033: 0.0013, 0.02 [m])
# kappa = von Karman's constant [unitless] = 0.41
# dt = snowmelt timestep [seconds]
#----------------------------------------------------------------
Q_sum = self.Qn_SW + self.Qn_LW + self.Qh + \
self.Qe + self.Qa + self.Qc # [W m-2]
self.update_var( 'Q_sum', Q_sum ) ## (2/14/17)
# if (np.ndim( self.Q_sum) == 0):
# self.Q_sum.fill( Q_sum ) #### (mutable scalar)
# else:
# self.Q_sum[:] = Q_sum # [W m-2]
# update_net_energy_flux()
#-------------------------------------------------------------------
def open_input_files(self):
if (self.DEBUG):
print 'Calling open_input_files()...'
self.P_file = self.in_directory + self.P_file
self.T_air_file = self.in_directory + self.T_air_file
self.T_surf_file = self.in_directory + self.T_surf_file
self.RH_file = self.in_directory + self.RH_file
self.p0_file = self.in_directory + self.p0_file
self.uz_file = self.in_directory + self.uz_file
self.z_file = self.in_directory + self.z_file
self.z0_air_file = self.in_directory + self.z0_air_file
self.albedo_file = self.in_directory + self.albedo_file
self.em_surf_file = self.in_directory + self.em_surf_file
self.dust_atten_file = self.in_directory + self.dust_atten_file
self.cloud_factor_file = self.in_directory + self.cloud_factor_file
self.canopy_factor_file = self.in_directory + self.canopy_factor_file
self.P_unit = model_input.open_file(self.P_type, self.P_file)
self.T_air_unit = model_input.open_file(self.T_air_type, self.T_air_file)
self.T_surf_unit = model_input.open_file(self.T_surf_type, self.T_surf_file)
self.RH_unit = model_input.open_file(self.RH_type, self.RH_file)
self.p0_unit = model_input.open_file(self.p0_type, self.p0_file)
self.uz_unit = model_input.open_file(self.uz_type, self.uz_file)
self.z_unit = model_input.open_file(self.z_type, self.z_file)
self.z0_air_unit = model_input.open_file(self.z0_air_type, self.z0_air_file)
#-----------------------------------------------
# These are needed to compute Qn_SW and Qn_LW.
#-----------------------------------------------
self.albedo_unit = model_input.open_file(self.albedo_type,
self.albedo_file)
self.em_surf_unit = model_input.open_file(self.em_surf_type,
self.em_surf_file)
self.dust_atten_unit = model_input.open_file(self.dust_atten_type,
self.dust_atten_file)
self.cloud_factor_unit = model_input.open_file(self.cloud_factor_type,
self.cloud_factor_file)
self.canopy_factor_unit = model_input.open_file(self.canopy_factor_type,
self.canopy_factor_file)
#----------------------------------------------------------------------------
# Note: GMT_offset plus slope and aspect grids will be read separately.
#----------------------------------------------------------------------------
## self.Qn_SW_unit = model_input.open_file(self.Qn_SW_type, self.Qn_SW_file)
## self.Qn_LW_unit = model_input.open_file(self.Qn_LW_type, self.Qn_LW_file)
# open_input_files()
#-------------------------------------------------------------------
def read_input_files(self):
if (self.DEBUG):
print 'Calling read_input_files()...'
rti = self.rti
#--------------------------------------------------------
# All grids are assumed to have a data type of Float32.
#--------------------------------------------------------
# NB! read_next() returns None if TYPE arg is "Scalar".
#--------------------------------------------------------
P = model_input.read_next(self.P_unit, self.P_type, rti,
factor=self.mmph_to_mps)
## print '######### self.P_type = ' + self.P_type
## print '######### np.ndim( P ) = ' + str(np.ndim(P))
if (P is not None):
## print 'MET: (time,P) =', self.time, P
self.update_var( 'P', P ) ### 11/15/16
## if (self.P_type.lower() != 'scalar'):
# if (np.ndim( self.P ) == 0):
# self.P.fill( P ) #### (2/7/13, mutable scalar)
# else:
# self.P = P
if (self.DEBUG or (self.time_index == 0)):
print 'In read_input_files():'
print ' min(P) =', P.min() * self.mps_to_mmph, ' [mmph]'
print ' max(P) =', P.max() * self.mps_to_mmph, ' [mmph]'
print ' '
else:
#-----------------------------------------------
# Either self.P_type is "Scalar" or we've read
# all of the data in the rain_rates file.
#-----------------------------------------------
if (self.P_type.lower() != 'scalar'):
#------------------------------------
# Precip is unique in this respect.
#--------------------------------------------------
# 2/7/13. Note that we don't change P from grid
# to scalar since that could cause trouble for
# other comps that use P, so we just zero it out.
#--------------------------------------------------
self.P.fill( 0 )
if (self.DEBUG):
print 'Reached end of file:', self.P_file
print ' P set to 0 by read_input_files().'
elif (self.time_sec >= self.dt):
self.P.fill( 0 )
if (self.DEBUG):
print 'Reached end of scalar rainfall duration.'
print ' P set to 0 by read_input_files().'
## print 'time_sec =', self.time_sec
## print 'met dt =', self.dt
## print '######### In met_base.read_input_files() #######'
## print 'self.P_type =', self.P_type
## print 'self.P =', self.P
#------------------------------------------------------------
# Read variables from files into scalars or grids while
# making sure to preserve references (in-place). (11/15/16)
#------------------------------------------------------------
model_input.read_next2(self, 'T_air', rti)
model_input.read_next2(self, 'T_surf', rti)
model_input.read_next2(self, 'RH', rti)
model_input.read_next2(self, 'p0', rti)
model_input.read_next2(self, 'uz', rti)
model_input.read_next2(self, 'z', rti)
model_input.read_next2(self, 'z0_air', rti)
#----------------------------------------------------
model_input.read_next2(self, 'albedo', rti)
model_input.read_next2(self, 'em_surf', rti)
model_input.read_next2(self, 'dust_atten', rti)
model_input.read_next2(self, 'cloud_factor', rti)
model_input.read_next2(self, 'canopy_factor', rti)
###############################################################
# If any of these are scalars (read from a time series file)
# then we'll need to use "fill()" method to prevent breaking
# the reference to the "mutable scalar". (2/7/13)
###############################################################
T_air = model_input.read_next(self.T_air_unit, self.T_air_type, rti)
self.update_var( 'T_air', T_air )
# if (T_air is not None): self.T_air = T_air
T_surf = model_input.read_next(self.T_surf_unit, self.T_surf_type, rti)
self.update_var( 'T_surf', T_surf )
# if (T_surf is not None): self.T_surf = T_surf
RH = model_input.read_next(self.RH_unit, self.RH_type, rti)
self.update_var( 'RH', RH )
# if (RH is not None): self.RH = RH
p0 = model_input.read_next(self.p0_unit, self.p0_type, rti)
self.update_var( 'p0', p0 )
# if (p0 is not None): self.p0 = p0
uz = model_input.read_next(self.uz_unit, self.uz_type, rti)
self.update_var( 'uz', uz )
# if (uz is not None): self.uz = uz
z = model_input.read_next(self.z_unit, self.z_type, rti)
self.update_var( 'z', z )
# if (z is not None): self.z = z
z0_air = model_input.read_next(self.z0_air_unit, self.z0_air_type, rti)
self.update_var( 'z0_air', z0_air )
# if (z0_air is not None): self.z0_air = z0_air
#----------------------------------------------------------------------------
# These are needed to compute Qn_SW and Qn_LW.
#----------------------------------------------------------------------------
# Note: We could later write a version of read_next() that takes "self"
# and "var_name" as args and that uses "exec()".
#----------------------------------------------------------------------------
albedo = model_input.read_next(self.albedo_unit, self.albedo_type, rti)
if (albedo is not None): self.albedo = albedo
em_surf = model_input.read_next(self.em_surf_unit, self.em_surf_type, rti)
if (em_surf is not None): self.em_surf = em_surf
dust_atten = model_input.read_next(self.dust_atten_unit, self.dust_atten_type, rti)
if (dust_atten is not None): self.dust_atten = dust_atten
cloud_factor = model_input.read_next(self.cloud_factor_unit, self.cloud_factor_type, rti)
if (cloud_factor is not None): self.cloud_factor = cloud_factor
canopy_factor = model_input.read_next(self.canopy_factor_unit, self.canopy_factor_type, rti)
if (canopy_factor is not None): self.canopy_factor = canopy_factor
#-------------------------------------------------------------
# Compute Qsw_prefactor from cloud_factor and canopy factor.
#-------------------------------------------------------------
## self.Qsw_prefactor =
#-------------------------------------------------------------
# These are currently treated as input data, but are usually
# generated by functions in Qnet_file.py. Later on, we'll
# provide the option to compute them "on the fly" with new
# functions called "update_net_shortwave_radiation()" and
# "update_net_longwave_radiation()", called from update().
#-------------------------------------------------------------
## Qn_SW = model_input.read_next(self.Qn_SW_unit, self.Qn_SW_type, rti)
## if (Qn_SW is not None): self.Qn_SW = Qn_SW
##
## Qn_LW = model_input.read_next(self.Qn_LW_unit, self.Qn_LW_type, rti)
## if (Qn_LW is not None): self.Qn_LW = Qn_LW
# read_input_files()
#-------------------------------------------------------------------
def close_input_files(self):
if (self.DEBUG):
print 'Calling close_input_files()...'
if (self.P_type != 'Scalar'): self.P_unit.close()
if (self.T_air_type != 'Scalar'): self.T_air_unit.close()
if (self.T_surf_type != 'Scalar'): self.T_surf_unit.close()
if (self.RH_type != 'Scalar'): self.RH_unit.close()
if (self.p0_type != 'Scalar'): self.p0_unit.close()
if (self.uz_type != 'Scalar'): self.uz_unit.close()
if (self.z_type != 'Scalar'): self.z_unit.close()
if (self.z0_air_type != 'Scalar'): self.z0_air_unit.close()
#---------------------------------------------------
# These are needed to compute Qn_SW and Qn_LW.
#---------------------------------------------------
if (self.albedo_type != 'Scalar'): self.albedo_unit.close()
if (self.em_surf_type != 'Scalar'): self.em_surf_unit.close()
if (self.dust_atten_type != 'Scalar'): self.dust_atten_unit.close()
if (self.cloud_factor_type != 'Scalar'): self.cloud_factor_unit.close()
if (self.canopy_factor_type != 'Scalar'): self.canopy_factor_unit.close()
## if (self.Qn_SW_type != 'Scalar'): self.Qn_SW_unit.close()
## if (self.Qn_LW_type != 'Scalar'): self.Qn_LW_unit.close()
## if (self.P_file != ''): self.P_unit.close()
## if (self.T_air_file != ''): self.T_air_unit.close()
## if (self.T_surf_file != ''): self.T_surf_unit.close()
## if (self.RH_file != ''): self.RH_unit.close()
## if (self.p0_file != ''): self.p0_unit.close()
## if (self.uz_file != ''): self.uz_unit.close()
## if (self.z_file != ''): self.z_unit.close()
## if (self.z0_air_file != ''): self.z0_air_unit.close()
## #--------------------------------------------------------
## if (self.Qn_SW_file != ''): self.Qn_SW_unit.close()
## if (self.Qn_LW_file != ''): self.Qn_LW_unit.close()
# close_input_files()
#-------------------------------------------------------------------
def update_outfile_names(self):
if (self.DEBUG):
print 'Calling update_outfile_names()...'
#-------------------------------------------------
# Notes: Append out_directory to outfile names.
#-------------------------------------------------
self.ea_gs_file = (self.out_directory + self.ea_gs_file )
self.es_gs_file = (self.out_directory + self.es_gs_file )
self.Qsw_gs_file = (self.out_directory + self.Qsw_gs_file )
self.Qlw_gs_file = (self.out_directory + self.Qlw_gs_file )
self.ema_gs_file = (self.out_directory + self.ema_gs_file )
#------------------------------------------------------------
self.ea_ts_file = (self.out_directory + self.ea_ts_file )
self.es_ts_file = (self.out_directory + self.es_ts_file )
self.Qsw_ts_file = (self.out_directory + self.Qsw_ts_file )
self.Qlw_ts_file = (self.out_directory + self.Qlw_ts_file )
self.ema_ts_file = (self.out_directory + self.ema_ts_file )
## self.ea_gs_file = (self.case_prefix + '_2D-ea.rts')
## self.es_gs_file = (self.case_prefix + '_2D-es.rts')
## #-----------------------------------------------------
## self.ea_ts_file = (self.case_prefix + '_0D-ea.txt')
## self.es_ts_file = (self.case_prefix + '_0D-es.txt')
# update_outfile_names()
#-------------------------------------------------------------------
def open_output_files(self):
if (self.DEBUG):
print 'Calling open_output_files()...'
model_output.check_netcdf()
self.update_outfile_names()
#--------------------------------------
# Open new files to write grid stacks
#--------------------------------------
if (self.SAVE_EA_GRIDS):
model_output.open_new_gs_file( self, self.ea_gs_file, self.rti,
## var_name='e_air',
var_name='ea',
long_name='vapor_pressure_in_air',
units_name='mbar')
if (self.SAVE_ES_GRIDS):
model_output.open_new_gs_file( self, self.es_gs_file, self.rti,
## var_name='e_surf',
var_name='es',
long_name='vapor_pressure_at_surface',
units_name='mbar')
if (self.SAVE_QSW_GRIDS):
model_output.open_new_gs_file( self, self.Qsw_gs_file, self.rti,
var_name='Qsw',
long_name='net_shortwave_radiation',
units_name='W/m^2')
if (self.SAVE_QLW_GRIDS):
model_output.open_new_gs_file( self, self.Qlw_gs_file, self.rti,
var_name='Qlw',
long_name='net_longwave_radiation',
units_name='W/m^2')
if (self.SAVE_EMA_GRIDS):
model_output.open_new_gs_file( self, self.ema_gs_file, self.rti,
var_name='ema',
long_name='air_emissivity',
units_name='none')
#--------------------------------------
# Open new files to write time series
#--------------------------------------
IDs = self.outlet_IDs
if (self.SAVE_EA_PIXELS):
model_output.open_new_ts_file( self, self.ea_ts_file, IDs,
## var_name='e_air',
var_name='ea',
long_name='vapor_pressure_in_air',
units_name='mbar')
if (self.SAVE_ES_PIXELS):
model_output.open_new_ts_file( self, self.es_ts_file, IDs,
## var_name='e_surf',
var_name='es',
long_name='vapor_pressure_at_surface',
units_name='mbar')
if (self.SAVE_QSW_PIXELS):
model_output.open_new_ts_file( self, self.Qsw_ts_file, IDs,
var_name='Qsw',
long_name='net_shortwave_radiation',
units_name='W/m^2')
if (self.SAVE_QLW_PIXELS):
model_output.open_new_ts_file( self, self.Qlw_ts_file, IDs,
var_name='Qlw',
long_name='net_longwave_radiation',
units_name='W/m^2')
if (self.SAVE_EMA_PIXELS):
model_output.open_new_ts_file( self, self.ema_ts_file, IDs,
var_name='ema',
long_name='air_emissivity',
units_name='none')
# open_output_files()
#-------------------------------------------------------------------
def write_output_files(self, time_seconds=None):
if (self.DEBUG):
print 'Calling write_output_files()...'
#-----------------------------------------
# Allows time to be passed from a caller
#-----------------------------------------
if (time_seconds is None):
time_seconds = self.time_sec
model_time = int(time_seconds)
#----------------------------------------
# Save computed values at sampled times
#----------------------------------------
if (model_time % int(self.save_grid_dt) == 0):
self.save_grids()
if (model_time % int(self.save_pixels_dt) == 0):
self.save_pixel_values()
# write_output_files()
#-------------------------------------------------------------------
def close_output_files(self):
if (self.SAVE_EA_GRIDS): model_output.close_gs_file( self, 'ea')
if (self.SAVE_ES_GRIDS): model_output.close_gs_file( self, 'es')
if (self.SAVE_QSW_GRIDS): model_output.close_gs_file( self, 'Qsw')
if (self.SAVE_QLW_GRIDS): model_output.close_gs_file( self, 'Qlw')
if (self.SAVE_EMA_GRIDS): model_output.close_gs_file( self, 'ema')
#-------------------------------------------------------------------
if (self.SAVE_EA_PIXELS): model_output.close_ts_file( self, 'ea')
if (self.SAVE_ES_PIXELS): model_output.close_ts_file( self, 'es')
if (self.SAVE_QSW_PIXELS): model_output.close_ts_file( self, 'Qsw')
if (self.SAVE_QLW_PIXELS): model_output.close_ts_file( self, 'Qlw')
if (self.SAVE_EMA_PIXELS): model_output.close_ts_file( self, 'ema')
# close_output_files()
#-------------------------------------------------------------------
def save_grids(self):
if (self.SAVE_EA_GRIDS):
model_output.add_grid( self, self.e_air, 'ea', self.time_min )
if (self.SAVE_ES_GRIDS):
model_output.add_grid( self, self.e_surf, 'es', self.time_min )
if (self.SAVE_QSW_GRIDS):
model_output.add_grid( self, self.Qn_SW, 'Qsw', self.time_min )
if (self.SAVE_QLW_GRIDS):
model_output.add_grid( self, self.Qn_LW, 'Qlw', self.time_min )
if (self.SAVE_EMA_GRIDS):
model_output.add_grid( self, self.em_air, 'ema', self.time_min )
# save_grids()
#-------------------------------------------------------------------
def save_pixel_values(self):
IDs = self.outlet_IDs
time = self.time_min ######
if (self.SAVE_EA_PIXELS):
model_output.add_values_at_IDs( self, time, self.e_air, 'ea', IDs )
if (self.SAVE_ES_PIXELS):
model_output.add_values_at_IDs( self, time, self.e_surf, 'es', IDs )
if (self.SAVE_QSW_PIXELS):
model_output.add_values_at_IDs( self, time, self.Qn_SW, 'Qsw', IDs )
if (self.SAVE_QLW_PIXELS):
model_output.add_values_at_IDs( self, time, self.Qn_LW, 'Qlw', IDs )
if (self.SAVE_EMA_PIXELS):
model_output.add_values_at_IDs( self, time, self.em_air, 'ema', IDs )
# save_pixel_values()
#-------------------------------------------------------------------
#---------------------------------------------------------------------------------
def compare_em_air_methods():
#--------------------------------------------------------------
# Notes: There are two different methods that are commonly
# used to compute the vapor pressure of air, e_air,
# and then the emissivity of air, em_air, for use in
# longwave radiation calculations. This routine
# compares them graphically.
#
# NB! This hasn't been tested since conversion from IDL.
#-------------------------------------------------------------
import matplotlib.pyplot
    T_air = np.arange(80, dtype='float32') - np.float64(40)  # [Celsius] (-40 to 40)
RH = np.float64(1.0)
C2K = np.float64(273.15)
#--------------------------
# Brutsaert (1975) method
#--------------------------
term1 = (np.float64(17.3) * T_air) / (T_air + np.float64(237.3)) ######### DOUBLE CHECK THIS (7/26/13)
e_air1 = RH * np.float64(0.611) * np.exp( term1 ) # [kPa]
em_air1 = np.float64(1.72) * (e_air1 / (T_air + C2K)) ** (np.float64(1) / 7)
#---------------------------
# Satterlund (1979) method
#----------------------------
# NB! e_air has units of Pa
#----------------------------
term2 = np.float64(2353) / (T_air + C2K)
e_air2 = RH * np.float64(10) ** (np.float64(11.40) - term2) # [Pa]
eterm = np.exp(-np.float64(1) * (e_air2 / np.float64(100)) ** ((T_air + C2K) / np.float64(2016)))
em_air2 = np.float64(1.08) * (np.float64(1) - eterm)
#----------------------------
# Plot the two e_air curves
#--------------------------------
# These two agree quite closely
#--------------------------------
    matplotlib.pyplot.figure(figsize=(8, 6), dpi=80)
    matplotlib.pyplot.plot(T_air, e_air1)
    ## oplot(T_air, (e_air2 / np.float64(1000)), psym=-3)  # [Pa -> kPa]
    matplotlib.pyplot.show()
#-----------------------------
# Plot the two em_air curves
#--------------------------------------------------
# These two don't agree very well for some reason
#--------------------------------------------------
    matplotlib.pyplot.figure(figsize=(8, 6), dpi=80)
    matplotlib.pyplot.plot(T_air, em_air1)
    ## oplot(T_air, em_air2, psym=-3)
    matplotlib.pyplot.show()
# compare_em_air_Methods
#---------------------------------------------------------------------------------
| mit |
liebermeister/flux-enzyme-cost-minimization | scripts/monod_curve.py | 1 | 6430 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 1 2015
@author: noore
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from scipy.optimize import curve_fit
import definitions as D
import pandas as pd
#LOW_GLUCOSE = D.LOW_CONC['glucoseExt']
LOW_GLUCOSE = 1e-3 # in mM, i.e. 1 uM
MAX_GROWTH_RATE_L = 'max growth rate [h$^{-1}$]'
GROWTH_RATE_LOW_GLU = 'growth rate at\n%g $\mu$M glucose [h$^{-1}$]' % (1e3*LOW_GLUCOSE)
MONOD_COEFF_L = 'Monod coefficient [mM glucose]'
INV_MONOD_COEFF_L = 'inverse of Monod coeff.\n[mM$^{-1}$]'
MAX_GR_OVER_KM_L = 'max. growth rate / $K_{Monod}$ \n[h$^{-1}$ mM$^{-1}$]'
HILL_COEFF_L = 'Hill coefficient'
MONOD_FUNC = lambda x, gr_max, K_M, h: gr_max / (1 + (K_M/x)**h)
p0 = (0.07, 1.0, 1.0)
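# Note (hedged sketch, not in the original script): with h = 1,
# MONOD_FUNC reduces to the classical Monod curve, so the growth rate
# at x = K_M is half of gr_max, e.g.:
#
#   MONOD_FUNC(1.0, gr_max=0.07, K_M=1.0, h=1.0)   # -> 0.035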
def calculate_monod_parameters(figure_data):
aerobic_data_df = figure_data['standard']
aerobic_sweep_data_df = figure_data['monod_glucose_aero']
anaerobic_data_df = figure_data['anaerobic'].drop(9999)
anaerobic_sweep_data_df = figure_data['monod_glucose_anae'].drop(9999)
aerobic_sweep_data_df = aerobic_sweep_data_df.transpose().fillna(0)
anaerobic_sweep_data_df = anaerobic_sweep_data_df.transpose().fillna(0)
plot_data = [('aerobic conditions', aerobic_sweep_data_df, aerobic_data_df),
('anaerobic conditions', anaerobic_sweep_data_df, anaerobic_data_df)]
monod_dfs = []
for title, sweep_df, data_df in plot_data:
monod_df = pd.DataFrame(index=sweep_df.columns,
columns=[MAX_GROWTH_RATE_L, MONOD_COEFF_L, HILL_COEFF_L],
dtype=float)
for efm in monod_df.index:
try:
popt, _ = curve_fit(MONOD_FUNC, sweep_df.index, sweep_df[efm],
p0=p0, method='trf')
monod_df.loc[efm, :] = popt
except RuntimeError:
print("cannot resolve Monod curve for EFM %d" % efm)
monod_df.loc[efm, :] = np.nan
# get fig3 data for plotting the other features
monod_df = monod_df.join(data_df)
monod_df[INV_MONOD_COEFF_L] = 1.0/monod_df[MONOD_COEFF_L]
monod_df[MAX_GR_OVER_KM_L] = monod_df[MAX_GROWTH_RATE_L] * monod_df[INV_MONOD_COEFF_L]
# calculate the value of the growth rate using the Monod curve
# for LOW_GLUCOSE
monod_df[GROWTH_RATE_LOW_GLU] = 0
for j in monod_df.index:
monod_df.loc[j, GROWTH_RATE_LOW_GLU] = MONOD_FUNC(LOW_GLUCOSE,
monod_df.at[j, MAX_GROWTH_RATE_L],
monod_df.at[j, MONOD_COEFF_L],
monod_df.at[j, HILL_COEFF_L])
monod_dfs.append((title, monod_df))
return monod_dfs
def plot_monod_scatter(monod_dfs, y_var=MAX_GROWTH_RATE_L):
fig = plt.figure(figsize=(15, 14))
gs1 = gridspec.GridSpec(2, 4, left=0.05, right=0.95, bottom=0.55, top=0.97)
gs2 = gridspec.GridSpec(2, 4, left=0.05, right=0.95, bottom=0.06, top=0.45)
axs = []
for i in range(2):
for j in range(4):
axs.append(plt.subplot(gs1[i, j]))
for i in range(2):
for j in range(4):
axs.append(plt.subplot(gs2[i, j]))
for i, ax in enumerate(axs):
ax.annotate(chr(ord('a')+i), xy=(0.04, 0.95),
xycoords='axes fraction', ha='left', va='top',
size=20)
for i, (title, monod_df) in enumerate(monod_dfs):
xaxis_data = [(INV_MONOD_COEFF_L, (1, 2500), 'log'),
(GROWTH_RATE_LOW_GLU, (0.001, 0.2), 'linear')]
for j, (x_var, xlim, xscale) in enumerate(xaxis_data):
ax_row = axs[4*i + 8*j : 4*i + 8*j + 4]
ax = ax_row[0]
x = monod_df[x_var]
y = monod_df[y_var]
CS = ax.scatter(x, y, s=12, marker='o',
facecolors=(0.85, 0.85, 0.85),
linewidth=0)
for efm, (col, lab) in D.efm_dict.items():
if efm in x.index:
ax.plot(x[efm], y[efm], markersize=5, marker='o',
color=col, label=None)
ax.annotate(lab, xy=(x[efm], y[efm]),
xytext=(0, 5), textcoords='offset points',
ha='center', va='bottom', color=col)
ax.set_xlim(xlim[0], xlim[1])
ax.set_xscale(xscale)
ax.set_title('%s' % title, fontsize=16)
ax.set_xlabel(x_var, fontsize=16)
ax.set_ylabel(y_var, fontsize=16)
plot_parameters = [
{'c': D.OXYGEN_L, 'title': 'oxygen uptake' ,
'ax': ax_row[1], 'vmin': 0, 'vmax': 0.8},
{'c': D.YIELD_L, 'title': 'yield' ,
'ax': ax_row[2], 'vmin': 0, 'vmax': 30},
{'c': D.ACE_L, 'title': 'acetate secretion',
'ax': ax_row[3], 'vmin': 0, 'vmax': 0.6}
]
for d in plot_parameters:
x = monod_df[x_var]
y = monod_df[y_var]
c = monod_df[d['c']]
CS = d['ax'].scatter(x, y, s=12, c=c, marker='o',
linewidth=0, cmap='copper_r',
vmin=d['vmin'], vmax=d['vmax'])
cbar = plt.colorbar(CS, ax=d['ax'])
cbar.set_label(d['c'], fontsize=12)
d['ax'].set_title(d['title'], fontsize=16)
if i % 2 == 1:
d['ax'].set_xlabel(x_var, fontsize=16)
d['ax'].set_xlim(xlim[0], xlim[1])
d['ax'].set_xscale(xscale)
for i in range(16):
axs[i].set_yscale('linear')
axs[i].set_ylim(0, 0.85)
if i % 8 == 0:
axs[i].get_xaxis().set_visible(False)
if i % 4 > 0:
axs[i].get_yaxis().set_visible(False)
return fig
if __name__ == '__main__':
figure_data = D.get_figure_data()
monod_dfs = calculate_monod_parameters(figure_data)
figS17 = plot_monod_scatter(monod_dfs)
D.savefig(figS17, 'S17')
#%%
from pandas import ExcelWriter
writer = ExcelWriter(os.path.join(D.OUTPUT_DIR, 'monod_params.xls'))
for title, monod_df in monod_dfs:
monod_df.to_excel(writer, title)
writer.save() | gpl-2.0 |
pysb/pysb | pysb/examples/run_earm_hpp.py | 5 | 2377 | """ Run the Extrinsic Apoptosis Reaction Model (EARM) using BioNetGen's
Hybrid-Particle Population (HPP) algorithm.
NFsim provides stochastic simulation without reaction network generation,
allowing simulation of models with large (or infinite) reaction networks by
keeping track of species counts. However, it can fail when the number of
instances of a species gets too large (typically >200000). HPP circumvents
this problem by allowing the user to define species with large instance
counts as populations rather than NFsim particles.
This example runs the EARM 1.0 model with HPP, which fails to run on NFsim
with the default settings due to large initial concentration counts of
several species. By assigning population maps to these species, we can run
the simulation.
Reference: Hogg et al., PLoS Comput Biol 2014
https://doi.org/10.1371/journal.pcbi.1003544
"""
from pysb.examples.earm_1_0 import model
from pysb.simulator import BngSimulator
from pysb.simulator.bng import PopulationMap
from pysb import Parameter
import matplotlib.pyplot as plt
import numpy as np
def plot_mean_min_max(name, title=None):
x = np.array([tr[:][name] for tr in trajectories]).T
if not title:
title = name
plt.figure(title)
plt.plot(tout.T, x, '0.5', lw=2, alpha=0.25) # individual trajectories
plt.plot(tout[0], x.mean(1), 'k--', lw=3, label="Mean")
plt.plot(tout[0], x.min(1), 'b--', lw=3, label="Minimum")
plt.plot(tout[0], x.max(1), 'r--', lw=3, label="Maximum")
plt.legend(loc=0)
plt.xlabel('Time')
plt.ylabel('Population of %s' % name)
PARP, CPARP, Mito, mCytoC = [model.monomers[x] for x in
['PARP', 'CPARP', 'Mito', 'mCytoC']]
klump = Parameter('klump', 10000, _export=False)
model.add_component(klump)
population_maps = [
PopulationMap(PARP(b=None), klump),
PopulationMap(CPARP(b=None), klump),
PopulationMap(Mito(b=None), klump),
PopulationMap(mCytoC(b=None), klump)
]
sim = BngSimulator(model, tspan=np.linspace(0, 20000, 101))
simres = sim.run(n_runs=20, method='nf', population_maps=population_maps)
trajectories = simres.all
tout = simres.tout
plot_mean_min_max('Bid_unbound')
plot_mean_min_max('PARP_unbound')
plot_mean_min_max('mSmac_unbound')
plot_mean_min_max('tBid_total')
plot_mean_min_max('CPARP_total')
plot_mean_min_max('cSmac_total')
plt.show()
| bsd-2-clause |
MDAnalysis/mdanalysis | package/MDAnalysis/analysis/encore/dimensionality_reduction/reduce_dimensionality.py | 1 | 9928 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
dimensionality reduction frontend --- :mod:`MDAnalysis.analysis.encore.dimensionality_reduction.reduce_dimensionality`
======================================================================================================================
The module defines a function serving as front-end for various dimensionality
reduction algorithms, wrapping them to allow them to be used interchangeably.
:Author: Matteo Tiberti, Wouter Boomsma, Tone Bengtsen
.. versionadded:: 0.16.0
"""
import numpy as np
from ..confdistmatrix import get_distance_matrix
from ..utils import ParallelCalculation, merge_universes
from ..dimensionality_reduction.DimensionalityReductionMethod import (
StochasticProximityEmbeddingNative)
def reduce_dimensionality(ensembles,
method=StochasticProximityEmbeddingNative(),
select="name CA",
distance_matrix=None,
allow_collapsed_result=True,
ncores=1,
**kwargs):
"""
Reduce dimensions in frames from one or more ensembles, using one or more
dimensionality reduction methods. The function optionally takes
pre-calculated distance matrices as an argument. Note that not all
dimensionality reduction procedures can work directly on distance matrices,
so the distance matrices might be ignored for particular choices of
method.
Parameters
----------
ensembles : MDAnalysis.Universe, or list, or list of lists thereof
The function takes either a single Universe object, a list of Universe
objects or a list of lists of Universe objects. If given a single
universe, it simply works on the conformations in the trajectory. If
given a list of ensembles, it will merge them and analyse them together,
keeping track of the ensemble to which each of the conformations belong.
Finally, if passed a list of lists of ensembles, the function will
repeat the procedure described above for each inner list, merging the
ensembles within each one.
method : MDAnalysis.analysis.encore.dimensionality_reduction.DimensionalityReductionMethod or list
A single or a list of instances of the DimensionalityReductionMethod
classes from the dimensionality_reduction module. A separate analysis
will be run for each method. Note that different parameters for the
same method can be explored by adding different instances of
the same dimensionality reduction class. Options are Stochastic
Proximity Embedding or Principal Component Analysis.
select : str, optional
Atom selection string in the MDAnalysis format (default is "name CA")
distance_matrix : encore.utils.TriangularMatrix, optional
Distance matrix for stochastic proximity embedding. If this parameter
is not supplied, an RMSD distance matrix will be calculated on the fly (default).
If several distance matrices are supplied, an analysis will be done
for each of them. The number of provided distance matrices should
match the number of provided ensembles.
allow_collapsed_result: bool, optional
Whether a return value of a list of one value should be collapsed
into just the value (default = True).
ncores : int, optional
Maximum number of cores to be used (default is 1).
Returns
-------
list of coordinate arrays in the reduced dimensions (or potentially a single
coordinate array object if allow_collapsed_result is set to True)
Example
-------
Two ensembles are created as Universe object using a topology file and
two trajectories. The topology- and trajectory files used are obtained
from the MDAnalysis test suite for two different simulations of the protein
AdK.
Here, we reduce two ensembles to two dimensions, and plot the result using
matplotlib: ::
>>> from MDAnalysis import Universe
>>> import MDAnalysis.analysis.encore as encore
>>> from MDAnalysis.tests.datafiles import PSF, DCD, DCD2
>>> import matplotlib.pyplot as plt
>>> ens1 = Universe(PSF, DCD)
>>> ens2 = Universe(PSF, DCD2)
>>> coordinates, details = encore.reduce_dimensionality([ens1,ens2])
>>> plt.scatter(coordinates[0], coordinates[1],
color=[["red", "blue"][m-1] for m
in details["ensemble_membership"]])
Note how we extracted information about which conformation belonged to
which ensemble from the details variable.
You can change the parameters of the dimensionality reduction method
by explicitly specifying the method ::
>>> coordinates, details =
encore.reduce_dimensionality([ens1,ens2],
method=encore.StochasticProximityEmbeddingNative(dimension=3))
Here is an illustration using Principal Component Analysis, instead
of the default dimensionality reduction method ::
>>> coordinates, details =
encore.reduce_dimensionality(
[ens1,ens2],
method=encore.PrincipalComponentAnalysis(dimension=2))
You can also combine multiple methods in one call ::
>>> coordinates, details =
encore.reduce_dimensionality(
[ens1,ens2],
method=[encore.PrincipalComponentAnalysis(dimension=2),
encore.StochasticProximityEmbeddingNative(dimension=2)])
"""
if ensembles is not None:
if not hasattr(ensembles, '__iter__'):
ensembles = [ensembles]
ensembles_list = ensembles
if not hasattr(ensembles[0], '__iter__'):
ensembles_list = [ensembles]
# Calculate merged ensembles and transfer to memory
merged_ensembles = []
for ensembles in ensembles_list:
# Transfer ensembles to memory
for ensemble in ensembles:
ensemble.transfer_to_memory()
merged_ensembles.append(merge_universes(ensembles))
methods = method
if not hasattr(method, '__iter__'):
methods = [method]
# Check whether any of the methods can make use of a distance matrix
any_method_accept_distance_matrix = \
np.any([_method.accepts_distance_matrix for _method in
methods])
# If distance matrices are provided, check that it matches the number
# of ensembles
if distance_matrix:
if not hasattr(distance_matrix, '__iter__'):
distance_matrix = [distance_matrix]
if ensembles is not None and \
len(distance_matrix) != len(merged_ensembles):
raise ValueError("Dimensions of provided list of distance matrices "
"does not match that of provided list of "
"ensembles: {0} vs {1}"
.format(len(distance_matrix),
len(merged_ensembles)))
else:
# Calculate distance matrices for all merged ensembles - if not provided
if any_method_accept_distance_matrix:
distance_matrix = []
for merged_ensemble in merged_ensembles:
distance_matrix.append(get_distance_matrix(merged_ensemble,
select=select,
**kwargs))
args = []
for method in methods:
if method.accepts_distance_matrix:
args += [(d,) for d in distance_matrix]
else:
for merged_ensemble in merged_ensembles:
coordinates = merged_ensemble.trajectory.timeseries(order="fac")
# Flatten coordinate matrix into n_frame x n_coordinates
coordinates = np.reshape(coordinates,
(coordinates.shape[0], -1))
args.append((coordinates,))
# Execute dimensionality reduction procedure
pc = ParallelCalculation(ncores, methods, args)
# Run parallel calculation
results = pc.run()
# Keep track of which sample belongs to which ensembles
details = {}
if ensembles is not None:
ensemble_assignment = []
for i, ensemble in enumerate(ensembles):
ensemble_assignment += [i+1]*len(ensemble.trajectory)
ensemble_assignment = np.array(ensemble_assignment)
details['ensemble_membership'] = ensemble_assignment
coordinates = []
for result in results:
coordinates.append(result[1][0])
# details.append(result[1][1])
if allow_collapsed_result and len(coordinates)==1:
coordinates = coordinates[0]
# details = details[0]
return coordinates, details
| gpl-2.0 |
cactusbin/nyt | matplotlib/examples/user_interfaces/embedding_in_tk.py | 9 | 1419 | #!/usr/bin/env python
import matplotlib
matplotlib.use('TkAgg')
from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import sys
if sys.version_info[0] < 3:
import Tkinter as Tk
else:
import tkinter as Tk
root = Tk.Tk()
root.wm_title("Embedding in TK")
f = Figure(figsize=(5,4), dpi=100)
a = f.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)
a.plot(t,s)
# a tk.DrawingArea
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
toolbar = NavigationToolbar2TkAgg( canvas, root )
toolbar.update()
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def on_key_event(event):
print('you pressed %s'%event.key)
key_press_handler(event, canvas, toolbar)
canvas.mpl_connect('key_press_event', on_key_event)
def _quit():
root.quit() # stops mainloop
root.destroy() # this is necessary on Windows to prevent
# Fatal Python Error: PyEval_RestoreThread: NULL tstate
button = Tk.Button(master=root, text='Quit', command=_quit)
button.pack(side=Tk.BOTTOM)
Tk.mainloop()
# If you put root.destroy() here, it will cause an error if
# the window is closed with the window manager.
| unlicense |
e-matteson/pipit-keyboard | extras/audio/make_audio_files.py | 1 | 2652 | #!/bin/python2
from __future__ import division
import subprocess
from time import sleep
import os
# for generating sound files
import numpy as np
import matplotlib.pyplot as plt
import scipy.io.wavfile
import scipy.signal as sig
import scipy.stats as stats
master_volume = 1
sounds = {
'A':{'filename':'tick1.wav',
'volume': .9,
'freq': 10,
'length': .01,
'quality': 1,
'tone':'sawtooth',
'a': 2,
'b': 10,},
'W':{'filename':'tick2.wav',
'volume': .8,
'freq': 5,
'length': .01,
'quality': .8,
'tone':'sawtooth',
'a': 2,
'b': 5,},
'M':{'filename':'tick3.wav',
'volume': .8,
'freq': 10,
'length': .05,
'quality': .95,
'tone':'sawtooth',
'a': 2,
'b': 5,},
'S':{'filename':'tick4.wav',
'volume': .4,
'freq': 50,
'length': .04,
'quality': .6,
'tone':'sawtooth',
'a': 2,
'b': 5,},
'U':{'filename':'tick5.wav',
'volume': .5,
'freq': 40,
'length': .02,
'quality': .9,
'tone':'sawtooth',
'a': 2,
'b': 5,},
}
def construct_sound(params, plot_sound=False):
print "constructing sound: %s" % params['filename']
rate = 44100
N = int(rate*params['length'])
time = range(N)
if params['tone'] == 'sawtooth':
raw = sig.sawtooth(np.linspace(0,params['freq'],N))
elif params['tone'] == 'sine':
# not successfully tested; try plotting to verify
raw = np.sin(np.linspace(0,params['freq'],N))
else:
raise RuntimeError('unknown tone type')
noise = np.random.uniform(-1, 1, N) # N random samples between -1 and 1
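# shape the amplitude with a beta-distribution PDF: fast attack, smooth decay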
envelope = stats.beta(params['a'],params['b']).pdf([n/N for n in time])
data = raw*params['quality'] + noise*(1-params['quality'])
data *= envelope
save_wav(data, params['filename'], params['volume'])
if plot_sound:
plt.figure()
plt.plot(time, raw)
plt.plot(time, envelope)
plt.plot(time, data)
plt.show()
def save_wav(data, filename, volume=1):
total_volume = volume * master_volume
if total_volume > 1 or total_volume < 0:
raise RuntimeError('volume out of range')
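# normalize to full scale, apply volume, then quantize to signed 16-bit PCM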
scaled_data = np.int16(data/np.max(np.abs(data)) * total_volume * 32767)
scipy.io.wavfile.write(filename, 44100, scaled_data)
def main():
data = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] * 10000
save_wav(data, "test.wav")
# for code in sounds.keys():
# construct_sound(sounds[code])
main()
| gpl-3.0 |
arjunkhode/ASP | lectures/03-Fourier-properties/plots-code/symmetry-real-even.py | 26 | 1150 | import matplotlib.pyplot as plt
import numpy as np
import sys
import math
from scipy.signal import triang
from scipy.fftpack import fft, fftshift
M = 127
N = 128
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
x = triang(M)
fftbuffer = np.zeros(N)
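# zero-phase arrangement: wrap the two halves of the window around the buffer edges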
fftbuffer[:hM1] = x[hM2:]
fftbuffer[N-hM2:] = x[:hM2]
X = fftshift(fft(fftbuffer))
mX = abs(X)
pX = np.unwrap(np.angle(X))
plt.figure(1, figsize=(9.5, 4))
plt.subplot(311)
plt.title('x[n]')
plt.plot(np.arange(-hM2, hM1, 1.0), x, 'b', lw=1.5)
plt.axis([-hM2, hM1, 0, 1])
plt.subplot(323)
plt.title('real(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), np.real(X), 'r', lw=1.5)
plt.axis([-N/2, N/2, min(np.real(X)), max(np.real(X))])
plt.subplot(324)
plt.title('im(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), np.imag(X), 'c', lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.subplot(325)
plt.title('abs(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), mX, 'r', lw=1.5)
plt.axis([-N/2,N/2,min(mX),max(mX)])
plt.subplot(326)
plt.title('angle(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), pX, 'c', lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.tight_layout()
plt.savefig('symmetry-real-even.png')
plt.show()
| agpl-3.0 |
alexlib/openpiv-python | setup.py | 2 | 1786 | from os import path
from setuptools import setup, find_packages
# read the contents of your README file
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="OpenPIV",
version='0.23.6',
packages=find_packages(),
include_package_data=True,
long_description=long_description,
long_description_content_type='text/markdown',
setup_requires=[
'setuptools',
],
install_requires=[
'numpy',
'imageio',
'matplotlib>=3',
'scikit-image',
'scipy',
'natsort',
'GitPython',
'pytest',
'tqdm'
],
classifiers=[
# PyPI-specific version type. The number specified here is a magic
# constant
# with no relation to this application's version numbering scheme.
# *sigh*
'Development Status :: 4 - Beta',
# Sublist of all supported Python versions.
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
# Sublist of all supported platforms and environments.
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
# Miscellaneous metadata.
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
],
# long_description=long_description,
# long_description_content_type='text/markdown'
)
| gpl-3.0 |
lenovor/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
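# place a '*' above the bar of the BIC-minimizing (selected) model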
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
pllim/ginga | ginga/rv/plugins/Preferences.py | 1 | 63607 | # This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
Make changes to channel settings graphically in the UI.
**Plugin Type: Local**
``Preferences`` is a local plugin, which means it is associated with a
channel. An instance can be opened for each channel.
**Usage**
The ``Preferences`` plugin sets the preferences on a per-channel basis.
The preferences for a given channel are inherited from the "Image"
channel until they are explicitly set and saved using this plugin.
If "Save Settings" is pressed, it will save the settings to the user's
home Ginga folder so that when a channel with the same name is created
in future Ginga sessions it will obtain the same settings.
**Color Distribution Preferences**
.. figure:: figures/cdist-prefs.png
:align: center
:alt: Color Distribution preferences
"Color Distribution" preferences.
The "Color Distribution" preferences control the preferences used for the
data value to color index conversion that occurs after cut levels are
applied and just before final color mapping is performed. It concerns
how the values between the low and high cut levels are distributed to
the color and intensity mapping phase.
The "Algorithm" control is used to set the algorithm used for the
mapping. Click the control to show the list, or simply scroll the mouse
wheel while hovering the cursor over the control. There are eight
algorithms available: linear, log, power, sqrt, squared, asinh, sinh,
and histeq. The name of each algorithm is indicative of how
the data is mapped to the colors in the color map. "linear" is the
default.
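As a rough illustration (a schematic only, not Ginga's exact
implementation), a "log" distribution maps normalized input values ``x``
in ``[0, 1]`` to color-index space approximately as::

    import numpy as np

    def log_dist(x, a=1000.0):
        # compress the bright end and stretch the faint end (ds9-style)
        return np.log(a * x + 1.0) / np.log(a + 1.0)

so faint pixels receive a larger share of the color map than they would
under "linear".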
**Color Mapping Preferences**
.. figure:: figures/cmap-prefs.png
:align: center
:alt: Color Mapping preferences
"Color Mapping" preferences.
The "Color Mapping" preferences control the preferences used for the
color map and intensity map, used during the final phase of the color
mapping process. Together with the "Color Distribution" preferences, these
control the mapping of data values into a 24-bpp RGB visual representation.
The "Colormap" control selects which color map should be loaded and
used. Click the control to show the list, or simply scroll the mouse
wheel while hovering the cursor over the control.
The "Intensity" control selects which intensity map should be used
with the color map. The intensity map is applied just before the color
map, and can be used to change the standard linear scale of values into
an inverted scale, logarithmic, etc.
Ginga comes with a good selection of color maps, but should you want
more, you can add custom ones or, if ``matplotlib`` is installed, you
can load all the ones that it has.
See "Customizing Ginga" for details.
**Zoom Preferences**
.. figure:: figures/zoom-prefs.png
:align: center
:alt: Zoom preferences
"Zoom" preferences.
The "Zoom" preferences control Ginga's zooming/scaling behavior.
Ginga supports two zoom algorithms, chosen using the "Zoom Alg" control:
* The "step" algorithm zooms the image inwards in discrete
steps of 1X, 2X, 3X, etc. or outwards in steps of 1/2X, 1/3X, 1/4X,
etc. This algorithm results in the least artifacts visually, but is a
bit slower to zoom over wide ranges when using a scrolling motion
because more "throw" is required to achieve a large zoom change
(this is not the case if one uses of the shortcut zoom keys, such as
the digit keys).
* The "rate" algorithm zooms the image by advancing the scaling at
a rate defined by the value in the "Zoom Rate" box. This rate defaults
to the square root of 2. Larger numbers cause larger changes in scale
between zoom levels. If you like to zoom your images rapidly, at a
small cost in image quality, you would likely want to choose this
option.
Note that regardless of which method is chosen for the zoom algorithm,
the zoom can be controlled by holding down ``Ctrl`` (coarse) or ``Shift``
(fine) while scrolling to constrain the zoom rate (assuming the default
mouse bindings).
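For intuition, the mapping from zoom level to scale factor under each
algorithm can be sketched as follows (an illustration only, not Ginga's
internal code)::

    import math

    def step_scale(level):
        # discrete steps: 1X, 2X, 3X, ... inwards; 1/2X, 1/3X, ... outwards
        return float(level) if level >= 1 else 1.0 / (2 - level)

    def rate_scale(level, rate=math.sqrt(2.0)):
        # geometric progression: each level multiplies the scale by ``rate``
        return rate ** level

With "rate", a larger rate covers a wide range of scales in fewer steps,
which is why it feels faster when zooming with the scroll wheel.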
The "Stretch XY" control can be used to stretch one of the axes (X or
Y) relative to the other. Select an axis with this control and roll the
scroll wheel while hovering over the "Stretch Factor" control to
stretch the pixels in the selected axis.
The "Scale X" and "Scale Y" controls offer direct access to the
underlying scaling, bypassing the discrete zoom steps. Here, exact
values can be typed to scale the image. Conversely, you will see these
values change as the image is zoomed.
The "Scale Min" and "Scale Max" controls can be used to place a
limit on how much the image can be scaled.
The "Zoom Defaults" button will restore the controls to the Ginga
default values.
**Pan Preferences**
.. figure:: figures/pan-prefs.png
:align: center
:alt: Pan Preferences
"Pan" preferences.
The "Pan" preferences control Ginga's panning behavior.
The "Pan X" and "Pan Y" controls offer direct access to set the pan
position in the image (the part of the image located at the center of
the window) -- you can see them change as you pan around the image.
The "Center Image" button sets the pan position to the center of the
image, as calculated by halving the dimensions in X and Y.
The "Mark Center" check box, when checked, will cause Ginga to draw a
small reticle in the center of the image. This is useful for knowing
the pan position and for debugging.
**Transform Preferences**
.. figure:: figures/transform-prefs.png
:align: center
:alt: Transform Preferences
"Transform" preferences.
The "Transform" preferences provide for transforming the view of the image
by flipping the view in X or Y, swapping the X and Y axes, or rotating
the image in arbitrary amounts.
The "Flip X" and "Flip Y" checkboxes cause the image view to be
flipped in the corresponding axis.
The "Swap XY" checkbox causes the image view to be altered by swapping
the X and Y axes. This can be combined with "Flip X" and "Flip Y" to rotate
the image in 90 degree increments. These views will render more quickly
than arbitrary rotations using the "Rotate" control.
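In array terms the equivalence is easy to see (a rough analogy, not the
transform code Ginga uses internally)::

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    # swapping the axes (transpose) and then flipping one of them
    # is exactly a 90-degree rotation
    assert np.array_equal(np.flipud(a.T), np.rot90(a))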
The "Rotate" control will rotate the image view the specified amount.
The value should be specified in degrees. "Rotate" can be specified in
conjunction with flipping and swapping.
The "Restore" button will restore the view to the default view, which
is unflipped, unswapped, and unrotated.
**Auto Cuts Preferences**
.. figure:: figures/autocuts-prefs.png
:align: center
:alt: Auto Cuts Preferences
"Auto Cuts" preferences.
The "Auto Cuts" preferences control the calculation of cut levels for
the view when the auto cut levels button or key is pressed, or when
loading a new image with auto cuts enabled. You can also set the cut
levels manually from here.
The "Cut Low" and "Cut High" fields can be used to manually specify lower
and upper cut levels. Pressing "Cut Levels" will set the levels to these
values manually. If a value is missing, it is assumed to default to the
whatever the current value is.
Pressing "Auto Levels" will calculate the levels according to an algorithm.
The "Auto Method" control is used to choose which auto cuts algorithm
used: "minmax" (minimum maximum values), "median" (based on median
filtering), "histogram" (based on an image histogram), "stddev" (based on
the standard deviation of pixel values), or "zscale" (based on the ZSCALE
algorithm popularized by IRAF).
As the algorithm is changed, the boxes under it may also change to
allow changes to parameters particular to each algorithm.
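For intuition, the two simplest approaches can be sketched in a few lines
of NumPy (approximations for illustration; the real algorithms live in
``ginga.AutoCuts`` and take additional tunable parameters)::

    import numpy as np

    def minmax_cuts(data):
        return float(np.nanmin(data)), float(np.nanmax(data))

    def percentile_cuts(data, pct=0.999):
        # keep the central ``pct`` fraction of the pixel distribution
        tail = (1.0 - pct) / 2.0
        lo, hi = np.nanquantile(data, [tail, 1.0 - tail])
        return float(lo), float(hi)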
**WCS Preferences**
.. figure:: figures/wcs-prefs.png
:align: center
:alt: WCS Preferences
"WCS" preferences.
The "WCS" preferences control the display preferences for the World
Coordinate System (WCS) calculations used to report the cursor position in the
image.
The "WCS Coords" control is used to select the coordinate system in
which to display the result.
The "WCS Display" control is used to select a sexagesimal (``H:M:S``)
readout or a decimal degrees readout.
**New Image Preferences**
.. figure:: figures/newimages-prefs.png
:align: center
:alt: New Image Preferences
"New Image" preferences.
The "New Images" preferences determine how Ginga reacts when a new image
is loaded into the channel. This includes when an older image is
revisited by clicking on its thumbnail in the ``Thumbs`` plugin pane.
The "Cut New" setting controls whether an automatic cut-level
calculation should be performed on the new image, or whether the
currently set cut levels should be applied. The possible settings are:
* "on": calculate a new cut levels always;
* "override": calculate a new cut levels until the user overrides
it by manually setting a cut levels, then turn "off"; or
* "off": always use the currently set cut levels.
.. tip:: The "override" setting is provided for the convenience of
having automatic cut levels, while preventing a manually set
cuts from being overridden when a new image is ingested. When
typed in the image window, the semicolon key can be used to
toggle the mode back to override (from "off"), while colon will
set the preference to "on". The ``Info`` panel shows
the state of this setting.
The "Zoom New" setting controls whether a newly visited image should
be zoomed to fit the window. There are three possible values: on,
override, and off:
* "on": the new image is always zoomed to fit;
* "override": images are automatically fitted until the zoom level is
changed manually, then the mode automatically changes to "off", or
* "off": always use the currently set zoom levels.
.. tip:: The "override" setting is provided for the convenience of
having an automatic zoom, while preventing a manually set zoom
level from being overridden when a new image is ingested. When
typed in the image window, the apostrophe (a.k.a. "single quote")
key can be used to toggle the mode back to "override" (from
"off"), while quote (a.k.a. double quote) will set the preference
to "on". The global plugin ``Info`` panel shows the state of this
setting.
The "Center New" box, if checked, will cause newly visited images to
always have the pan position reset to the center of the image. If
unchecked, the pan position is unchanged from the previous image.
The "Follow New" setting is used to control whether Ginga will change
the display if a new image is loaded into the channel. If unchecked,
the image is loaded (as seen, for example, by its appearance in the
``Thumbs`` tab), but the display will not change to the new image. This
setting is useful in cases where new images are being loaded by some
automated means into a channel and the user wishes to study the current
image without being interrupted.
The "Raise New" setting controls whether Ginga will raise the tab of a
channel when an image is loaded into that channel. If unchecked, then
Ginga will not raise the tab when an image is loaded into that
particular channel.
The "Create Thumbnail" setting controls whether Ginga will create a
thumbnail for images loaded into that channel. In cases where many
images are being loaded into a channel frequently (e.g., a low frequency
video feed), it may be undesirable to create thumbnails for all of them.
**General Preferences**
The "Num Images" setting specifies how many images can be retained in
buffers in this channel before being ejected. A value of zero (0) means
unlimited--images will never be ejected. If an image was loaded from
some accessible storage and it is ejected, it will automatically be
reloaded if the image is revisited by navigating the channel.
The "Sort Order" setting determines whether images are sorted in the
channel alphabetically by name or by the time when they were loaded.
This principally affects the order in which images are cycled when using
the up/down "arrow" keys or buttons, and not necessarily how they are
displayed in plugins like "Contents" or "Thumbs" (which generally have
their own setting preference for ordering).
The "Use scrollbars" check box controls whether the channel viewer will
show scroll bars around the edge of the viewer frame.
**Remember Preferences**
When an image is loaded, a profile is created and attached to the image
metadata in the channel. These profiles are continuously updated with
viewer state as the image is manipulated. The "Remember" preferences
control which parts of these profiles are restored to the viewer state
when the image is navigated to in the channel:
* "Restore Scale" will restore the zoom (scale) level
* "Restore Pan" will restore the pan position
* "Restore Transform" will restore any flip or swap axes transforms
* "Restore Rotation" will restore any rotation of the image
* "Restore Cuts" will restore any cut levels for the image
* "Restore Scale" will restore any coloring adjustments made (including
color map, color distribution, contrast/stretch, etc.)
"""
import math
from ginga.gw import Widgets
from ginga.misc import ParamSet, Bunch
from ginga import cmap, imap, trcalc
from ginga import GingaPlugin
from ginga import AutoCuts, ColorDist
from ginga.util import wcs, wcsmod, rgb_cms
__all__ = ['Preferences']
class Preferences(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(Preferences, self).__init__(fv, fitsimage)
self.cmap_names = cmap.get_names()
self.imap_names = imap.get_names()
self.zoomalg_names = ('step', 'rate')
# get Preferences preferences
prefs = self.fv.get_preferences()
self.settings = prefs.create_category('plugin_Preferences')
self.settings.add_defaults(orientation=None)
self.settings.load(onError='silent')
self.t_ = self.fitsimage.get_settings()
self.autocuts_cache = {}
self.gui_up = False
self.calg_names = ColorDist.get_dist_names()
self.autozoom_options = self.fitsimage.get_autozoom_options()
self.autocut_options = self.fitsimage.get_autocuts_options()
self.autocut_methods = self.fitsimage.get_autocut_methods()
self.autocenter_options = self.fitsimage.get_autocenter_options()
self.pancoord_options = ('data', 'wcs')
self.sort_options = ('loadtime', 'alpha')
for key in ['color_map', 'intensity_map',
'color_algorithm', 'color_hashsize']:
self.t_.get_setting(key).add_callback(
'set', self.rgbmap_changed_ext_cb)
self.t_.get_setting('autozoom').add_callback(
'set', self.autozoom_changed_ext_cb)
self.t_.get_setting('autocenter').add_callback(
'set', self.autocenter_changed_ext_cb)
self.t_.get_setting('autocuts').add_callback(
'set', self.autocuts_changed_ext_cb)
for key in ['switchnew', 'raisenew', 'genthumb']:
self.t_.get_setting(key).add_callback(
'set', self.set_chprefs_ext_cb)
for key in ['pan']:
self.t_.get_setting(key).add_callback(
'set', self.pan_changed_ext_cb)
for key in ['scale']:
self.t_.get_setting(key).add_callback(
'set', self.scale_changed_ext_cb)
self.t_.get_setting('zoom_algorithm').add_callback(
'set', self.set_zoomalg_ext_cb)
self.t_.get_setting('zoom_rate').add_callback(
'set', self.set_zoomrate_ext_cb)
for key in ['scale_x_base', 'scale_y_base']:
self.t_.get_setting(key).add_callback(
'set', self.scalebase_changed_ext_cb)
self.t_.get_setting('rot_deg').add_callback(
'set', self.set_rotate_ext_cb)
for name in ('flip_x', 'flip_y', 'swap_xy'):
self.t_.get_setting(name).add_callback(
'set', self.set_transform_ext_cb)
self.t_.get_setting('autocut_method').add_callback('set',
self.set_autocut_method_ext_cb)
self.t_.get_setting('autocut_params').add_callback('set',
self.set_autocut_params_ext_cb)
self.t_.get_setting('cuts').add_callback(
'set', self.cutset_cb)
self.t_.setdefault('wcs_coords', 'icrs')
self.t_.setdefault('wcs_display', 'sexagesimal')
# buffer len (number of images in memory)
self.t_.add_defaults(numImages=4)
self.t_.get_setting('numImages').add_callback('set', self.set_buflen_ext_cb)
# preload images
self.t_.add_defaults(preload_images=False)
self.icc_profiles = list(rgb_cms.get_profiles())
self.icc_profiles.insert(0, None)
self.icc_intents = rgb_cms.get_intents()
def build_gui(self, container):
top = Widgets.VBox()
top.set_border_width(4)
vbox, sw, orientation = Widgets.get_oriented_box(container,
orientation=self.settings.get('orientation', None))
self.orientation = orientation
#vbox.set_border_width(4)
vbox.set_spacing(2)
# COLOR DISTRIBUTION OPTIONS
fr = Widgets.Frame("Color Distribution")
captions = (('Algorithm:', 'label', 'Algorithm', 'combobox'),
#('Table Size:', 'label', 'Table Size', 'entryset'),
('Dist Defaults', 'button'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
self.w.calg_choice = b.algorithm
#self.w.table_size = b.table_size
b.algorithm.set_tooltip("Choose a color distribution algorithm")
#b.table_size.set_tooltip("Set size of the distribution hash table")
b.dist_defaults.set_tooltip("Restore color distribution defaults")
b.dist_defaults.add_callback('activated',
lambda w: self.set_default_distmaps())
combobox = b.algorithm
options = []
index = 0
for name in self.calg_names:
options.append(name)
combobox.append_text(name)
index += 1
try:
index = self.calg_names.index(self.t_.get('color_algorithm',
"linear"))
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_calg_cb)
## entry = b.table_size
## entry.set_text(str(self.t_.get('color_hashsize', 65535)))
## entry.add_callback('activated', self.set_tablesize_cb)
fr.set_widget(w)
vbox.add_widget(fr)
# COLOR MAPPING OPTIONS
fr = Widgets.Frame("Color Mapping")
captions = (('Colormap:', 'label', 'Colormap', 'combobox'),
('Intensity:', 'label', 'Intensity', 'combobox'),
('Color Defaults', 'button'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
self.w.cmap_choice = b.colormap
self.w.imap_choice = b.intensity
b.color_defaults.add_callback('activated',
lambda w: self.set_default_cmaps())
b.colormap.set_tooltip("Choose a color map for this image")
b.intensity.set_tooltip("Choose an intensity map for this image")
b.color_defaults.set_tooltip("Restore default color and intensity maps")
fr.set_widget(w)
vbox.add_widget(fr)
combobox = b.colormap
options = []
index = 0
for name in self.cmap_names:
options.append(name)
combobox.append_text(name)
index += 1
cmap_name = self.t_.get('color_map', "gray")
try:
index = self.cmap_names.index(cmap_name)
except Exception:
index = self.cmap_names.index('gray')
combobox.set_index(index)
combobox.add_callback('activated', self.set_cmap_cb)
combobox = b.intensity
options = []
index = 0
for name in self.imap_names:
options.append(name)
combobox.append_text(name)
index += 1
imap_name = self.t_.get('intensity_map', "ramp")
try:
index = self.imap_names.index(imap_name)
except Exception:
index = self.imap_names.index('ramp')
combobox.set_index(index)
combobox.add_callback('activated', self.set_imap_cb)
# AUTOCUTS OPTIONS
fr = Widgets.Frame("Auto Cuts")
vbox2 = Widgets.VBox()
fr.set_widget(vbox2)
captions = (('Cut Low:', 'label', 'Cut Low Value', 'llabel',
'Cut Low', 'entry'),
('Cut High:', 'label', 'Cut High Value', 'llabel',
'Cut High', 'entry'),
('spacer_1', 'spacer', 'spacer_2', 'spacer',
'Cut Levels', 'button'),
('Auto Method:', 'label', 'Auto Method', 'combobox',
'Auto Levels', 'button'),)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
loval, hival = self.t_['cuts']
b.cut_levels.set_tooltip("Set cut levels manually")
b.auto_levels.set_tooltip("Set cut levels by algorithm")
b.cut_low.set_tooltip("Set low cut level (press Enter)")
b.cut_low.set_length(9)
b.cut_low_value.set_text('%.4g' % (loval))
b.cut_high.set_tooltip("Set high cut level (press Enter)")
b.cut_high.set_length(9)
b.cut_high_value.set_text('%.4g' % (hival))
b.cut_low.add_callback('activated', self.cut_levels)
b.cut_high.add_callback('activated', self.cut_levels)
b.cut_levels.add_callback('activated', self.cut_levels)
b.auto_levels.add_callback('activated', self.auto_levels)
# Setup auto cuts method choice
combobox = b.auto_method
index = 0
method = self.t_.get('autocut_method', "histogram")
for name in self.autocut_methods:
combobox.append_text(name)
index += 1
try:
index = self.autocut_methods.index(method)
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_autocut_method_cb)
b.auto_method.set_tooltip("Choose algorithm for auto levels")
vbox2.add_widget(w, stretch=0)
self.w.acvbox = Widgets.VBox()
vbox2.add_widget(self.w.acvbox, stretch=1)
vbox.add_widget(fr, stretch=0)
# TRANSFORM OPTIONS
fr = Widgets.Frame("Transform")
captions = (('Flip X', 'checkbutton', 'Flip Y', 'checkbutton',
'Swap XY', 'checkbutton'),
('Rotate:', 'label', 'Rotate', 'spinfloat'),
('Restore', 'button'),)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
for name in ('flip_x', 'flip_y', 'swap_xy'):
btn = b[name]
btn.set_state(self.t_.get(name, False))
btn.add_callback('activated', self.set_transforms_cb)
b.flip_x.set_tooltip("Flip the image around the X axis")
b.flip_y.set_tooltip("Flip the image around the Y axis")
b.swap_xy.set_tooltip("Swap the X and Y axes in the image")
b.rotate.set_tooltip("Rotate the image around the pan position")
b.restore.set_tooltip("Clear any transforms and center image")
b.restore.add_callback('activated', self.restore_cb)
b.rotate.set_limits(0.00, 359.99999999, incr_value=10.0)
b.rotate.set_value(0.00)
b.rotate.set_decimals(8)
b.rotate.add_callback('value-changed', self.rotate_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
# WCS OPTIONS
fr = Widgets.Frame("WCS")
captions = (('WCS Coords:', 'label', 'WCS Coords', 'combobox'),
('WCS Display:', 'label', 'WCS Display', 'combobox'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.wcs_coords.set_tooltip("Set WCS coordinate system")
b.wcs_display.set_tooltip("Set WCS display format")
# Setup WCS coords method choice
combobox = b.wcs_coords
index = 0
for name in wcsmod.coord_types:
combobox.append_text(name)
index += 1
method = self.t_.get('wcs_coords', "")
try:
index = wcsmod.coord_types.index(method)
combobox.set_index(index)
except ValueError:
pass
combobox.add_callback('activated', self.set_wcs_params_cb)
# Setup WCS display format method choice
combobox = b.wcs_display
index = 0
for name in wcsmod.display_types:
combobox.append_text(name)
index += 1
method = self.t_.get('wcs_display', "sexagesimal")
try:
index = wcsmod.display_types.index(method)
combobox.set_index(index)
except ValueError:
pass
combobox.add_callback('activated', self.set_wcs_params_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
# ZOOM OPTIONS
fr = Widgets.Frame("Zoom")
captions = (('Zoom Alg:', 'label', 'Zoom Alg', 'combobox'),
('Zoom Rate:', 'label', 'Zoom Rate', 'spinfloat'),
('Stretch XY:', 'label', 'Stretch XY', 'combobox'),
('Stretch Factor:', 'label', 'Stretch Factor', 'spinfloat'),
('Scale X:', 'label', 'Scale X', 'entryset'),
('Scale Y:', 'label', 'Scale Y', 'entryset'),
('Scale Min:', 'label', 'Scale Min', 'entryset'),
('Scale Max:', 'label', 'Scale Max', 'entryset'),
('Interpolation:', 'label', 'Interpolation', 'combobox'),
('Zoom Defaults', 'button'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
index = 0
for name in self.zoomalg_names:
b.zoom_alg.append_text(name.capitalize())
index += 1
zoomalg = self.t_.get('zoom_algorithm', "step")
try:
index = self.zoomalg_names.index(zoomalg)
b.zoom_alg.set_index(index)
except Exception:
pass
b.zoom_alg.set_tooltip("Choose Zoom algorithm")
b.zoom_alg.add_callback('activated', self.set_zoomalg_cb)
index = 0
for name in ('X', 'Y'):
b.stretch_xy.append_text(name)
index += 1
b.stretch_xy.set_index(0)
b.stretch_xy.set_tooltip("Stretch pixels in X or Y")
b.stretch_xy.add_callback('activated', self.set_stretch_cb)
b.stretch_factor.set_limits(1.0, 10.0, incr_value=0.10)
b.stretch_factor.set_value(1.0)
b.stretch_factor.set_decimals(8)
b.stretch_factor.add_callback('value-changed', self.set_stretch_cb)
b.stretch_factor.set_tooltip("Length of pixel relative to 1 on other side")
b.stretch_factor.set_enabled(zoomalg != 'step')
zoomrate = self.t_.get('zoom_rate', math.sqrt(2.0))
b.zoom_rate.set_limits(1.01, 10.0, incr_value=0.1)
b.zoom_rate.set_value(zoomrate)
b.zoom_rate.set_decimals(8)
b.zoom_rate.set_enabled(zoomalg != 'step')
b.zoom_rate.set_tooltip("Step rate of increase/decrease per zoom level")
b.zoom_rate.add_callback('value-changed', self.set_zoomrate_cb)
b.zoom_defaults.add_callback('activated', self.set_zoom_defaults_cb)
scale_x, scale_y = self.fitsimage.get_scale_xy()
b.scale_x.set_tooltip("Set the scale in X axis")
b.scale_x.set_text(str(scale_x))
b.scale_x.add_callback('activated', self.set_scale_cb)
b.scale_y.set_tooltip("Set the scale in Y axis")
b.scale_y.set_text(str(scale_y))
b.scale_y.add_callback('activated', self.set_scale_cb)
scale_min, scale_max = self.t_['scale_min'], self.t_['scale_max']
b.scale_min.set_text(str(scale_min))
b.scale_min.add_callback('activated', self.set_scale_limit_cb)
b.scale_min.set_tooltip("Set the minimum allowed scale in any axis")
b.scale_max.set_text(str(scale_max))
b.scale_max.add_callback('activated', self.set_scale_limit_cb)
b.scale_min.set_tooltip("Set the maximum allowed scale in any axis")
index = 0
for name in trcalc.interpolation_methods:
b.interpolation.append_text(name)
index += 1
interp = self.t_.get('interpolation', "basic")
try:
index = trcalc.interpolation_methods.index(interp)
except ValueError:
# previous choice might not be available if preferences
# were saved when opencv was being used--if so, default
# to "basic"
index = trcalc.interpolation_methods.index('basic')
b.interpolation.set_index(index)
b.interpolation.set_tooltip("Choose interpolation method")
b.interpolation.add_callback('activated', self.set_interp_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
# PAN OPTIONS
fr = Widgets.Frame("Panning")
captions = (('Pan X:', 'label', 'Pan X', 'entry',
'WCS sexagesimal', 'checkbutton'),
('Pan Y:', 'label', 'Pan Y', 'entry',
'Apply Pan', 'button'),
('Pan Coord:', 'label', 'Pan Coord', 'combobox'),
('Center Image', 'button', 'Mark Center', 'checkbutton'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
pan_x, pan_y = self.fitsimage.get_pan()
coord_offset = self.fv.settings.get('pixel_coords_offset', 0.0)
pan_coord = self.t_.get('pan_coord', "data")
if pan_coord == 'data':
pan_x, pan_y = pan_x + coord_offset, pan_y + coord_offset
b.pan_x.set_tooltip("Coordinate for the pan position in X axis")
b.pan_x.set_text(str(pan_x))
#b.pan_x.add_callback('activated', self.set_pan_cb)
b.pan_y.set_tooltip("Coordinate for the pan position in Y axis")
b.pan_y.set_text(str(pan_y))
#b.pan_y.add_callback('activated', self.set_pan_cb)
b.apply_pan.add_callback('activated', self.set_pan_cb)
b.apply_pan.set_tooltip("Set the pan position")
b.wcs_sexagesimal.set_tooltip("Display pan position in sexagesimal")
b.wcs_sexagesimal.add_callback('activated',
lambda w, tf: self._update_pan_coords())
index = 0
for name in self.pancoord_options:
b.pan_coord.append_text(name)
index += 1
index = self.pancoord_options.index(pan_coord)
b.pan_coord.set_index(index)
b.pan_coord.set_tooltip("Pan coordinates type")
b.pan_coord.add_callback('activated', self.set_pan_coord_cb)
b.center_image.set_tooltip("Set the pan position to center of the image")
b.center_image.add_callback('activated', self.center_image_cb)
b.mark_center.set_tooltip("Mark the center (pan locator)")
b.mark_center.add_callback('activated', self.set_misc_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
fr = Widgets.Frame("New Images")
captions = (('Cut New:', 'label', 'Cut New', 'combobox'),
('Zoom New:', 'label', 'Zoom New', 'combobox'),
('Center New:', 'label', 'Center New', 'combobox'),
('Follow New', 'checkbutton', 'Raise New', 'checkbutton'),
('Create thumbnail', 'checkbutton'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
combobox = b.cut_new
index = 0
for name in self.autocut_options:
combobox.append_text(name)
index += 1
option = self.t_.get('autocuts', "off")
index = self.autocut_options.index(option)
combobox.set_index(index)
combobox.add_callback('activated', self.set_autocuts_cb)
b.cut_new.set_tooltip("Automatically set cut levels for new images")
combobox = b.zoom_new
index = 0
for name in self.autozoom_options:
combobox.append_text(name)
index += 1
option = self.t_.get('autozoom', "off")
index = self.autozoom_options.index(option)
combobox.set_index(index)
combobox.add_callback('activated', self.set_autozoom_cb)
b.zoom_new.set_tooltip("Automatically fit new images to window")
combobox = b.center_new
index = 0
for name in self.autocenter_options:
combobox.append_text(name)
index += 1
option = self.t_.get('autocenter', "off")
# Hack to convert old values that used to be T/F
if isinstance(option, bool):
choice = {True: 'on', False: 'off'}
option = choice[option]
index = self.autocenter_options.index(option)
combobox.set_index(index)
combobox.add_callback('activated', self.set_autocenter_cb)
b.center_new.set_tooltip("Automatically center new images in window")
b.follow_new.set_tooltip("View new images as they arrive")
b.raise_new.set_tooltip("Raise and focus tab for new images")
b.create_thumbnail.set_tooltip("Create thumbnail for new images")
self.w.follow_new.set_state(True)
self.w.follow_new.add_callback('activated', self.set_chprefs_cb)
self.w.raise_new.set_state(True)
self.w.raise_new.add_callback('activated', self.set_chprefs_cb)
self.w.create_thumbnail.set_state(True)
self.w.create_thumbnail.add_callback('activated', self.set_chprefs_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
exp = Widgets.Expander("General")
captions = (('Num Images:', 'label', 'Num Images', 'entryset'),
('Sort Order:', 'label', 'Sort Order', 'combobox'),
('Use scrollbars', 'checkbutton',
'Preload Images', 'checkbutton'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.num_images.set_tooltip(
"Maximum number of in memory images in channel (0==unlimited)")
num_images = self.t_.get('numImages', 0)
self.w.num_images.set_text(str(num_images))
self.w.num_images.add_callback('activated', self.set_buffer_cb)
combobox = b.sort_order
index = 0
for name in self.sort_options:
combobox.append_text(name)
index += 1
option = self.t_.get('sort_order', 'loadtime')
index = self.sort_options.index(option)
combobox.set_index(index)
combobox.add_callback('activated', self.set_sort_cb)
b.sort_order.set_tooltip("Sort order for images in channel")
scrollbars = self.t_.get('scrollbars', 'off')
self.w.use_scrollbars.set_state(scrollbars in ['on', 'auto'])
self.w.use_scrollbars.add_callback('activated', self.set_scrollbars_cb)
b.use_scrollbars.set_tooltip("Use scrollbars around viewer")
preload_images = self.t_.get('preload_images', False)
self.w.preload_images.set_state(preload_images)
self.w.preload_images.add_callback('activated', self.set_preload_cb)
b.preload_images.set_tooltip(
"Preload adjacent images to speed up access")
fr = Widgets.Frame()
fr.set_widget(w)
exp.set_widget(fr)
vbox.add_widget(exp, stretch=0)
exp = Widgets.Expander("Remember")
captions = (('Restore Scale', 'checkbutton',
'Restore Pan', 'checkbutton'),
('Restore Transform', 'checkbutton',
'Restore Rotation', 'checkbutton'),
('Restore Cuts', 'checkbutton',
'Restore Color Map', 'checkbutton'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
self.w.restore_scale.set_state(self.t_.get('profile_use_scale', False))
self.w.restore_scale.add_callback('activated', self.set_profile_cb)
self.w.restore_scale.set_tooltip("Remember scale with image")
self.w.restore_pan.set_state(self.t_.get('profile_use_pan', False))
self.w.restore_pan.add_callback('activated', self.set_profile_cb)
self.w.restore_pan.set_tooltip("Remember pan position with image")
self.w.restore_transform.set_state(
self.t_.get('profile_use_transform', False))
self.w.restore_transform.add_callback('activated', self.set_profile_cb)
self.w.restore_transform.set_tooltip("Remember transform with image")
self.w.restore_rotation.set_state(
self.t_.get('profile_use_rotation', False))
self.w.restore_rotation.add_callback('activated', self.set_profile_cb)
self.w.restore_rotation.set_tooltip("Remember rotation with image")
self.w.restore_cuts.set_state(self.t_.get('profile_use_cuts', False))
self.w.restore_cuts.add_callback('activated', self.set_profile_cb)
self.w.restore_cuts.set_tooltip("Remember cut levels with image")
self.w.restore_color_map.set_state(
self.t_.get('profile_use_color_map', False))
self.w.restore_color_map.add_callback('activated', self.set_profile_cb)
self.w.restore_color_map.set_tooltip("Remember color map with image")
fr = Widgets.Frame()
fr.set_widget(w)
exp.set_widget(fr)
vbox.add_widget(exp, stretch=0)
exp = Widgets.Expander("ICC Profiles")
captions = (('Output ICC profile:', 'label', 'Output ICC profile',
'combobox'),
('Rendering intent:', 'label', 'Rendering intent',
'combobox'),
('Proof ICC profile:', 'label', 'Proof ICC profile',
'combobox'),
('Proof intent:', 'label', 'Proof intent', 'combobox'),
('__x', 'spacer', 'Black point compensation', 'checkbutton'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
value = self.t_.get('icc_output_profile', None)
combobox = b.output_icc_profile
index = 0
for name in self.icc_profiles:
combobox.append_text(str(name))
index += 1
try:
index = self.icc_profiles.index(value)
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_icc_profile_cb)
combobox.set_tooltip("ICC profile for the viewer display")
value = self.t_.get('icc_output_intent', 'perceptual')
combobox = b.rendering_intent
index = 0
for name in self.icc_intents:
combobox.append_text(name)
index += 1
try:
index = self.icc_intents.index(value)
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_icc_profile_cb)
combobox.set_tooltip("Rendering intent for the viewer display")
value = self.t_.get('icc_proof_profile', None)
combobox = b.proof_icc_profile
index = 0
for name in self.icc_profiles:
combobox.append_text(str(name))
index += 1
try:
index = self.icc_profiles.index(value)
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_icc_profile_cb)
combobox.set_tooltip("ICC profile for soft proofing")
value = self.t_.get('icc_proof_intent', None)
combobox = b.proof_intent
index = 0
for name in self.icc_intents:
combobox.append_text(name)
index += 1
try:
index = self.icc_intents.index(value)
combobox.set_index(index)
except Exception:
pass
combobox.add_callback('activated', self.set_icc_profile_cb)
combobox.set_tooltip("Rendering intent for soft proofing")
value = self.t_.get('icc_black_point_compensation', False)
b.black_point_compensation.set_state(value)
b.black_point_compensation.add_callback(
'activated', self.set_icc_profile_cb)
b.black_point_compensation.set_tooltip("Use black point compensation")
fr = Widgets.Frame()
fr.set_widget(w)
exp.set_widget(fr)
vbox.add_widget(exp, stretch=0)
top.add_widget(sw, stretch=1)
btns = Widgets.HBox()
btns.set_spacing(4)
btns.set_border_width(4)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn)
btn = Widgets.Button("Help")
btn.add_callback('activated', lambda w: self.help())
btns.add_widget(btn, stretch=0)
btn = Widgets.Button("Save Settings")
btn.add_callback('activated', lambda w: self.save_preferences())
btns.add_widget(btn)
btns.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns, stretch=0)
container.add_widget(top, stretch=1)
self.gui_up = True
def set_cmap_cb(self, w, index):
"""This callback is invoked when the user selects a new color
map from the preferences pane."""
name = cmap.get_names()[index]
self.t_.set(color_map=name)
def set_imap_cb(self, w, index):
"""This callback is invoked when the user selects a new intensity
map from the preferences pane."""
name = imap.get_names()[index]
self.t_.set(intensity_map=name)
def set_calg_cb(self, w, index):
"""This callback is invoked when the user selects a new color
hashing algorithm from the preferences pane."""
#index = w.get_index()
name = self.calg_names[index]
self.t_.set(color_algorithm=name)
def set_tablesize_cb(self, w):
value = int(w.get_text())
self.t_.set(color_hashsize=value)
def set_default_cmaps(self):
cmap_name = "gray"
imap_name = "ramp"
index = self.cmap_names.index(cmap_name)
self.w.cmap_choice.set_index(index)
index = self.imap_names.index(imap_name)
self.w.imap_choice.set_index(index)
self.t_.set(color_map=cmap_name, intensity_map=imap_name)
def set_default_distmaps(self):
name = 'linear'
index = self.calg_names.index(name)
self.w.calg_choice.set_index(index)
hashsize = 65535
## self.w.table_size.set_text(str(hashsize))
self.t_.set(color_algorithm=name, color_hashsize=hashsize)
def set_zoomrate_cb(self, w, rate):
self.t_.set(zoom_rate=rate)
def set_zoomrate_ext_cb(self, setting, value):
if not self.gui_up:
return
self.w.zoom_rate.set_value(value)
def set_zoomalg_cb(self, w, idx):
self.t_.set(zoom_algorithm=self.zoomalg_names[idx])
def set_zoomalg_ext_cb(self, setting, value):
if not self.gui_up:
return
if value == 'step':
self.w.zoom_alg.set_index(0)
self.w.zoom_rate.set_enabled(False)
self.w.stretch_factor.set_enabled(False)
else:
self.w.zoom_alg.set_index(1)
self.w.zoom_rate.set_enabled(True)
self.w.stretch_factor.set_enabled(True)
def set_interp_cb(self, w, idx):
self.t_.set(interpolation=trcalc.interpolation_methods[idx])
def scalebase_changed_ext_cb(self, setting, value):
if not self.gui_up:
return
scale_x_base, scale_y_base = self.fitsimage.get_scale_base_xy()
ratio = float(scale_x_base) / float(scale_y_base)
if ratio < 1.0:
# Y is stretched
idx = 1
ratio = 1.0 / ratio
elif ratio > 1.0:
# X is stretched
idx = 0
else:
idx = self.w.stretch_xy.get_index()
# Update stretch controls to reflect actual scale
self.w.stretch_xy.set_index(idx)
self.w.stretch_factor.set_value(ratio)
def set_zoom_defaults_cb(self, w):
rate = math.sqrt(2.0)
self.w.stretch_factor.set_value(1.0)
self.t_.set(zoom_algorithm='step', zoom_rate=rate,
scale_x_base=1.0, scale_y_base=1.0)
def set_stretch_cb(self, *args):
axis = self.w.stretch_xy.get_index()
value = self.w.stretch_factor.get_value()
if axis == 0:
self.t_.set(scale_x_base=value, scale_y_base=1.0)
else:
self.t_.set(scale_x_base=1.0, scale_y_base=value)
def set_autocenter_cb(self, w, idx):
option = self.autocenter_options[idx]
self.fitsimage.set_autocenter(option)
self.t_.set(autocenter=option)
def autocenter_changed_ext_cb(self, setting, option):
if not self.gui_up:
return
index = self.autocenter_options.index(option)
self.w.center_new.set_index(index)
def set_scale_cb(self, w, val):
scale_x = float(self.w.scale_x.get_text())
scale_y = float(self.w.scale_y.get_text())
self.fitsimage.scale_to(scale_x, scale_y)
def scale_changed_ext_cb(self, setting, value):
if not self.gui_up:
return
scale_x, scale_y = value
self.w.scale_x.set_text(str(scale_x))
self.w.scale_y.set_text(str(scale_y))
def set_scale_limit_cb(self, *args):
scale_min = self.w.scale_min.get_text().lower()
if scale_min == 'none':
scale_min = None
else:
scale_min = float(scale_min)
scale_max = self.w.scale_max.get_text().lower()
if scale_max == 'none':
scale_max = None
else:
scale_max = float(scale_max)
self.t_.set(scale_min=scale_min, scale_max=scale_max)
def set_autozoom_cb(self, w, idx):
option = self.autozoom_options[idx]
self.fitsimage.enable_autozoom(option)
self.t_.set(autozoom=option)
def autozoom_changed_ext_cb(self, setting, option):
if not self.gui_up:
return
index = self.autozoom_options.index(option)
self.w.zoom_new.set_index(index)
def cut_levels(self, w):
fitsimage = self.fitsimage
loval, hival = fitsimage.get_cut_levels()
try:
lostr = self.w.cut_low.get_text().strip()
if lostr != '':
loval = float(lostr)
histr = self.w.cut_high.get_text().strip()
if histr != '':
hival = float(histr)
self.logger.debug("locut=%f hicut=%f" % (loval, hival))
return fitsimage.cut_levels(loval, hival)
except Exception as e:
self.fv.show_error("Error cutting levels: %s" % (str(e)))
return True
def auto_levels(self, w):
self.fitsimage.auto_levels()
def cutset_cb(self, setting, value):
if not self.gui_up:
return
loval, hival = value
self.w.cut_low_value.set_text('%.4g' % (loval))
self.w.cut_high_value.set_text('%.4g' % (hival))
def config_autocut_params(self, method):
try:
index = self.autocut_methods.index(method)
self.w.auto_method.set_index(index)
except Exception:
pass
# remove old params
self.w.acvbox.remove_all()
# Create new autocuts object of the right kind
ac_class = AutoCuts.get_autocuts(method)
# Build up a set of control widgets for the autocuts
# algorithm tweakable parameters
paramlst = ac_class.get_params_metadata()
# Get the canonical version of this object stored in our cache
# and make a ParamSet from it
params = self.autocuts_cache.setdefault(method, Bunch.Bunch())
self.ac_params = ParamSet.ParamSet(self.logger, params)
# Build widgets for the parameter/attribute list
w = self.ac_params.build_params(paramlst,
orientation=self.orientation)
self.ac_params.add_callback('changed', self.autocut_params_changed_cb)
# Add this set of widgets to the pane
self.w.acvbox.add_widget(w, stretch=1)
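        # Rebuilding the ParamSet whenever the algorithm changes keeps this
        # pane in sync: each AutoCuts class advertises its own tweakable
        # parameters through get_params_metadata().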
def set_autocut_method_ext_cb(self, setting, value):
if not self.gui_up:
return
autocut_method = self.t_['autocut_method']
self.fv.gui_do(self.config_autocut_params, autocut_method)
def set_autocut_params_ext_cb(self, setting, value):
if not self.gui_up:
return
params = self.t_['autocut_params']
params_d = dict(params) # noqa
self.ac_params.update_params(params_d)
#self.fv.gui_do(self.ac_params.params_to_widgets)
def set_autocut_method_cb(self, w, idx):
method = self.autocut_methods[idx]
self.config_autocut_params(method)
args, kwdargs = self.ac_params.get_params()
params = list(kwdargs.items())
self.t_.set(autocut_method=method, autocut_params=params)
def autocut_params_changed_cb(self, paramObj, ac_obj):
"""This callback is called when the user changes the attributes of
an object via the paramSet.
"""
args, kwdargs = paramObj.get_params()
params = list(kwdargs.items())
self.t_.set(autocut_params=params)
def set_autocuts_cb(self, w, index):
option = self.autocut_options[index]
self.fitsimage.enable_autocuts(option)
self.t_.set(autocuts=option)
def autocuts_changed_ext_cb(self, setting, option):
self.logger.debug("autocuts changed to %s" % option)
index = self.autocut_options.index(option)
if self.gui_up:
self.w.cut_new.set_index(index)
def set_transforms_cb(self, *args):
flip_x = self.w.flip_x.get_state()
flip_y = self.w.flip_y.get_state()
swap_xy = self.w.swap_xy.get_state()
self.t_.set(flip_x=flip_x, flip_y=flip_y, swap_xy=swap_xy)
return True
def set_transform_ext_cb(self, setting, value):
if not self.gui_up:
return
flip_x, flip_y, swap_xy = (
self.t_['flip_x'], self.t_['flip_y'], self.t_['swap_xy'])
self.w.flip_x.set_state(flip_x)
self.w.flip_y.set_state(flip_y)
self.w.swap_xy.set_state(swap_xy)
def rgbmap_changed_ext_cb(self, setting, value):
if not self.gui_up:
return
calg_name = self.t_['color_algorithm']
try:
idx = self.calg_names.index(calg_name)
        except ValueError:
idx = 0
self.w.algorithm.set_index(idx)
cmap_name = self.t_['color_map']
try:
idx = self.cmap_names.index(cmap_name)
        except ValueError:
idx = 0
self.w.colormap.set_index(idx)
imap_name = self.t_['intensity_map']
try:
idx = self.imap_names.index(imap_name)
        except ValueError:
idx = 0
self.w.intensity.set_index(idx)
def set_buflen_ext_cb(self, setting, value):
num_images = self.t_['numImages']
# update the datasrc length
chinfo = self.channel
chinfo.datasrc.set_bufsize(num_images)
self.logger.debug("num images was set to {0}".format(num_images))
if not self.gui_up:
return
self.w.num_images.set_text(str(num_images))
def set_sort_cb(self, w, index):
"""This callback is invoked when the user selects a new sort order
from the preferences pane."""
name = self.sort_options[index]
self.t_.set(sort_order=name)
def set_preload_cb(self, w, tf):
"""This callback is invoked when the user checks the preload images
box in the preferences pane."""
self.t_.set(preload_images=tf)
def set_scrollbars_cb(self, w, tf):
"""This callback is invoked when the user checks the 'Use Scrollbars'
box in the preferences pane."""
scrollbars = 'on' if tf else 'off'
self.t_.set(scrollbars=scrollbars)
def set_icc_profile_cb(self, setting, idx):
idx = self.w.output_icc_profile.get_index()
output_profile_name = self.icc_profiles[idx]
idx = self.w.rendering_intent.get_index()
intent_name = self.icc_intents[idx]
idx = self.w.proof_icc_profile.get_index()
proof_profile_name = self.icc_profiles[idx]
idx = self.w.proof_intent.get_index()
proof_intent = self.icc_intents[idx]
bpc = self.w.black_point_compensation.get_state()
self.t_.set(icc_output_profile=output_profile_name,
icc_output_intent=intent_name,
icc_proof_profile=proof_profile_name,
icc_proof_intent=proof_intent,
icc_black_point_compensation=bpc)
return True
def rotate_cb(self, w, deg):
#deg = self.w.rotate.get_value()
self.t_.set(rot_deg=deg)
return True
def set_rotate_ext_cb(self, setting, value):
if not self.gui_up:
return
self.w.rotate.set_value(value)
return True
def center_image_cb(self, *args):
self.fitsimage.center_image()
return True
def pan_changed_ext_cb(self, setting, value):
if not self.gui_up:
return
self._update_pan_coords()
def set_pan_cb(self, *args):
idx = self.w.pan_coord.get_index()
pan_coord = self.pancoord_options[idx]
pan_xs = self.w.pan_x.get_text().strip()
pan_ys = self.w.pan_y.get_text().strip()
# TODO: use current value for other coord if only one coord supplied
if (':' in pan_xs) or (':' in pan_ys):
# TODO: get maximal precision
pan_x = wcs.hmsStrToDeg(pan_xs)
pan_y = wcs.dmsStrToDeg(pan_ys)
pan_coord = 'wcs'
elif pan_coord == 'wcs':
pan_x = float(pan_xs)
pan_y = float(pan_ys)
else:
coord_offset = self.fv.settings.get('pixel_coords_offset', 0.0)
pan_x = float(pan_xs) - coord_offset
pan_y = float(pan_ys) - coord_offset
self.fitsimage.set_pan(pan_x, pan_y, coord=pan_coord)
return True
def _update_pan_coords(self):
pan_coord = self.t_.get('pan_coord', 'data')
pan_x, pan_y = self.fitsimage.get_pan(coord=pan_coord)
#self.logger.debug("updating pan coords (%s) %f %f" % (pan_coord, pan_x, pan_y))
if pan_coord == 'wcs':
use_sex = self.w.wcs_sexagesimal.get_state()
if use_sex:
pan_x = wcs.raDegToString(pan_x, format='%02d:%02d:%010.7f')
pan_y = wcs.decDegToString(pan_y, format='%s%02d:%02d:%09.7f')
else:
coord_offset = self.fv.settings.get('pixel_coords_offset', 0.0)
pan_x += coord_offset
pan_y += coord_offset
self.w.pan_x.set_text(str(pan_x))
self.w.pan_y.set_text(str(pan_y))
index = self.pancoord_options.index(pan_coord)
self.w.pan_coord.set_index(index)
def set_pan_coord_cb(self, w, idx):
pan_coord = self.pancoord_options[idx]
pan_x, pan_y = self.fitsimage.get_pan(coord=pan_coord)
self.t_.set(pan=(pan_x, pan_y), pan_coord=pan_coord)
#self._update_pan_coords()
return True
def restore_cb(self, *args):
self.t_.set(flip_x=False, flip_y=False, swap_xy=False,
rot_deg=0.0)
self.fitsimage.center_image()
return True
def set_misc_cb(self, *args):
markc = (self.w.mark_center.get_state() != 0)
self.t_.set(show_pan_position=markc)
self.fitsimage.show_pan_mark(markc)
return True
def set_chprefs_cb(self, *args):
switchnew = (self.w.follow_new.get_state() != 0)
raisenew = (self.w.raise_new.get_state() != 0)
genthumb = (self.w.create_thumbnail.get_state() != 0)
self.t_.set(switchnew=switchnew, raisenew=raisenew,
genthumb=genthumb)
def set_chprefs_ext_cb(self, *args):
if self.gui_up:
self.w.follow_new.set_state(self.t_['switchnew'])
self.w.raise_new.set_state(self.t_['raisenew'])
self.w.create_thumbnail.set_state(self.t_['genthumb'])
def set_profile_cb(self, *args):
restore_scale = (self.w.restore_scale.get_state() != 0)
restore_pan = (self.w.restore_pan.get_state() != 0)
restore_cuts = (self.w.restore_cuts.get_state() != 0)
restore_transform = (self.w.restore_transform.get_state() != 0)
restore_rotation = (self.w.restore_rotation.get_state() != 0)
restore_color_map = (self.w.restore_color_map.get_state() != 0)
self.t_.set(profile_use_scale=restore_scale, profile_use_pan=restore_pan,
profile_use_cuts=restore_cuts,
profile_use_transform=restore_transform,
profile_use_rotation=restore_rotation,
profile_use_color_map=restore_color_map)
def set_buffer_cb(self, *args):
num_images = int(self.w.num_images.get_text())
self.logger.debug("setting num images {0}".format(num_images))
self.t_.set(numImages=num_images)
def set_wcs_params_cb(self, *args):
idx = self.w.wcs_coords.get_index()
try:
ctype = wcsmod.coord_types[idx]
except IndexError:
ctype = 'icrs'
idx = self.w.wcs_display.get_index()
dtype = wcsmod.display_types[idx]
self.t_.set(wcs_coords=ctype, wcs_display=dtype)
def preferences_to_controls(self):
prefs = self.t_
# color map
rgbmap = self.fitsimage.get_rgbmap()
cm = rgbmap.get_cmap()
try:
index = self.cmap_names.index(cm.name)
except ValueError:
# may be a custom color map installed
index = 0
self.w.cmap_choice.set_index(index)
# color dist algorithm
calg = rgbmap.get_hash_algorithm()
index = self.calg_names.index(calg)
self.w.calg_choice.set_index(index)
## size = rgbmap.get_hash_size()
## self.w.table_size.set_text(str(size))
# intensity map
im = rgbmap.get_imap()
try:
index = self.imap_names.index(im.name)
except ValueError:
# may be a custom intensity map installed
index = 0
self.w.imap_choice.set_index(index)
# TODO: this is a HACK to get around Qt's callbacks
# on setting widget values--need a way to disable callbacks
# for direct setting
auto_zoom = prefs.get('autozoom', 'off')
# zoom settings
zoomalg = prefs.get('zoom_algorithm', "step")
index = self.zoomalg_names.index(zoomalg)
self.w.zoom_alg.set_index(index)
zoomrate = self.t_.get('zoom_rate', math.sqrt(2.0))
self.w.zoom_rate.set_value(zoomrate)
self.w.zoom_rate.set_enabled(zoomalg != 'step')
self.w.stretch_factor.set_enabled(zoomalg != 'step')
self.scalebase_changed_ext_cb(prefs, None)
scale_x, scale_y = self.fitsimage.get_scale_xy()
self.w.scale_x.set_text(str(scale_x))
self.w.scale_y.set_text(str(scale_y))
scale_min = prefs.get('scale_min', None)
self.w.scale_min.set_text(str(scale_min))
scale_max = prefs.get('scale_max', None)
self.w.scale_max.set_text(str(scale_max))
# panning settings
self._update_pan_coords()
self.w.mark_center.set_state(prefs.get('show_pan_position', False))
# transform settings
self.w.flip_x.set_state(prefs.get('flip_x', False))
self.w.flip_y.set_state(prefs.get('flip_y', False))
self.w.swap_xy.set_state(prefs.get('swap_xy', False))
self.w.rotate.set_value(prefs.get('rot_deg', 0.00))
# auto cuts settings
autocuts = prefs.get('autocuts', 'off')
index = self.autocut_options.index(autocuts)
self.w.cut_new.set_index(index)
autocut_method = prefs.get('autocut_method', None)
if autocut_method is None:
autocut_method = 'histogram'
else:
## params = prefs.get('autocut_params', {})
## p = self.autocuts_cache.setdefault(autocut_method, {})
## p.update(params)
pass
self.config_autocut_params(autocut_method)
# auto zoom settings
auto_zoom = prefs.get('autozoom', 'off')
index = self.autozoom_options.index(auto_zoom)
self.w.zoom_new.set_index(index)
# wcs settings
method = prefs.get('wcs_coords', "icrs")
try:
index = wcsmod.coord_types.index(method)
self.w.wcs_coords.set_index(index)
except ValueError:
pass
method = prefs.get('wcs_display', "sexagesimal")
try:
index = wcsmod.display_types.index(method)
self.w.wcs_display.set_index(index)
except ValueError:
pass
# misc settings
prefs.setdefault('switchnew', True)
self.w.follow_new.set_state(prefs['switchnew'])
prefs.setdefault('raisenew', True)
self.w.raise_new.set_state(prefs['raisenew'])
prefs.setdefault('genthumb', True)
self.w.create_thumbnail.set_state(prefs['genthumb'])
num_images = prefs.get('numImages', 0)
self.w.num_images.set_text(str(num_images))
prefs.setdefault('preload_images', False)
self.w.preload_images.set_state(prefs['preload_images'])
# profile settings
prefs.setdefault('profile_use_scale', False)
self.w.restore_scale.set_state(prefs['profile_use_scale'])
prefs.setdefault('profile_use_pan', False)
self.w.restore_pan.set_state(prefs['profile_use_pan'])
prefs.setdefault('profile_use_cuts', False)
self.w.restore_cuts.set_state(prefs['profile_use_cuts'])
prefs.setdefault('profile_use_transform', False)
self.w.restore_transform.set_state(prefs['profile_use_transform'])
prefs.setdefault('profile_use_rotation', False)
self.w.restore_rotation.set_state(prefs['profile_use_rotation'])
prefs.setdefault('profile_use_color_map', False)
self.w.restore_color_map.set_state(prefs['profile_use_color_map'])
def save_preferences(self):
self.t_.save()
def close(self):
self.fv.stop_local_plugin(self.chname, str(self))
return True
def start(self):
self.preferences_to_controls()
def pause(self):
pass
def resume(self):
pass
def stop(self):
self.gui_up = False
def redo(self):
pass
def __str__(self):
return 'preferences'
# END
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/event_handling/viewlims.py | 6 | 2880 | # Creates two identical panels. Zooming in on the right panel will show
# a rectangle in the first panel, denoting the zoomed region.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
# We just subclass Rectangle so that it can be called with an Axes
# instance, causing the rectangle to update its shape to match the
# bounds of the Axes
class UpdatingRect(Rectangle):
def __call__(self, ax):
self.set_bounds(*ax.viewLim.bounds)
ax.figure.canvas.draw_idle()
# A class that will regenerate a fractal set as we zoom in, so that you
# can actually see the increasing detail. A box in the left panel will show
# the area to which we are zoomed.
class MandlebrotDisplay(object):
def __init__(self, h=500, w=500, niter=50, radius=2., power=2):
self.height = h
self.width = w
self.niter = niter
self.radius = radius
self.power = power
def __call__(self, xstart, xend, ystart, yend):
self.x = np.linspace(xstart, xend, self.width)
self.y = np.linspace(ystart, yend, self.height).reshape(-1,1)
c = self.x + 1.0j * self.y
threshold_time = np.zeros((self.height, self.width))
        z = np.zeros(threshold_time.shape, dtype=complex)
        mask = np.ones(threshold_time.shape, dtype=bool)
for i in range(self.niter):
z[mask] = z[mask]**self.power + c[mask]
mask = (np.abs(z) < self.radius)
threshold_time += mask
return threshold_time
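    # threshold_time holds, per pixel, the number of iterations for which |z|
    # stayed inside `radius` -- the standard escape-time shading of the
    # Mandelbrot set.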
def ax_update(self, ax):
ax.set_autoscale_on(False) # Otherwise, infinite loop
#Get the number of points from the number of pixels in the window
dims = ax.axesPatch.get_window_extent().bounds
self.width = int(dims[2] + 0.5)
        self.height = int(dims[3] + 0.5)
#Get the range for the new area
xstart,ystart,xdelta,ydelta = ax.viewLim.bounds
xend = xstart + xdelta
yend = ystart + ydelta
# Update the image object with our new data and extent
im = ax.images[-1]
im.set_data(self.__call__(xstart, xend, ystart, yend))
im.set_extent((xstart, xend, ystart, yend))
ax.figure.canvas.draw_idle()
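# Note that ax_update mutates the existing AxesImage via set_data/set_extent
# instead of calling imshow again, so each zoom only costs a redraw.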
md = MandlebrotDisplay()
Z = md(-2., 0.5, -1.25, 1.25)
fig1, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(Z, origin='lower', extent=(md.x.min(), md.x.max(), md.y.min(), md.y.max()))
ax2.imshow(Z, origin='lower', extent=(md.x.min(), md.x.max(), md.y.min(), md.y.max()))
rect = UpdatingRect([0, 0], 0, 0, facecolor='None', edgecolor='black')
rect.set_bounds(*ax2.viewLim.bounds)
ax1.add_patch(rect)
# Connect for changing the view limits
ax2.callbacks.connect('xlim_changed', rect)
ax2.callbacks.connect('ylim_changed', rect)
ax2.callbacks.connect('xlim_changed', md.ax_update)
ax2.callbacks.connect('ylim_changed', md.ax_update)
plt.show()
| mit |
abhiver222/perkt | face_recognition.py | 2 | 5724 | import cv2
import sys
#import matplotlib.pyplot as pt
import numpy as np
import numpy.linalg as la
import math as mt
# Content of our eigens
# there would be five images of each person
# the columns would be the frob norm of each type
# 7 entries for each person:
# 1) Smiling
# 2) Sad
# 3) Serious
# 4) Blank
# 5) If wearing specs then without specs
# 6) looking left
# 7) looking right
#ournorms = {'Abhishek':[5916.56,6155.725,5835.83,6033.245,5922.402,6207.052,6028.91],
#            'Akshay':[6268.704,6335.443,6119.169,6277.252,6126.155,6232.754,6294.937],
#            'Chris':[6479.241,6297.295,6477.624,6463.082,6385.727,6275.596,6200.595],
#            'Tim':[6507.45,6569.225,6637.975,6731.95,6546.934,6239.888,6529.477]}
ournorms = {'Abhishek':[5866.278,6229.924,6123.536,5988.862,5966.183,5990.367,5661.118],
            'Akshay':[6748.139,5658.617,6238.200,6671.678,6228.899,6167.573,5830.901],
            'Chris':[6312.924,6374.821,6465.274,6275.596,6596.240,6382.099,6456.81],  # left right serious
            'Tim':[6226.022,6010.737,6107.618,6107.386,5994.380,5916.834,7052.43]}

# per-person tolerance on the norm comparison
indbuffervals = {'Abhishek':100,
                 'Akshay':100,
                 'Chris':50,
                 'Tim':150}

# hardcode values into ournorms above
imagePath = sys.argv[1]

def recognizeFace(image, faces):
    retval = True
    if len(faces) > 10:
        print("Fuck it too many faces shoot everyone")
        return True, 100
    for i in range(faces.shape[0]):
        x, y, w, h = faces[i]
        # pad each detection out to a fixed 400x400 crop
        bufw = (400 - w)/2
        bufh = (400 - h)/2
        inmod = image[y-bufw:y+w+bufw, x-bufh:x+h+bufh]
        retwhat = checker(inmod)
        retval = retwhat and retval
    return retval, len(faces)

def checker(inmod):
    # compare the crop's Frobenius norm against every stored reference norm
    tempnorm = la.norm(inmod)
    retval = False
    for name, val in ournorms.iteritems():
        for j in val:
            if np.abs(j - tempnorm) < indbuffervals[name]:
                retval = True
                print("is")
                print(name)
                break
        if retval:
            break
    if not retval:
        print("not")
        print(name)
    return retval

# Get values from command line
def check(image):
    imagePath = image
    cascPath = "haarcascade_frontalface_default.xml"
    # Create the haar cascade
    faceCascade = cv2.CascadeClassifier(cascPath)
    # Read the image
    image = cv2.imread(imagePath)
    imnonmod = cv2.imread(imagePath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Detect faces in the image
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.25,
        minNeighbors=5,
        minSize=(40, 40)
    )
    print("Found {0} faces!".format(len(faces)))
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
    what = True
    if len(faces) > 0:
        what, number = recognizeFace(image, faces)
    # return what to the arduino
    if what is False:
        print("intruder detected")
    cv2.imshow("Faces found", image)
    #cv2.waitKey(0)
    return what

check(imagePath)
| mit |
Unidata/MetPy | v0.11/startingguide-1.py | 4 | 1432 | import matplotlib.pyplot as plt
import numpy as np
import metpy.calc as mpcalc
from metpy.plots import SkewT
from metpy.units import units
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig)
# Create arrays of pressure, temperature, dewpoint, and wind components
p = [902, 897, 893, 889, 883, 874, 866, 857, 849, 841, 833, 824, 812, 796, 776, 751,
727, 704, 680, 656, 629, 597, 565, 533, 501, 468, 435, 401, 366, 331, 295, 258,
220, 182, 144, 106] * units.hPa
t = [-3, -3.7, -4.1, -4.5, -5.1, -5.8, -6.5, -7.2, -7.9, -8.6, -8.9, -7.6, -6, -5.1,
-5.2, -5.6, -5.4, -4.9, -5.2, -6.3, -8.4, -11.5, -14.9, -18.4, -21.9, -25.4,
-28, -32, -37, -43, -49, -54, -56, -57, -58, -60] * units.degC
td = [-22, -22.1, -22.2, -22.3, -22.4, -22.5, -22.6, -22.7, -22.8, -22.9, -22.4,
-21.6, -21.6, -21.9, -23.6, -27.1, -31, -38, -44, -46, -43, -37, -34, -36,
-42, -46, -49, -48, -47, -49, -55, -63, -72, -88, -93, -92] * units.degC
# Calculate parcel profile
prof = mpcalc.parcel_profile(p, t[0], td[0]).to('degC')
u = np.linspace(-10, 10, len(p)) * units.knots
v = np.linspace(-20, 20, len(p)) * units.knots
skew.plot(p, t, 'r')
skew.plot(p, td, 'g')
skew.plot(p, prof, 'k') # Plot parcel profile
skew.plot_barbs(p[::5], u[::5], v[::5])
skew.ax.set_xlim(-50, 15)
skew.ax.set_ylim(1000, 100)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
plt.show() | bsd-3-clause |
mattsmart/biomodels | oncogenesis_dynamics/firstpassage.py | 1 | 15435 | import matplotlib.pyplot as plt
import numpy as np
import time
from os import sep
from multiprocessing import Pool, cpu_count
from constants import OUTPUT_DIR, PARAMS_ID, PARAMS_ID_INV, COLOURS_DARK_BLUE
from data_io import read_varying_mean_sd_fpt_and_params, collect_fpt_mean_stats_and_params, read_fpt_and_params,\
write_fpt_and_params
from formulae import stoch_gillespie, stoch_tauleap_lowmem, stoch_tauleap, get_physical_fp_stable_and_not, map_init_name_to_init_cond
from params import Params
from presets import presets
from plotting import plot_table_params
def get_fpt(ensemble, init_cond, params, num_steps=1000000, establish_switch=False, brief=True):
# TODO could pass simmethod tau or gillespie to params and parse here
if establish_switch:
fpt_flag = False
establish_flag = True
else:
fpt_flag = True
establish_flag = False
fp_times = np.zeros(ensemble)
for i in xrange(ensemble):
if brief:
species_end, times_end = stoch_tauleap_lowmem(init_cond, num_steps, params, fpt_flag=fpt_flag,
establish_flag=establish_flag)
else:
species, times = stoch_gillespie(init_cond, num_steps, params, fpt_flag=fpt_flag,
establish_flag=establish_flag)
times_end = times[-1]
# plotting
#plt.plot(times, species)
#plt.show()
fp_times[i] = times_end
if establish_switch:
print "establish time is", fp_times[i]
return fp_times
def get_mean_fpt(init_cond, params, samplesize=32, establish_switch=False):
fpt = get_fpt(samplesize, init_cond, params, establish_switch=establish_switch)
return np.mean(fpt)
def wrapper_get_fpt(fn_args_dict):
np.random.seed() # TODO double check that this fixes cluster RNG issues
if fn_args_dict['kwargs'] is not None:
return get_fpt(*fn_args_dict['args'], **fn_args_dict['kwargs'])
else:
return get_fpt(*fn_args_dict['args'])
def fast_fp_times(ensemble, init_cond, params, num_processes, num_steps='default', establish_switch=False):
if num_steps == 'default':
kwargs_dict = {'num_steps': 1000000, 'establish_switch': establish_switch}
else:
kwargs_dict = {'num_steps': num_steps, 'establish_switch': establish_switch}
fn_args_dict = [0]*num_processes
print "NUM_PROCESSES:", num_processes
assert ensemble % num_processes == 0
for i in xrange(num_processes):
subensemble = ensemble / num_processes
print "process:", i, "job size:", subensemble, "runs"
fn_args_dict[i] = {'args': (subensemble, init_cond, params),
'kwargs': kwargs_dict}
t0 = time.time()
pool = Pool(num_processes)
results = pool.map(wrapper_get_fpt, fn_args_dict)
pool.close()
pool.join()
print "TIMER:", time.time() - t0
fp_times = np.zeros(ensemble)
for i, result in enumerate(results):
fp_times[i*subensemble:(i+1)*subensemble] = result
return fp_times
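# Each worker process simulates its own `subensemble` of independent runs;
# the np.random.seed() call in wrapper_get_fpt re-seeds per process so that
# forked workers do not all inherit an identical RNG state.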
def fast_mean_fpt_varying(param_vary_name, param_vary_values, params, num_processes, init_name="x_all", samplesize=30, establish_switch=False):
assert samplesize % num_processes == 0
mean_fpt_varying = [0]*len(param_vary_values)
sd_fpt_varying = [0]*len(param_vary_values)
for idx, pv in enumerate(param_vary_values):
params_step = params.mod_copy( {param_vary_name: pv} )
init_cond = map_init_name_to_init_cond(params, init_name)
fp_times = fast_fp_times(samplesize, init_cond, params_step, num_processes, establish_switch=establish_switch)
mean_fpt_varying[idx] = np.mean(fp_times)
sd_fpt_varying[idx] = np.std(fp_times)
return mean_fpt_varying, sd_fpt_varying
def fpt_histogram(fpt_list, params, figname_mod="", flag_show=False, flag_norm=True, flag_xlog10=False, flag_ylog10=False, fs=12):
ensemble_size = len(fpt_list)
bins = np.linspace(np.min(fpt_list), np.max(fpt_list), 50) #50)
#bins = np.arange(0, 3*1e4, 50) # to plot against FSP
# normalize
if flag_norm:
y_label = 'Probability'
weights = np.ones_like(fpt_list) / ensemble_size
else:
y_label = 'Frequency'
weights = np.ones_like(fpt_list)
# prep fig before axes mod
fig = plt.figure(figsize=(8,6), dpi=120)
ax = plt.gca()
# mod axes (log)
if flag_xlog10:
ax.set_xscale("log", nonposx='clip')
max_log = np.ceil(np.max(np.log10(fpt_list))) # TODO check this matches multihist
bins = np.logspace(0.1, max_log, 100)
if flag_ylog10:
ax.set_yscale("log", nonposx='clip')
# plot
plt.hist(fpt_list, bins=bins, alpha=0.6, weights=weights)
plt.hist(fpt_list, histtype='step', bins=bins, alpha=0.6, label=None, weights=weights, edgecolor='k', linewidth=0.5,
fill=False)
# draw mean line
#plt.axvline(np.mean(fpt_list), color='k', linestyle='dashed', linewidth=2)
# labels
plt.title('First-passage time histogram (%d runs) - %s' % (ensemble_size, params.system), fontsize=fs)
ax.set_xlabel('First-passage time (cell division timescale)', fontsize=fs)
ax.set_ylabel(y_label, fontsize=fs)
ax.tick_params(labelsize=fs)
# plt.locator_params(axis='x', nbins=4)
#plt.legend(loc='upper right', fontsize=fs)
# create table of params
plot_table_params(ax, params)
# save and show
plt_save = "fpt_histogram" + figname_mod
plt.savefig(OUTPUT_DIR + sep + plt_save + '.pdf', bbox_inches='tight')
if flag_show:
plt.show()
return ax
def fpt_histogram_multi(multi_fpt_list, labels, figname_mod="", fs=12, bin_linspace=80, colours=COLOURS_DARK_BLUE,
figsize=(8,6), ec='k', lw=0.5, flag_norm=False, flag_show=False, flag_xlog10=False,
flag_ylog10=False, flag_disjoint=False):
# resize fpt lists if not all same size (to the min size)
fpt_lengths = [len(fpt) for fpt in multi_fpt_list]
ensemble_size = np.min(fpt_lengths)
# cleanup data to same size
    if sum(np.array(fpt_lengths) - ensemble_size) > 0:
print "Resizing multi_fpt_list elements:", fpt_lengths, "to the min size of:", ensemble_size
for idx in xrange(len(fpt_lengths)):
multi_fpt_list[idx] = multi_fpt_list[idx][:ensemble_size]
bins = np.linspace(np.min(multi_fpt_list), np.max(multi_fpt_list), bin_linspace)
# normalize
if flag_norm:
y_label = 'Probability'
weights = np.ones_like(multi_fpt_list) / ensemble_size
else:
y_label = 'Frequency'
weights = np.ones_like(multi_fpt_list)
# prep fig before axes mod
fig = plt.figure(figsize=figsize, dpi=120)
ax = plt.gca()
# mod axes (log)
if flag_xlog10:
ax.set_xscale("log", nonposx='clip')
max_log = np.ceil(np.max(np.log10(multi_fpt_list)))
bins = np.logspace(0.1, max_log, 100)
if flag_ylog10:
ax.set_yscale("log", nonposx='clip')
# plot calls
if flag_disjoint:
plt.hist(multi_fpt_list, bins=bins, color=colours, label=labels, weights=weights, edgecolor=ec, linewidth=lw)
else:
for idx, fpt_list in enumerate(multi_fpt_list):
plt.hist(fpt_list, bins=bins, alpha=0.6, color=colours[idx], label=labels[idx],
weights=weights[idx,:])
plt.hist(fpt_list, histtype='step', bins=bins, alpha=0.6, color=colours[idx],
label=None,weights=weights[idx,:], edgecolor=ec, linewidth=lw, fill=False)
# labels
plt.title('First-passage time histogram (%d runs)' % (ensemble_size), fontsize=fs)
ax.set_xlabel('First-passage time (cell division timescale)', fontsize=fs)
ax.set_ylabel(y_label, fontsize=fs)
plt.legend(loc='upper right', fontsize=fs)
ax.tick_params(labelsize=fs)
# plt.locator_params(axis='x', nbins=4)
# save and show
plt_save = "fpt_multihistogram" + figname_mod
fig.savefig(OUTPUT_DIR + sep + plt_save + '.pdf', bbox_inches='tight')
if flag_show:
plt.show()
def plot_mean_fpt_varying(mean_fpt_varying, sd_fpt_varying, param_vary_name, param_set, params, samplesize, SEM_flag=True, show_flag=False, figname_mod=""):
if SEM_flag:
sd_fpt_varying = sd_fpt_varying / np.sqrt(samplesize) # s.d. from CLT since sample mean is approx N(mu, sd**2/n)
plt.errorbar(param_set, mean_fpt_varying, yerr=sd_fpt_varying, label="sim")
plt.title("Mean FP Time, %s varying (sample=%d)" % (param_vary_name, samplesize))
ax = plt.gca()
ax.set_xlabel(param_vary_name)
ax.set_ylabel('Mean FP time')
# log options
for i in xrange(len(mean_fpt_varying)):
print i, param_set[i], mean_fpt_varying[i], sd_fpt_varying[i]
flag_xlog10 = True
flag_ylog10 = True
if flag_xlog10:
ax.set_xscale("log", nonposx='clip')
#ax.set_xlim([0.8*1e2, 1*1e7])
if flag_ylog10:
ax.set_yscale("log", nonposx='clip')
#ax.set_ylim([0.8*1e2, 3*1e5])
# create table of params
plot_table_params(ax, params)
plt_save = "mean_fpt_varying" + figname_mod
plt.savefig(OUTPUT_DIR + sep + plt_save + '.png', bbox_inches='tight')
if show_flag:
plt.show()
return ax
if __name__ == "__main__":
# SCRIPT FLAGS
run_compute_fpt = False
run_read_fpt = False
run_generate_hist_multi = False
run_load_hist_multi = False
run_collect = False
run_means_read_and_plot = False
run_means_collect_and_plot = True
# SCRIPT PARAMETERS
establish_switch = True
brief = True
num_steps = 1000000 # default 1000000
ensemble = 1 # default 100
# DYNAMICS PARAMETERS
params = presets('preset_xyz_constant') # preset_xyz_constant, preset_xyz_constant_fast, valley_2hit
# OTHER PARAMETERS
init_cond = np.zeros(params.numstates, dtype=int)
init_cond[0] = int(params.N)
# PLOTTING
FS = 16
EC = 'k'
LW = 0.5
FIGSIZE=(8,6)
if run_compute_fpt:
fp_times = get_fpt(ensemble, init_cond, params, num_steps=num_steps, establish_switch=establish_switch, brief=brief)
write_fpt_and_params(fp_times, params)
fpt_histogram(fp_times, params, flag_show=True, figname_mod="XZ_model_withFeedback_mu1e-1")
if run_read_fpt:
dbdir = OUTPUT_DIR
dbdir_100 = dbdir + sep + "fpt_mean" + sep + "100_c95"
fp_times_xyz_100, params_a = read_fpt_and_params(dbdir_100)
dbdir_10k = dbdir + sep + "fpt_mean" + sep + "10k_c95"
fp_times_xyz_10k, params_b = read_fpt_and_params(dbdir_10k)
if run_generate_hist_multi:
ensemble = 21
num_proc = cpu_count() - 1
param_vary_id = "N"
param_idx = PARAMS_ID_INV[param_vary_id]
param_vary_values = [1e2, 1e3, 1e4]
param_vary_labels = ['A', 'B', 'C']
params_ensemble = [params.params_list[:] for _ in param_vary_values]
multi_fpt = np.zeros((len(param_vary_values), ensemble))
multi_fpt_labels = ['label' for _ in param_vary_values]
for idx, param_val in enumerate(param_vary_values):
param_val_string = "%s=%.3f" % (param_vary_id, param_val)
params_step = params.mod_copy({param_vary_id: param_val})
#fp_times = get_fpt(ensemble, init_cond, params_set[idx], num_steps=num_steps)
fp_times = fast_fp_times(ensemble, init_cond, params_step, num_proc, establish_switch=establish_switch)
write_fpt_and_params(fp_times, params_step, filename="fpt_multi", filename_mod=param_val_string)
multi_fpt[idx,:] = np.array(fp_times)
multi_fpt_labels[idx] = "%s (%s)" % (param_vary_labels[idx], param_val_string)
fpt_histogram_multi(multi_fpt, multi_fpt_labels, flag_show=True, flag_ylog10=False)
if run_load_hist_multi:
flag_norm = True
dbdir = OUTPUT_DIR + sep + "may25_100"
#dbdir_c80 = dbdir + "fpt_feedback_z_ens1040_c0.80_params"
c80_header = "fpt_feedback_z_ens1040_c80_N100"
c88_header = "fpt_feedback_z_ens1040_c88_N100"
c95_header = "fpt_feedback_z_ens1040_c95_N100"
fp_times_xyz_c80, params_a = read_fpt_and_params(dbdir, "%s_data.txt" % c80_header, "%s_params.csv" % c80_header)
fp_times_xyz_c88, params_b = read_fpt_and_params(dbdir, "%s_data.txt" % c88_header, "%s_params.csv" % c88_header)
fp_times_xyz_c95, params_c = read_fpt_and_params(dbdir, "%s_data.txt" % c95_header, "%s_params.csv" % c95_header)
fpt_histogram(fp_times_xyz_c88, params_b, flag_ylog10=False, figname_mod="_xyz_feedbackz_N10k_c88_may25")
plt.close('all')
fpt_histogram(fp_times_xyz_c88, params_b, flag_ylog10=True, figname_mod="_xyz_feedbackz_N10k_c88_may25_logy")
plt.close('all')
multi_fpt = [fp_times_xyz_c80, fp_times_xyz_c88, fp_times_xyz_c95]
labels = ("c=0.80 (Region I)", "c=0.88 (Region IV)", "c=0.95 (Region III)")
fpt_histogram_multi(multi_fpt, labels, flag_show=True, flag_ylog10=False, flag_norm=flag_norm, fs=FS, ec=EC, lw=LW, figsize=FIGSIZE)
fpt_histogram_multi(multi_fpt, labels, flag_show=True, flag_ylog10=True, flag_norm=flag_norm, fs=FS, ec=EC, lw=LW, figsize=FIGSIZE)
fpt_histogram_multi(multi_fpt, labels, flag_show=True, flag_ylog10=True, flag_norm=False, fs=FS, ec=EC, lw=LW, figsize=FIGSIZE, flag_disjoint=True)
if run_means_read_and_plot:
datafile = OUTPUT_DIR + sep + "fpt_stats_collected_mean_sd_varying_N.txt"
paramfile = OUTPUT_DIR + sep + "fpt_stats_collected_mean_sd_varying_N_params.csv"
samplesize=48
mean_fpt_varying, sd_fpt_varying, param_to_vary, param_set, params = \
read_varying_mean_sd_fpt_and_params(datafile, paramfile)
plt_axis = plot_mean_fpt_varying(mean_fpt_varying, sd_fpt_varying, param_to_vary, param_set, params, samplesize,
SEM_flag=True, show_flag=True, figname_mod="_%s_n%d" % (param_to_vary, samplesize))
"""
mu = params.mu
mixed_fp_zinf_at_N = [0.0]*len(param_set)
for idx, N in enumerate(param_set):
params_at_N = params.mod_copy( {'N': N} )
fps = get_physical_and_stable_fp(params_at_N)
assert len(fps) == 1
mixed_fp_zinf_at_N[idx] = fps[0][2]
plt_axis.plot(param_set, [1/(mu*n) for n in param_set], '-o', label="(mu*N)^-1")
plt_axis.plot(param_set, [1/(mu*zinf) for zinf in mixed_fp_zinf_at_N], '-o', label="(mu*z_inf)^-1")
plt_axis.set_yscale("log", nonposx='clip')
plt_axis.set_xscale("log", nonposx='clip')
plt_axis.legend()
plt.savefig(OUTPUT_DIR + sep + "theorycompare_loglog" + '.png', bbox_inches='tight')
plt.show()
"""
if run_means_collect_and_plot:
dbdir = OUTPUT_DIR + sep + "tocollect" + sep + "runset_june17_FPT_cvary_44_ens240"
datafile, paramfile = collect_fpt_mean_stats_and_params(dbdir)
samplesize=240
mean_fpt_varying, sd_fpt_varying, param_to_vary, param_set, params = \
read_varying_mean_sd_fpt_and_params(datafile, paramfile)
plot_mean_fpt_varying(mean_fpt_varying, sd_fpt_varying, param_to_vary, param_set, params, samplesize,
SEM_flag=True, show_flag=True, figname_mod="_%s_n%d" % (param_to_vary, samplesize))
| mit |
soylentdeen/Graffity | src/Vibrations/VibrationExplorer.py | 1 | 5531 | import sys
sys.path.append('../')
import numpy
import Graffity
import CIAO_DatabaseTools
import astropy.time as aptime
from matplotlib import pyplot
import colorsys
def getFreqs():
while True:
retval = []
enteredText = raw_input("Enter a comma separated list of frequencies: ")
try:
for val in enteredText.split(','):
retval.append(float(val.strip()))
break
except:
pass
return retval
def getModes():
while True:
enteredText = raw_input("Which modes to investigate? AVC or ALL? : ")
if enteredText == 'AVC':
return 'AVC'
if enteredText == 'ALL':
return 'ALL'
def getDataLoggers(DB, GravityVals, startTime, ax=None):
order = numpy.argsort(GravityVals[:,-2])
GravityVals = GravityVals[order]
i = 1
for record in GravityVals:
print("%03d | %s" % (i,aptime.Time(float(record[-2]), format='mjd').iso))
i += 1
index = int(raw_input("Enter desired index :")) - 1
FTData = Graffity.GRAVITY_Data(GravityVals[index][-1])
FTData.DualSciP2VM.computeOPDPeriodograms()
VibrationPeaks = FTData.DualSciP2VM.findVibrationPeaks()
    FTData.computeACQCAMStrehl()
#freqs = getFreqs()
#Modes = getModes()
CIAOVals = DB.query(keywords=['ALT', 'AZ', 'STREHL'], timeOfDay='NIGHT', startTime=startTime)
DataLoggers = {}
for UT in [1, 2, 3, 4]:
closest = numpy.argsort(numpy.abs(CIAOVals[UT][:,-4]
- float(GravityVals[index,-2])))[0]
DataLoggers[UT] = Graffity.DataLogger(directory=CIAOVals[UT][closest,-3])
DataLoggers[UT].loadData()
DataLoggers[UT].computeStrehl()
freqs = extractBCIFreqs(VibrationPeaks, UT)
DataLoggers[UT].measureVibs(frequencies=freqs, modes='AVC')
return DataLoggers, VibrationPeaks
def extractBCIFreqs(VibrationPeaks, UT):
freqs = []
baselines = {0:[4,3], 1:[4, 2], 2:[4, 1], 3:[3, 2], 4:[3, 1], 5:[2, 1]}
for bl in baselines.keys():
if UT in baselines[bl]:
for f in VibrationPeaks[bl]['freqs']:
freqs.append(f)
return numpy.array(freqs)
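# The six GRAVITY baselines are indexed 0-5 and baselines[bl] names the two
# UTs forming each one, so a given UT inherits the vibration peaks of the
# three baselines it participates in.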
fig = pyplot.figure(0, figsize=(8.0, 10.0), frameon=False)
fig.clear()
ax1 = fig.add_axes([0.1, 0.2, 0.4, 0.3])
ax2 = fig.add_axes([0.1, 0.5, 0.4, 0.4], sharex=ax1)
ax3 = fig.add_axes([0.5, 0.2, 0.4, 0.3], sharex=ax1)
ax3.yaxis.tick_right()
ax4 = fig.add_axes([0.5, 0.5, 0.4, 0.4], sharex=ax1)
ax4.yaxis.tick_right()
GDB = CIAO_DatabaseTools.GRAVITY_Database()
CDB = CIAO_DatabaseTools.CIAO_Database()
startTime = '2017-08-10 00:00:00'
GravityVals = GDB.query(keywords = [], timeOfDay='NIGHT', startTime=startTime)
#ax1.set_xscale('log')
#ax1.set_yscale('log')
CIAO, Vibrations = getDataLoggers(CDB, GravityVals, startTime, ax=ax1)
hsv = [(numpy.random.uniform(low=0.0, high=1),
numpy.random.uniform(low=0.2, high=1),
numpy.random.uniform(low=0.9, high=1)) for i in
range(99)]
colors = []
for h in hsv:
colors.append(colorsys.hsv_to_rgb(h[0], h[1], h[2]))
handles = numpy.array([])
labels = numpy.array([])
baselines = {0:[4,3], 1:[4, 2], 2:[4, 1], 3:[3, 2], 4:[3, 1], 5:[2, 1]}
colors = {0:'y', 1:'g', 2:'r', 3:'c', 4:'m', 5:'k'}
for CIAO_ID, ax in zip([1, 2, 3, 4], [ax1, ax2, ax3, ax4]):
DL = CIAO[CIAO_ID]
for mode in DL.vibPower.keys():
BCIVibs = {}
for bl in baselines.keys():
if CIAO_ID in baselines[bl]:
label = "UT%dUT%d" % (baselines[bl][0], baselines[bl][1])
BCIVibs[label] = {'index':bl, 'power':[]}
f = []
p = []
for peak in DL.vibPower[mode]['CommPower'].iteritems():
if peak[1] > 0:
f.append(peak[0])
p.append(numpy.log10(peak[1]))
for label in BCIVibs.keys():
if not( f[-1] in Vibrations[BCIVibs[label]['index']]['freqs']):
BCIVibs[label]['power'].append(0.0)
else:
for i, freq in enumerate(Vibrations[BCIVibs[label]['index']]['freqs']):
if freq == f[-1]:
BCIVibs[label]['power'].append(Vibrations[BCIVibs[label]['index']]['power'][i])
#ax.plot(DL.ZPowerFrequencies, numpy.log10(DL.ZPowerCommands[mode,:]), color =
# colors[mode])
f = numpy.array(f)
p = numpy.array(p)
ax.scatter(numpy.log10(f), p, color='b')
for bl in BCIVibs.keys():
BCIVibs[bl]['power'] = numpy.array(BCIVibs[bl]['power'])
nonzero = BCIVibs[bl]['power'] > 0.0
ax.scatter(numpy.log10(f[nonzero]), numpy.log10(BCIVibs[bl]['power'][nonzero]),
label=bl, color = colors[BCIVibs[bl]['index']])
#ax.scatter(numpy.array(f), numpy.array(p), color=colors[mode],
# label='Mode %d' % mode)
h, l = ax.get_legend_handles_labels()
handles=numpy.append(handles, numpy.array(h))
labels =numpy.append(labels, numpy.array(l))
#ax1.set_ybound(0, 20)
#ax2.set_ybound(0, 20)
#ax3.set_ybound(0, 20)
#ax4.set_ybound(0, 20)
#ax1.set_xbound(0, 160)
#ax2.set_xbound(0, 160)
#ax3.set_xbound(0, 160)
#ax4.set_xbound(0, 160)
#ax2.xaxis.set_ticklabels([])
#ax4.xaxis.set_ticklabels([])
junk, indices = numpy.unique(labels, return_index=True)
fig.legend(handles[indices], labels[indices], ncol=4, loc=3, scatterpoints=1)
fig.show()
#"""
| mit |
eoinmurray/icarus | Experiments/power_dep.py | 1 | 1341 |
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import numpy as np
import matplotlib.pyplot as plt
from constants import Constants
from Icarus.Experiment import Experiment
if __name__ == "__main__":
"""
Runs power dependance.
"""
constants = Constants()
hold_power = np.linspace(0.2, 0.8, num=60)
hold_x = []
hold_xx = []
for power in hold_power:
constants.power = power
experiment = Experiment(constants, Visualizer=False)
experiment.run('power_dep')
hold_x.append(experiment.spectrometer.x)
hold_xx.append(experiment.spectrometer.xx)
plt.plot(np.log10(hold_power), np.log10(hold_x), 'ro')
plt.plot(np.log10(hold_power), np.log10(hold_xx), 'bo')
idx = (np.abs(hold_power-1)).argmin()
A = np.vstack([np.log10(hold_power[0:idx]), np.ones(len(np.log10(hold_power[0:idx])))]).T
mx, cx = np.linalg.lstsq(A, np.log10(hold_x[0:idx]))[0]
mxx, cxx = np.linalg.lstsq(A, np.log10(hold_xx[0:idx]))[0]
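    # Slopes of the log-log fits below saturation: for a quantum dot one
    # expects the exciton (X) line to scale roughly linearly with power
    # (slope ~1) and the biexciton (XX) roughly quadratically (slope ~2).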
print mx, mxx
hold_power_interpolate = np.linspace(np.min(hold_power[0:idx]), np.max(hold_power[0:idx]), num=200)
plt.plot(np.log10(hold_power_interpolate), mx*np.log10(hold_power_interpolate) + cx, 'g--')
plt.plot(np.log10(hold_power_interpolate), mxx*np.log10(hold_power_interpolate) + cxx, 'g--')
plt.legend(['X', 'XX'])
plt.show() | mit |
openai/baselines | baselines/results_plotter.py | 1 | 3455 | import numpy as np
import matplotlib
matplotlib.use('TkAgg') # Can change to 'Agg' for non-interactive mode
import matplotlib.pyplot as plt
plt.rcParams['svg.fonttype'] = 'none'
from baselines.common import plot_util
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',
'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
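# Illustration: rolling_window builds overlapping views with stride tricks and
# no copying, e.g. rolling_window(np.array([1, 2, 3, 4]), 2) yields
# [[1, 2], [2, 3], [3, 4]]; window_func then reduces each row (e.g. with
# np.mean) to smooth a curve over the trailing `window` samples.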
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def plot_curves(xy_list, xaxis, yaxis, title):
fig = plt.figure(figsize=(8,2))
maxx = max(xy[0][-1] for xy in xy_list)
minx = 0
for (i, (x, y)) in enumerate(xy_list):
color = COLORS[i % len(COLORS)]
plt.scatter(x, y, s=2)
x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean) #So returns average of last EPISODE_WINDOW episodes
plt.plot(x, y_mean, color=color)
plt.xlim(minx, maxx)
plt.title(title)
plt.xlabel(xaxis)
plt.ylabel(yaxis)
plt.tight_layout()
fig.canvas.mpl_connect('resize_event', lambda event: plt.tight_layout())
plt.grid(True)
def split_by_task(taskpath):
return taskpath['dirname'].split('/')[-1].split('-')[0]
def plot_results(dirs, num_timesteps=10e6, xaxis=X_TIMESTEPS, yaxis=Y_REWARD, title='', split_fn=split_by_task):
results = plot_util.load_results(dirs)
plot_util.plot_results(results, xy_fn=lambda r: ts2xy(r['monitor'], xaxis, yaxis), split_fn=split_fn, average_group=True, resample=int(1e6))
# Example usage in jupyter-notebook
# from baselines.results_plotter import plot_results
# %matplotlib inline
# plot_results("./log")
# Here ./log is a directory containing the monitor.csv files
def main():
import argparse
import os
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dirs', help='List of log directories', nargs = '*', default=['./log'])
parser.add_argument('--num_timesteps', type=int, default=int(10e6))
parser.add_argument('--xaxis', help = 'Varible on X-axis', default = X_TIMESTEPS)
parser.add_argument('--yaxis', help = 'Varible on Y-axis', default = Y_REWARD)
parser.add_argument('--task_name', help = 'Title of plot', default = 'Breakout')
args = parser.parse_args()
args.dirs = [os.path.abspath(dir) for dir in args.dirs]
plot_results(args.dirs, args.num_timesteps, args.xaxis, args.yaxis, args.task_name)
plt.show()
if __name__ == '__main__':
main()
| mit |
Sixshaman/networkx | doc/make_gallery.py | 35 | 2453 | """
Generate a thumbnail gallery of examples.
"""
from __future__ import print_function
import os, glob, re, shutil, sys
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot
import matplotlib.image
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
examples_source_dir = '../examples/drawing'
examples_dir = 'examples/drawing'
template_dir = 'source/templates'
static_dir = 'source/static/examples'
pwd=os.getcwd()
rows = []
template = """
{%% extends "layout.html" %%}
{%% set title = "Gallery" %%}
{%% block body %%}
<h3>Click on any image to see source code</h3>
<br/>
%s
{%% endblock %%}
"""
link_template = """
<a href="%s"><img src="%s" border="0" alt="%s"/></a>
"""
if not os.path.exists(static_dir):
os.makedirs(static_dir)
os.chdir(examples_source_dir)
all_examples=sorted(glob.glob("*.py"))
# check for out of date examples
stale_examples=[]
for example in all_examples:
png=example.replace('py','png')
png_static=os.path.join(pwd,static_dir,png)
if (not os.path.exists(png_static) or
os.stat(png_static).st_mtime < os.stat(example).st_mtime):
stale_examples.append(example)
for example in stale_examples:
print(example, end=" ")
png=example.replace('py','png')
matplotlib.pyplot.figure(figsize=(6,6))
stdout=sys.stdout
sys.stdout=open('/dev/null','w')
try:
execfile(example)
sys.stdout=stdout
print(" OK")
except ImportError as strerr:
sys.stdout=stdout
sys.stdout.write(" FAIL: %s\n" % strerr)
continue
matplotlib.pyplot.clf()
im=matplotlib.image.imread(png)
fig = Figure(figsize=(2.5, 2.5))
canvas = FigureCanvas(fig)
    ax = fig.add_axes([0,0,1,1], aspect='auto', frameon=False, xticks=[], yticks=[])
# basename, ext = os.path.splitext(basename)
ax.imshow(im, aspect='auto', resample=True, interpolation='bilinear')
thumbfile=png.replace(".png","_thumb.png")
fig.savefig(thumbfile)
shutil.copy(thumbfile,os.path.join(pwd,static_dir,thumbfile))
shutil.copy(png,os.path.join(pwd,static_dir,png))
basename, ext = os.path.splitext(example)
link = '%s/%s.html'%(examples_dir, basename)
rows.append(link_template%(link, os.path.join('_static/examples',thumbfile), basename))
os.chdir(pwd)
fh = open(os.path.join(template_dir,'gallery.html'), 'w')
fh.write(template%'\n'.join(rows))
fh.close()
| bsd-3-clause |
hainm/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
kcarnold/autograd | examples/fluidsim/fluidsim.py | 2 | 4623 | from __future__ import absolute_import
from __future__ import print_function
import autograd.numpy as np
from autograd import value_and_grad
from scipy.optimize import minimize
from scipy.misc import imread
import matplotlib
import matplotlib.pyplot as plt
import os
from builtins import range
# Fluid simulation code based on
# "Real-Time Fluid Dynamics for Games" by Jos Stam
# http://www.intpowertechcorp.com/GDC03.pdf
def project(vx, vy):
"""Project the velocity field to be approximately mass-conserving,
using a few iterations of Gauss-Seidel."""
p = np.zeros(vx.shape)
h = 1.0/vx.shape[0]
div = -0.5 * h * (np.roll(vx, -1, axis=0) - np.roll(vx, 1, axis=0)
+ np.roll(vy, -1, axis=1) - np.roll(vy, 1, axis=1))
for k in range(10):
p = (div + np.roll(p, 1, axis=0) + np.roll(p, -1, axis=0)
+ np.roll(p, 1, axis=1) + np.roll(p, -1, axis=1))/4.0
vx -= 0.5*(np.roll(p, -1, axis=0) - np.roll(p, 1, axis=0))/h
vy -= 0.5*(np.roll(p, -1, axis=1) - np.roll(p, 1, axis=1))/h
return vx, vy
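# The sweeps above approximately solve the pressure Poisson equation
# lap(p) = div(v); subtracting grad(p) removes the divergent component of the
# velocity field (a discrete Helmholtz projection enforcing incompressibility).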
def advect(f, vx, vy):
"""Move field f according to x and y velocities (u and v)
using an implicit Euler integrator."""
rows, cols = f.shape
cell_ys, cell_xs = np.meshgrid(np.arange(rows), np.arange(cols))
center_xs = (cell_xs - vx).ravel()
center_ys = (cell_ys - vy).ravel()
# Compute indices of source cells.
left_ix = np.floor(center_xs).astype(int)
top_ix = np.floor(center_ys).astype(int)
rw = center_xs - left_ix # Relative weight of right-hand cells.
bw = center_ys - top_ix # Relative weight of bottom cells.
left_ix = np.mod(left_ix, rows) # Wrap around edges of simulation.
right_ix = np.mod(left_ix + 1, rows)
top_ix = np.mod(top_ix, cols)
bot_ix = np.mod(top_ix + 1, cols)
# A linearly-weighted sum of the 4 surrounding cells.
flat_f = (1 - rw) * ((1 - bw)*f[left_ix, top_ix] + bw*f[left_ix, bot_ix]) \
+ rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix])
return np.reshape(flat_f, (rows, cols))
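# This is a semi-Lagrangian backtrace: each cell samples the field one step
# upstream at (cell - v) and blends the four surrounding source cells with
# bilinear weights rw/bw; np.mod wraps the indices, making the domain periodic.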
def simulate(vx, vy, smoke, num_time_steps, ax=None, render=False):
print("Running simulation...")
for t in range(num_time_steps):
if ax: plot_matrix(ax, smoke, t, render)
vx_updated = advect(vx, vx, vy)
vy_updated = advect(vy, vx, vy)
vx, vy = project(vx_updated, vy_updated)
smoke = advect(smoke, vx, vy)
if ax: plot_matrix(ax, smoke, num_time_steps, render)
return smoke
def plot_matrix(ax, mat, t, render=False):
plt.cla()
ax.matshow(mat)
ax.set_xticks([])
ax.set_yticks([])
plt.draw()
if render:
matplotlib.image.imsave('step{0:03d}.png'.format(t), mat)
plt.pause(0.001)
if __name__ == '__main__':
simulation_timesteps = 100
print("Loading initial and target states...")
init_smoke = imread('init_smoke.png')[:,:,0]
#target = imread('peace.png')[::2,::2,3]
target = imread('skull.png')[::2,::2]
rows, cols = target.shape
init_dx_and_dy = np.zeros((2, rows, cols)).ravel()
def distance_from_target_image(smoke):
return np.mean((target - smoke)**2)
def convert_param_vector_to_matrices(params):
vx = np.reshape(params[:(rows*cols)], (rows, cols))
vy = np.reshape(params[(rows*cols):], (rows, cols))
return vx, vy
def objective(params):
init_vx, init_vy = convert_param_vector_to_matrices(params)
final_smoke = simulate(init_vx, init_vy, init_smoke, simulation_timesteps)
return distance_from_target_image(final_smoke)
# Specify gradient of objective function using autograd.
objective_with_grad = value_and_grad(objective)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, frameon=False)
def callback(params):
init_vx, init_vy = convert_param_vector_to_matrices(params)
simulate(init_vx, init_vy, init_smoke, simulation_timesteps, ax)
print("Optimizing initial conditions...")
result = minimize(objective_with_grad, init_dx_and_dy, jac=True, method='CG',
options={'maxiter':25, 'disp':True}, callback=callback)
print("Rendering optimized flow...")
init_vx, init_vy = convert_param_vector_to_matrices(result.x)
simulate(init_vx, init_vy, init_smoke, simulation_timesteps, ax, render=True)
print("Converting frames to an animated GIF...")
os.system("convert -delay 5 -loop 0 step*.png"
" -delay 250 step100.png surprise.gif") # Using imagemagick.
os.system("rm step*.png")
| mit |
jenshnielsen/basemap | examples/maskoceans.py | 4 | 1922 | from mpl_toolkits.basemap import Basemap, shiftgrid, maskoceans, interp
import numpy as np
import matplotlib.pyplot as plt
# example showing how to mask out 'wet' areas on a contour or pcolor plot.
topodatin = np.loadtxt('etopo20data.gz')
lonsin = np.loadtxt('etopo20lons.gz')
latsin = np.loadtxt('etopo20lats.gz')
# shift data so lons go from -180 to 180 instead of 20 to 380.
topoin,lons1 = shiftgrid(180.,topodatin,lonsin,start=False)
lats1 = latsin
fig=plt.figure()
# setup basemap
m=Basemap(resolution='l',projection='lcc',lon_0=-100,lat_0=40,width=8.e6,height=6.e6)
lons, lats = np.meshgrid(lons1,lats1)
x, y = m(lons, lats)
# interpolate land/sea mask to topo grid, mask ocean values.
# output may look 'blocky' near coastlines, since data is at much
# lower resolution than land/sea mask.
topo = maskoceans(lons, lats, topoin)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (original grid)')
fig=plt.figure()
# interpolate topo data to higher resolution grid (to better match
# the land/sea mask). Output looks less 'blocky' near coastlines.
nlats = 3*topoin.shape[0]
nlons = 3*topoin.shape[1]
lons = np.linspace(-180,180,nlons)
lats = np.linspace(-90,90,nlats)
lons, lats = np.meshgrid(lons, lats)
x, y = m(lons, lats)
topo = interp(topoin,lons1,lats1,lons,lats,order=1)
# interpolate land/sea mask to topo grid, mask ocean values.
topo = maskoceans(lons, lats, topo)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (data on finer grid)')
plt.show()
| gpl-2.0 |
Juanlu001/pfc | demo/plot_h.py | 1 | 6084 | #******************************************************************************
# *
# * ** * * * * *
# * * * * * * * * * *
# ***** * * * * ***** ** *** * * ** *** *** *
# * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * *
# * * ** * ** * * *** *** *** ** *** * * *
# * * * *
# ** * * *
# *
#******************************************************************************
# *
# This file is part of AQUAgpusph, a free CFD program based on SPH. *
# Copyright (C) 2012 Jose Luis Cercos Pita <jl.cercos@upm.es> *
# *
# AQUAgpusph is free software: you can redistribute it and/or modify *
# it under the terms of the GNU General Public License as published by *
# the Free Software Foundation, either version 3 of the License, or *
# (at your option) any later version. *
# *
# AQUAgpusph is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# GNU General Public License for more details. *
# *
# You should have received a copy of the GNU General Public License *
# along with AQUAgpusph. If not, see <http://www.gnu.org/licenses/>. *
# *
#******************************************************************************
import sys
import os
from os import path
import numpy as np
try:
from PyQt4 import QtGui
except:
try:
from PySide import QtGui
except:
raise ImportError("PyQt4 or PySide is required to use this tool")
try:
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
except:
raise ImportError("matplotlib is required to use this tool")
class FigureController(FigureCanvas):
"""Matplotlib figure widget controller"""
def __init__(self):
"""Constructor"""
# Create the figure in the canvas
self.fig = Figure()
self.ax11 = self.fig.add_subplot(221)
self.ax21 = self.fig.add_subplot(222)
self.ax12 = self.fig.add_subplot(223)
self.ax22 = self.fig.add_subplot(224)
self.ax = (self.ax11, self.ax21, self.ax12, self.ax22)
FigureCanvas.__init__(self, self.fig)
FNAME = path.join('test_case_2_exp_data.dat')
        # For some reason the input file is badly sorted
T,_,_,_,_,_,_,_,_,H3,H2,H1,H4, = self.readFile(FNAME)
exp_t = T
exp_h = (H1, H2, H3, H4)
titles = ('H1', 'H2', 'H3', 'H4')
self.lines = []
for i in range(len(self.ax)):
ax = self.ax[i]
t = [0.0]
h = [0.0]
line, = ax.plot(t,
h,
label=r'$H_{SPH}$',
color="black",
linewidth=1.0)
self.lines.append(line)
ax.plot(exp_t,
exp_h[i],
label=r'$H_{Exp}$',
color="red",
linewidth=1.0)
# Set some options
ax.grid()
ax.legend(loc='best')
ax.set_title(titles[i])
ax.set_xlim(0, 6)
ax.set_ylim(0.0, 0.6)
ax.set_autoscale_on(False)
ax.set_xlabel(r"$t \, [\mathrm{s}]$", fontsize=21)
ax.set_ylabel(r"$H \, [\mathrm{m}]$", fontsize=21)
# force the figure redraw
self.fig.canvas.draw()
# call the update method (to speed-up visualization)
self.timerEvent(None)
# start timer, trigger event every 1000 millisecs (=1sec)
self.timer = self.startTimer(1000)
def readFile(self, filepath):
""" Read and extract data from a file
:param filepath File ot read
"""
abspath = filepath
if not path.isabs(filepath):
abspath = path.join(path.dirname(path.abspath(__file__)), filepath)
# Read the file by lines
f = open(abspath, "r")
lines = f.readlines()
f.close()
data = []
for l in lines[1:]:
l = l.strip()
while l.find(' ') != -1:
l = l.replace(' ', ' ')
fields = l.split(' ')
try:
                # Convert eagerly so that non-numeric lines (e.g. the header)
                # are caught by the except clause below (also Python 3 safe).
                data.append([float(x) for x in fields])
except:
continue
        # Transpose the data (list of rows -> list of columns)
        return [list(col) for col in zip(*data)]
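    # Illustrative input layout for readFile (hypothetical values): a header
    # line followed by whitespace-separated numeric columns, e.g.
    #
    #   t     c1    c2    ...   H3    H2    H1    H4
    #   0.00  0.10  0.20  ...   0.00  0.00  0.00  0.00
    #   0.05  0.11  0.21  ...   0.12  0.08  0.10  0.09
    #
    # Lines whose fields cannot all be parsed as floats (e.g. the header) are
    # skipped, and the data is returned transposed: one list per column.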
def timerEvent(self, evt):
"""Custom timerEvent code, called at timer event receive"""
# Read and plot the new data
data = self.readFile('sensors_h.out')
t = data[0]
hh = (data[-4], data[-3], data[-2], data[-1])
for i in range(len(hh)):
h = hh[i]
self.lines[i].set_data(t, h)
# Redraw
self.fig.canvas.draw()
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
widget = FigureController()
widget.setWindowTitle("Wave height")
widget.show()
sys.exit(app.exec_())
| gpl-3.0 |
SitiBanc/1061_NCTU_IOMDS | 1025/Homework 5/HW5_5.py | 1 | 5472 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 26 21:05:37 2017
@author: sitibanc
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# =============================================================================
# Read CSV
# =============================================================================
df = pd.read_csv('TXF20112015.csv', sep=',', header = None) # dataframe (time, close, open, high, low, volume)
TAIEX = df.values # ndarray
tradeday = list(set(TAIEX[:, 0] // 10000)) # trading days (YYYYMMDD)
tradeday.sort()
# =============================================================================
# Strategy 5: Strategy 4 plus a 30-point stop-loss
# =============================================================================
profit0 = np.zeros((len(tradeday),1))
count = 0 # number of entries (trades)
for i in range(len(tradeday)):
    date = tradeday[i]
    idx = np.nonzero(TAIEX[:, 0] // 10000 == date)[0]
    idx.sort()
    openning = TAIEX[idx[0], 2]     # opening price of the day
    long_signal = openning + 30     # buy-signal threshold
    short_signal = openning - 30    # sell-signal threshold
    # time points that trigger the buy signal
    idx2 = np.nonzero(TAIEX[idx, 3] >= long_signal)[0]  # buy points
    # set the stop-loss for the buy signal
    if len(idx2) > 0:
        # intraday data from the first buy signal onward (the signal itself is
        # included, so index 0 cannot be used as the stop-loss)
        tmp2 = TAIEX[idx[idx2[0]]:idx[-1], :]
        idx2_stop = np.nonzero(tmp2[:, 4] <= openning)[0]
    # time points that trigger the sell signal
    idx3 = np.nonzero(TAIEX[idx, 4] <= short_signal)[0]  # sell points
    # set the stop-loss for the sell signal
    if len(idx3) > 0:
        # intraday data from the first sell signal onward (the signal itself is
        # included, so index 0 cannot be used as the stop-loss)
        tmp3 = TAIEX[idx[idx3[0]]:idx[-1], :]
        idx3_stop = np.nonzero(tmp3[:, 3] >= openning)[0]
    if len(idx2) == 0 and len(idx3) == 0:  # no signal triggered today (no entry)
        p1 = 0
        p2 = 0
    elif len(idx3) == 0:  # only the buy signal appeared today (enter long)
        p1 = TAIEX[idx[idx2[0]], 1]  # buy at the close of the first buy point
        if len(idx2_stop) > 1:  # the stop-loss was hit
            p2 = tmp2[idx2_stop[1], 1]  # sell at the close when the first usable stop-loss (index = 1) appears
        else:
            p2 = TAIEX[idx[-1], 1]  # sell at the day's closing price
        count += 1
    elif len(idx2) == 0:  # only the sell signal appeared today (enter short)
        p2 = TAIEX[idx[idx3[0]], 1]  # sell at the close of the first sell point
        if len(idx3_stop) > 1:  # the stop-loss was hit
            p1 = tmp3[idx3_stop[1], 1]  # buy back at the close when the stop-loss appears
        else:
            p1 = TAIEX[idx[-1], 1]  # buy back at the day's closing price
        count += 1
    elif idx2[0] > idx3[0]:  # the sell signal appeared first today (enter short)
        p2 = TAIEX[idx[idx3[0]], 1]  # sell at the close of the first sell point
        if len(idx3_stop) > 1:  # the stop-loss was hit
            p1 = tmp3[idx3_stop[1], 1]  # buy back at the close when the stop-loss appears
        else:
            p1 = TAIEX[idx[-1], 1]  # buy back at the day's closing price
        count += 1
    else:  # the buy signal appeared first today (enter long)
        p1 = TAIEX[idx[idx2[0]], 1]  # buy at the close of the first buy point
        if len(idx2_stop) > 1:  # the stop-loss was hit
            p2 = tmp2[idx2_stop[1], 1]  # sell at the close when the stop-loss appears
        else:
            p2 = TAIEX[idx[-1], 1]  # sell at the day's closing price
        count += 1
    profit0[i] = p2 - p1
print('Strategy 5: Strategy 4 plus a 30-point stop-loss\nDay-by-day profit/loss line chart')
profit02 = np.cumsum(profit0)  # cumulative daily profit/loss
plt.plot(profit02)  # day-by-day profit/loss line chart
plt.show()
print('Distribution of daily profit/loss')
plt.hist(profit0, bins = 100)  # histogram of daily profit/loss
plt.show()
# Compute summary statistics
ans1 = count  # number of entries
ans2 = profit02[-1]  # total profit/loss in points
ans3 = np.sum(profit0 > 0) / ans1 * 100  # win rate (%)
ans4 = np.mean(profit0[profit0 > 0])  # average points gained on winning trades
zero_profit = len(profit0[profit0 <= 0]) - (len(profit0) - ans1)  # entered days without a win (non-positive days minus no-entry days)
ans5 = np.sum(profit0[profit0 < 0]) / zero_profit  # average points lost on losing trades
print('Number of entries:', ans1, '\nTotal profit/loss (points):', ans2, '\nWin rate:', ans3, '%')
print('Average gain per winning trade (points):', ans4, '\nAverage loss per losing trade (points):', ans5, '\n')
| apache-2.0 |
Unidata/MetPy | v0.12/_downloads/7b1d8e864fd4783fdaff1a83cdf9c52f/Find_Natural_Neighbors_Verification.py | 6 | 2521 | # Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Find Natural Neighbors Verification
===================================
Finding natural neighbors in a triangulation
A triangle is a natural neighbor of a point if that point lies within the
triangle's circumscribed circle ("circumcircle").
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import Delaunay
from metpy.interpolate.geometry import circumcircle_radius, find_natural_neighbors
# Create test observations, test points, and plot the triangulation and points.
gx, gy = np.meshgrid(np.arange(0, 20, 4), np.arange(0, 20, 4))
pts = np.vstack([gx.ravel(), gy.ravel()]).T
tri = Delaunay(pts)
fig, ax = plt.subplots(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
ax.plot(x, y)
ax.annotate(i, xy=(np.mean(x), np.mean(y)))
test_points = np.array([[2, 2], [5, 10], [12, 13.4], [12, 8], [20, 20]])
for i, (x, y) in enumerate(test_points):
ax.plot(x, y, 'k.', markersize=6)
ax.annotate('test ' + str(i), xy=(x, y))
###########################################
# Since finding natural neighbors already calculates circumcenters, return
# that information for later use.
#
# The key of the neighbors dictionary refers to the test point index, and the list of integers
# are the triangles that are natural neighbors of that particular test point.
#
# Since point 4 is far away from the triangulation, it has no natural neighbors.
# Point 3 is at the confluence of several triangles so it has many natural neighbors.
neighbors, circumcenters = find_natural_neighbors(tri, test_points)
print(neighbors)
###########################################
# We can plot all of the triangles as well as the circles representing the circumcircles
#
fig, ax = plt.subplots(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
ax.plot(x, y)
ax.annotate(i, xy=(np.mean(x), np.mean(y)))
# Using circumcenters and calculated circumradii, plot the circumcircles
for idx, cc in enumerate(circumcenters):
ax.plot(cc[0], cc[1], 'k.', markersize=5)
circ = plt.Circle(cc, circumcircle_radius(*tri.points[tri.simplices[idx]]),
edgecolor='k', facecolor='none', transform=fig.axes[0].transData)
ax.add_artist(circ)
ax.set_aspect('equal', 'datalim')
plt.show()
| bsd-3-clause |
mxjl620/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
pfnet/chainer | examples/wavenet/train.py | 6 | 5955 | import argparse
import os
import pathlib
import warnings
import numpy
import chainer
from chainer.training import extensions
import chainerx
from net import EncoderDecoderModel
from net import UpsampleNet
from net import WaveNet
from utils import Preprocess
import matplotlib
matplotlib.use('Agg')
parser = argparse.ArgumentParser(description='Chainer example: WaveNet')
parser.add_argument('--batchsize', '-b', type=int, default=4,
                    help='Number of audio clips in each mini-batch')
parser.add_argument('--length', '-l', type=int, default=7680,
help='Number of samples in each audio clip')
parser.add_argument('--epoch', '-e', type=int, default=100,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--dataset', '-i', default='./VCTK-Corpus',
help='Directory of dataset')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--n_loop', type=int, default=4,
help='Number of residual blocks')
parser.add_argument('--n_layer', type=int, default=10,
help='Number of layers in each residual block')
parser.add_argument('--a_channels', type=int, default=256,
help='Number of channels in the output layers')
parser.add_argument('--r_channels', type=int, default=64,
help='Number of channels in residual layers and embedding')
parser.add_argument('--s_channels', type=int, default=256,
help='Number of channels in the skip layers')
parser.add_argument('--use_embed_tanh', type=bool, default=True,
help='Use tanh after an initial 2x1 convolution')
parser.add_argument('--seed', type=int, default=0,
help='Random seed to split dataset into train and test')
parser.add_argument('--snapshot_interval', type=int, default=10000,
help='Interval of snapshot')
parser.add_argument('--display_interval', type=int, default=100,
help='Interval of displaying log to console')
parser.add_argument('--process', type=int, default=1,
help='Number of parallel processes')
parser.add_argument('--prefetch', type=int, default=8,
help='Number of prefetch samples')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == numpy.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
device = chainer.get_device(args.device)
print('GPU: {}'.format(device))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
if device.xp is chainer.backends.cuda.cupy:
chainer.global_config.autotune = True
# Datasets
if not os.path.isdir(args.dataset):
raise RuntimeError('Dataset directory not found: {}'.format(args.dataset))
paths = sorted([
str(path) for path in pathlib.Path(args.dataset).glob('wav48/*/*.wav')])
preprocess = Preprocess(
sr=16000, n_fft=1024, hop_length=256, n_mels=128, top_db=20,
length=args.length, quantize=args.a_channels)
dataset = chainer.datasets.TransformDataset(paths, preprocess)
train, valid = chainer.datasets.split_dataset_random(
dataset, int(len(dataset) * 0.9), args.seed)
# Networks
encoder = UpsampleNet(args.n_loop * args.n_layer, args.r_channels)
decoder = WaveNet(
args.n_loop, args.n_layer,
args.a_channels, args.r_channels, args.s_channels,
args.use_embed_tanh)
model = chainer.links.Classifier(EncoderDecoderModel(encoder, decoder))
# Optimizer
optimizer = chainer.optimizers.Adam(1e-4)
optimizer.setup(model)
# Iterators
train_iter = chainer.iterators.MultiprocessIterator(
train, args.batchsize,
n_processes=args.process, n_prefetch=args.prefetch)
valid_iter = chainer.iterators.MultiprocessIterator(
valid, args.batchsize, repeat=False, shuffle=False,
n_processes=args.process, n_prefetch=args.prefetch)
# Updater and Trainer
updater = chainer.training.StandardUpdater(
train_iter, optimizer, device=device)
trainer = chainer.training.Trainer(
updater, (args.epoch, 'epoch'), out=args.out)
# Extensions
snapshot_interval = (args.snapshot_interval, 'iteration')
display_interval = (args.display_interval, 'iteration')
trainer.extend(extensions.Evaluator(valid_iter, model, device=device))
# TODO(niboshi): Temporarily disabled for chainerx. Fix it.
if device.xp is not chainerx:
trainer.extend(extensions.dump_graph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=snapshot_interval)
trainer.extend(extensions.LogReport(trigger=display_interval))
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'main/loss', 'main/accuracy',
'validation/main/loss', 'validation/main/accuracy']),
trigger=display_interval)
trainer.extend(extensions.PlotReport(
['main/loss', 'validation/main/loss'],
'iteration', file_name='loss.png', trigger=display_interval))
trainer.extend(extensions.PlotReport(
['main/accuracy', 'validation/main/accuracy'],
'iteration', file_name='accuracy.png', trigger=display_interval))
trainer.extend(extensions.ProgressBar(update_interval=10))
# Resume
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
# Run
trainer.run()
| mit |
mganeva/mantid | Framework/PythonInterface/mantid/plots/modest_image/modest_image.py | 1 | 10141 | # v0.2 obtained on March 12, 2019
"""
Modification of Chris Beaumont's mpl-modest-image package to allow the use of
set_extent.
"""
from __future__ import print_function, division
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.image as mi
import matplotlib.colors as mcolors
import matplotlib.cbook as cbook
from matplotlib.transforms import IdentityTransform, Affine2D
import numpy as np
IDENTITY_TRANSFORM = IdentityTransform()
class ModestImage(mi.AxesImage):
"""
Computationally modest image class.
ModestImage is an extension of the Matplotlib AxesImage class
better suited for the interactive display of larger images. Before
drawing, ModestImage resamples the data array based on the screen
    resolution and view window. This has very little effect on the
appearance of the image, but can substantially cut down on
computation since calculations of unresolved or clipped pixels
are skipped.
    The interface of ModestImage is the same as AxesImage. Unlike the
    original mpl-modest-image class, this modified version also supports
    setting the 'extent' property. There may still be unusual coordinate
    warping operations for images that are not handled correctly.
"""
def __init__(self, *args, **kwargs):
self._full_res = None
self._full_extent = kwargs.get('extent', None)
super(ModestImage, self).__init__(*args, **kwargs)
self.invalidate_cache()
def set_data(self, A):
"""
Set the image array
ACCEPTS: numpy/PIL Image A
"""
self._full_res = A
self._A = A
        if self._A.dtype != np.uint8 and not np.can_cast(self._A.dtype,
                                                         float):
raise TypeError("Image data can not convert to float")
if (self._A.ndim not in (2, 3) or
(self._A.ndim == 3 and self._A.shape[-1] not in (3, 4))):
raise TypeError("Invalid dimensions for image data")
self.invalidate_cache()
def invalidate_cache(self):
self._bounds = None
self._imcache = None
self._rgbacache = None
self._oldxslice = None
self._oldyslice = None
self._sx, self._sy = None, None
self._pixel2world_cache = None
self._world2pixel_cache = None
def set_extent(self, extent):
self._full_extent = extent
self.invalidate_cache()
mi.AxesImage.set_extent(self, extent)
def get_array(self):
"""Override to return the full-resolution array"""
return self._full_res
@property
def _pixel2world(self):
if self._pixel2world_cache is None:
# Pre-compute affine transforms to convert between the 'world'
# coordinates of the axes (what is shown by the axis labels) to
# 'pixel' coordinates in the underlying array.
extent = self._full_extent
if extent is None:
self._pixel2world_cache = IDENTITY_TRANSFORM
else:
self._pixel2world_cache = Affine2D()
self._pixel2world.translate(+0.5, +0.5)
self._pixel2world.scale((extent[1] - extent[0]) / self._full_res.shape[1],
(extent[3] - extent[2]) / self._full_res.shape[0])
self._pixel2world.translate(extent[0], extent[2])
self._world2pixel_cache = None
return self._pixel2world_cache
@property
def _world2pixel(self):
if self._world2pixel_cache is None:
self._world2pixel_cache = self._pixel2world.inverted()
return self._world2pixel_cache
def _scale_to_res(self):
"""
Change self._A and _extent to render an image whose resolution is
matched to the eventual rendering.
"""
# Find out how we need to slice the array to make sure we match the
# resolution of the display. We pass self._world2pixel which matters
# for cases where the extent has been set.
x0, x1, sx, y0, y1, sy = extract_matched_slices(axes=self.axes,
shape=self._full_res.shape,
transform=self._world2pixel)
# Check whether we've already calculated what we need, and if so just
# return without doing anything further.
if (self._bounds is not None and
sx >= self._sx and sy >= self._sy and
x0 >= self._bounds[0] and x1 <= self._bounds[1] and
y0 >= self._bounds[2] and y1 <= self._bounds[3]):
return
# Slice the array using the slices determined previously to optimally
# match the display
self._A = self._full_res[y0:y1:sy, x0:x1:sx]
self._A = cbook.safe_masked_invalid(self._A)
# We now determine the extent of the subset of the image, by determining
# it first in pixel space, and converting it to the 'world' coordinates.
# See https://github.com/matplotlib/matplotlib/issues/8693 for a
# demonstration of why origin='upper' and extent=None needs to be
# special-cased.
if self.origin == 'upper' and self._full_extent is None:
xmin, xmax, ymin, ymax = x0 - .5, x1 - .5, y1 - .5, y0 - .5
else:
xmin, xmax, ymin, ymax = x0 - .5, x1 - .5, y0 - .5, y1 - .5
xmin, ymin, xmax, ymax = self._pixel2world.transform([(xmin, ymin), (xmax, ymax)]).ravel()
mi.AxesImage.set_extent(self, [xmin, xmax, ymin, ymax])
# self.set_extent([xmin, xmax, ymin, ymax])
# Finally, we cache the current settings to avoid re-computing similar
# arrays in future.
self._sx = sx
self._sy = sy
self._bounds = (x0, x1, y0, y1)
self.changed()
def draw(self, renderer, *args, **kwargs):
        # Nothing to draw if no data has been set yet.
        if self._full_res is None:
            return
self._scale_to_res()
super(ModestImage, self).draw(renderer, *args, **kwargs)
def main():
from time import time
import matplotlib.pyplot as plt
x, y = np.mgrid[0:2000, 0:2000]
data = np.sin(x / 10.) * np.cos(y / 30.)
f = plt.figure()
ax = f.add_subplot(111)
# try switching between
artist = ModestImage(ax, data=data)
ax.set_aspect('equal')
artist.norm.vmin = -1
artist.norm.vmax = 1
ax.add_artist(artist)
t0 = time()
plt.gcf().canvas.draw()
t1 = time()
print("Draw time for %s: %0.1f ms" % (artist.__class__.__name__,
(t1 - t0) * 1000))
plt.show()
def imshow(axes, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=None, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""Similar to matplotlib's imshow command, but produces a ModestImage
Unlike matplotlib version, must explicitly specify axes
"""
if not axes._hold:
axes.cla()
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
if aspect is None:
aspect = rcParams['image.aspect']
axes.set_aspect(aspect)
im = ModestImage(axes, cmap=cmap, norm=norm, interpolation=interpolation,
origin=origin, extent=extent, filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
axes._set_artist_props(im)
if im.get_clip_path() is None:
# image does not already have clipping set, clip to axes patch
im.set_clip_path(axes.patch)
# if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
elif norm is None:
im.autoscale_None()
im.set_url(url)
# update ax.dataLim, and, if autoscaling, set viewLim
# to tightly fit the image, regardless of dataLim.
im.set_extent(im.get_extent())
axes.images.append(im)
im._remove_method = lambda h: axes.images.remove(h)
return im
def extract_matched_slices(axes=None, shape=None, extent=None,
transform=IDENTITY_TRANSFORM):
"""Determine the slice parameters to use, matched to the screen.
    :param axes: Axes object to query. Its extent and pixel size
                 determine the slice parameters
:param shape: Tuple of the full image shape to slice into. Upper
boundaries for slices will be cropped to fit within
this shape.
    :rtype: tuple of x0, x1, sx, y0, y1, sy
Indexing the full resolution array as array[y0:y1:sy, x0:x1:sx] returns
a view well-matched to the axes' resolution and extent
"""
# Find extent in display pixels (this gives the resolution we need
# to sample the array to)
ext = (axes.transAxes.transform([(1, 1)]) - axes.transAxes.transform([(0, 0)]))[0]
# Find the extent of the axes in 'world' coordinates
xlim, ylim = axes.get_xlim(), axes.get_ylim()
# Transform the limits to pixel coordinates
ind0 = transform.transform([min(xlim), min(ylim)])
ind1 = transform.transform([max(xlim), max(ylim)])
def _clip(val, lo, hi):
return int(max(min(val, hi), lo))
# Determine the range of pixels to extract from the array, including a 5
# pixel margin all around. We ensure that the shape of the resulting array
# will always be at least (1, 1) even if there is really no overlap, to
# avoid issues.
y0 = _clip(ind0[1] - 5, 0, shape[0] - 1)
y1 = _clip(ind1[1] + 5, 1, shape[0])
x0 = _clip(ind0[0] - 5, 0, shape[1] - 1)
x1 = _clip(ind1[0] + 5, 1, shape[1])
# Determine the strides that can be used when extracting the array
sy = int(max(1, min((y1 - y0) / 5., np.ceil(abs((ind1[1] - ind0[1]) / ext[1])))))
sx = int(max(1, min((x1 - x0) / 5., np.ceil(abs((ind1[0] - ind0[0]) / ext[0])))))
return x0, x1, sx, y0, y1, sy
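# Illustrative use of extract_matched_slices (assuming `ax` currently displays
# a large array `data`): indexing the full-resolution array with the returned
# slice parameters yields a view matched to the screen resolution.
#
#   x0, x1, sx, y0, y1, sy = extract_matched_slices(axes=ax, shape=data.shape)
#   subset = data[y0:y1:sy, x0:x1:sx]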
if __name__ == "__main__":
main()
| gpl-3.0 |
xebitstudios/Kayak | examples/poisson_glm.py | 3 | 1224 | import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
import sys
sys.path.append('..')
import kayak
N = 10000
D = 5
P = 1
learn = 0.00001
batch_size = 500
# Random inputs.
X = npr.randn(N,D)
true_W = npr.randn(D,P)
lam = np.exp(np.dot(X, true_W))
Y = npr.poisson(lam)
kyk_batcher = kayak.Batcher(batch_size, N)
# Build network.
kyk_inputs = kayak.Inputs(X, kyk_batcher)
# Labels.
kyk_targets = kayak.Targets(Y, kyk_batcher)
# Weights.
W = 0.01*npr.randn(D,P)
kyk_W = kayak.Parameter(W)
# Linear layer.
kyk_activation = kayak.MatMult( kyk_inputs, kyk_W)
# Exponential inverse-link function.
kyk_lam = kayak.ElemExp(kyk_activation)
# Poisson negative log likelihood.
kyk_nll = kyk_lam - kayak.ElemLog(kyk_lam) * kyk_targets
# Sum the losses.
kyk_loss = kayak.MatSum( kyk_nll )
for ii in xrange(100):
for batch in kyk_batcher:
loss = kyk_loss.value
print loss, np.sum((kyk_W.value - true_W)**2)
grad = kyk_loss.grad(kyk_W)
kyk_W.value -= learn * grad
# Plot the true and inferred rate for a subset of data.
T_slice = slice(0,100)
kyk_inputs.value = X[T_slice,:]
plt.figure()
plt.plot(lam[T_slice], 'k')
plt.plot(kyk_lam.value, '--r')
plt.show() | mit |
ndardenne/pymatgen | pymatgen/io/abinit/tasks.py | 2 | 166549 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""This module provides functions and classes related to Task objects."""
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import time
import datetime
import shutil
import collections
import abc
import copy
import yaml
import six
import numpy as np
from pprint import pprint
from itertools import product
from six.moves import map, zip, StringIO
from monty.dev import deprecated
from monty.string import is_string, list_strings
from monty.termcolor import colored
from monty.collections import AttrDict
from monty.functools import lazy_property, return_none_if_raise
from monty.json import MSONable
from monty.fnmatch import WildCard
from pymatgen.core.units import Memory
from pymatgen.serializers.json_coders import json_pretty_dump, pmg_serialize
from .utils import File, Directory, irdvars_for_ext, abi_splitext, FilepathFixer, Condition, SparseHistogram
from .qadapters import make_qadapter, QueueAdapter, QueueAdapterError
from . import qutils as qu
from .db import DBConnector
from .nodes import Status, Node, NodeError, NodeResults, NodeCorrections, FileNode, check_spectator
from . import abiinspect
from . import events
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"TaskManager",
"AbinitBuild",
"ParalHintsParser",
"ScfTask",
"NscfTask",
"RelaxTask",
"DdkTask",
"PhononTask",
"SigmaTask",
"OpticTask",
"AnaddbTask",
]
import logging
logger = logging.getLogger(__name__)
# Tools and helper functions.
def straceback():
"""Returns a string with the traceback."""
import traceback
return traceback.format_exc()
def lennone(PropperOrNone):
if PropperOrNone is None:
return 0
else:
return len(PropperOrNone)
def nmltostring(nml):
"""Convert a dictionary representing a Fortran namelist into a string."""
    if not isinstance(nml, dict):
        raise ValueError("nml should be a dict!")
    curstr = ""
    for key, group in nml.items():
namelist = ["&" + key]
for k, v in group.items():
if isinstance(v, list) or isinstance(v, tuple):
namelist.append(k + " = " + ",".join(map(str, v)) + ",")
elif is_string(v):
namelist.append(k + " = '" + str(v) + "',")
else:
namelist.append(k + " = " + str(v) + ",")
namelist.append("/")
curstr = curstr + "\n".join(namelist) + "\n"
return curstr
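# Illustrative call (hypothetical namelist values):
#
#   nmltostring({"files": {"prefix": "run01", "nstep": 10}})
#
# returns:
#
#   &files
#   prefix = 'run01',
#   nstep = 10,
#   /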
class TaskResults(NodeResults):
JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
JSON_SCHEMA["properties"] = {
"executable": {"type": "string", "required": True},
}
@classmethod
def from_node(cls, task):
"""Initialize an instance from an :class:`AbinitTask` instance."""
new = super(TaskResults, cls).from_node(task)
new.update(
executable=task.executable,
#executable_version:
#task_events=
pseudos=[p.as_dict() for p in task.input.pseudos],
#input=task.input
)
new.register_gridfs_files(
run_abi=(task.input_file.path, "t"),
run_abo=(task.output_file.path, "t"),
)
return new
class ParalConf(AttrDict):
"""
This object store the parameters associated to one
of the possible parallel configurations reported by ABINIT.
Essentially it is a dictionary whose values can also be accessed
as attributes. It also provides default values for selected keys
that might not be present in the ABINIT dictionary.
Example:
--- !Autoparal
info:
version: 1
autoparal: 1
max_ncpus: 108
configurations:
- tot_ncpus: 2 # Total number of CPUs
mpi_ncpus: 2 # Number of MPI processes.
omp_ncpus: 1 # Number of OMP threads (1 if not present)
mem_per_cpu: 10 # Estimated memory requirement per MPI processor in Megabytes.
efficiency: 0.4 # 1.0 corresponds to an "expected" optimal efficiency (strong scaling).
vars: { # Dictionary with the variables that should be added to the input.
varname1: varvalue1
varname2: varvalue2
}
-
...
For paral_kgb we have:
nproc npkpt npspinor npband npfft bandpp weight
108 1 1 12 9 2 0.25
108 1 1 108 1 2 27.00
96 1 1 24 4 1 1.50
84 1 1 12 7 2 0.25
"""
_DEFAULTS = {
"omp_ncpus": 1,
"mem_per_cpu": 0.0,
"vars": {}
}
def __init__(self, *args, **kwargs):
super(ParalConf, self).__init__(*args, **kwargs)
# Add default values if not already in self.
for k, v in self._DEFAULTS.items():
if k not in self:
self[k] = v
def __str__(self):
stream = StringIO()
pprint(self, stream=stream)
return stream.getvalue()
# TODO: Change name in abinit
# Remove tot_ncpus from Abinit
@property
def num_cores(self):
return self.mpi_procs * self.omp_threads
@property
def mem_per_proc(self):
return self.mem_per_cpu
@property
def mpi_procs(self):
return self.mpi_ncpus
@property
def omp_threads(self):
return self.omp_ncpus
@property
def speedup(self):
"""Estimated speedup reported by ABINIT."""
return self.efficiency * self.num_cores
@property
def tot_mem(self):
"""Estimated total memory in Mbs (computed from mem_per_proc)"""
return self.mem_per_proc * self.mpi_procs
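# Illustrative ParalConf usage (hypothetical configuration): values can be
# accessed both as dict items and as attributes, and the derived properties
# follow from the definitions above.
#
#   conf = ParalConf(mpi_ncpus=12, omp_ncpus=2, efficiency=0.5, mem_per_cpu=100.0)
#   conf.num_cores  # -> 24     (mpi_procs * omp_threads)
#   conf.speedup    # -> 12.0   (efficiency * num_cores)
#   conf.tot_mem    # -> 1200.0 (mem_per_proc * mpi_procs)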
class ParalHintsError(Exception):
"""Base error class for `ParalHints`."""
class ParalHintsParser(object):
Error = ParalHintsError
def __init__(self):
# Used to push error strings.
self._errors = collections.deque(maxlen=100)
def add_error(self, errmsg):
self._errors.append(errmsg)
def parse(self, filename):
"""
Read the `AutoParal` section (YAML format) from filename.
Assumes the file contains only one section.
"""
with abiinspect.YamlTokenizer(filename) as r:
doc = r.next_doc_with_tag("!Autoparal")
try:
d = yaml.load(doc.text_notag)
return ParalHints(info=d["info"], confs=d["configurations"])
except:
import traceback
sexc = traceback.format_exc()
err_msg = "Wrong YAML doc:\n%s\n\nException:\n%s" % (doc.text, sexc)
self.add_error(err_msg)
logger.critical(err_msg)
raise self.Error(err_msg)
class ParalHints(collections.Iterable):
"""
Iterable with the hints for the parallel execution reported by ABINIT.
"""
Error = ParalHintsError
def __init__(self, info, confs):
self.info = info
self._confs = [ParalConf(**d) for d in confs]
@classmethod
def from_mpi_omp_lists(cls, mpi_procs, omp_threads):
"""
Build a list of Parallel configurations from two lists
containing the number of MPI processes and the number of OpenMP threads
i.e. product(mpi_procs, omp_threads).
        The configurations have parallel efficiency set to 1.0 and no input variables.
Mainly used for preparing benchmarks.
"""
info = {}
        confs = [ParalConf(mpi_ncpus=p, omp_ncpus=t, efficiency=1.0)
                 for p, t in product(mpi_procs, omp_threads)]
return cls(info, confs)
def __getitem__(self, key):
return self._confs[key]
def __iter__(self):
return self._confs.__iter__()
def __len__(self):
return self._confs.__len__()
def __repr__(self):
return "\n".join(str(conf) for conf in self)
def __str__(self):
return repr(self)
@lazy_property
def max_cores(self):
"""Maximum number of cores."""
return max(c.mpi_procs * c.omp_threads for c in self)
@lazy_property
def max_mem_per_proc(self):
"""Maximum memory per MPI process."""
return max(c.mem_per_proc for c in self)
@lazy_property
def max_speedup(self):
"""Maximum speedup."""
return max(c.speedup for c in self)
@lazy_property
def max_efficiency(self):
"""Maximum parallel efficiency."""
return max(c.efficiency for c in self)
@pmg_serialize
def as_dict(self, **kwargs):
return {"info": self.info, "confs": self._confs}
@classmethod
def from_dict(cls, d):
return cls(info=d["info"], confs=d["confs"])
def copy(self):
"""Shallow copy of self."""
return copy.copy(self)
def select_with_condition(self, condition, key=None):
"""
Remove all the configurations that do not satisfy the given condition.
Args:
condition: dict or :class:`Condition` object with operators expressed with a Mongodb-like syntax
key: Selects the sub-dictionary on which condition is applied, e.g. key="vars"
if we have to filter the configurations depending on the values in vars
"""
condition = Condition.as_condition(condition)
new_confs = []
for conf in self:
# Select the object on which condition is applied
obj = conf if key is None else AttrDict(conf[key])
add_it = condition(obj=obj)
#if key is "vars": print("conf", conf, "added:", add_it)
if add_it: new_confs.append(conf)
self._confs = new_confs
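    # Illustrative conditions (Mongodb-like syntax; the variable name `npband`
    # below is just a hypothetical example):
    #
    #   hints.select_with_condition({"efficiency": {"$gte": 0.8}})
    #   hints.select_with_condition({"npband": {"$eq": 8}}, key="vars")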
def sort_by_efficiency(self, reverse=True):
"""Sort the configurations in place. items with highest efficiency come first"""
self._confs.sort(key=lambda c: c.efficiency, reverse=reverse)
return self
def sort_by_speedup(self, reverse=True):
"""Sort the configurations in place. items with highest speedup come first"""
self._confs.sort(key=lambda c: c.speedup, reverse=reverse)
return self
def sort_by_mem_per_proc(self, reverse=False):
"""Sort the configurations in place. items with lowest memory per proc come first."""
# Avoid sorting if mem_per_cpu is not available.
if any(c.mem_per_proc > 0.0 for c in self):
self._confs.sort(key=lambda c: c.mem_per_proc, reverse=reverse)
return self
def multidimensional_optimization(self, priorities=("speedup", "efficiency")):
# Mapping property --> options passed to sparse_histogram
        opts = dict(speedup=dict(step=1.0), efficiency=dict(step=0.1), mem_per_proc=dict(step=1024))
#opts = dict(zip(priorities, bin_widths))
opt_confs = self._confs
for priority in priorities:
histogram = SparseHistogram(opt_confs, key=lambda c: getattr(c, priority), **opts[priority])
pos = 0 if priority == "mem_per_proc" else -1
opt_confs = histogram.values[pos]
#histogram.plot(show=True, savefig="hello.pdf")
return self.__class__(info=self.info, confs=opt_confs)
#def histogram_efficiency(self, step=0.1):
# """Returns a :class:`SparseHistogram` with configuration grouped by parallel efficiency."""
# return SparseHistogram(self._confs, key=lambda c: c.efficiency, step=step)
#def histogram_speedup(self, step=1.0):
# """Returns a :class:`SparseHistogram` with configuration grouped by parallel speedup."""
# return SparseHistogram(self._confs, key=lambda c: c.speedup, step=step)
#def histogram_memory(self, step=1024):
# """Returns a :class:`SparseHistogram` with configuration grouped by memory."""
# return SparseHistogram(self._confs, key=lambda c: c.speedup, step=step)
#def filter(self, qadapter):
# """Return a new list of configurations that can be executed on the `QueueAdapter` qadapter."""
# new_confs = [pconf for pconf in self if qadapter.can_run_pconf(pconf)]
# return self.__class__(info=self.info, confs=new_confs)
def get_ordered_with_policy(self, policy, max_ncpus):
"""
Sort and return a new list of configurations ordered according to the :class:`TaskPolicy` policy.
"""
# Build new list since we are gonna change the object in place.
hints = self.__class__(self.info, confs=[c for c in self if c.num_cores <= max_ncpus])
# First select the configurations satisfying the condition specified by the user (if any)
bkp_hints = hints.copy()
if policy.condition:
logger.info("Applying condition %s" % str(policy.condition))
hints.select_with_condition(policy.condition)
            # Undo change if no configuration fulfills the requirements.
if not hints:
hints = bkp_hints
logger.warning("Empty list of configurations after policy.condition")
# Now filter the configurations depending on the values in vars
bkp_hints = hints.copy()
if policy.vars_condition:
logger.info("Applying vars_condition %s" % str(policy.vars_condition))
hints.select_with_condition(policy.vars_condition, key="vars")
            # Undo change if no configuration fulfills the requirements.
if not hints:
hints = bkp_hints
logger.warning("Empty list of configurations after policy.vars_condition")
if len(policy.autoparal_priorities) == 1:
# Example: hints.sort_by_speedup()
if policy.autoparal_priorities[0] in ['efficiency', 'speedup', 'mem_per_proc']:
getattr(hints, "sort_by_" + policy.autoparal_priorities[0])()
elif isinstance(policy.autoparal_priorities[0], collections.Mapping):
if policy.autoparal_priorities[0]['meta_priority'] == 'highest_speedup_minimum_efficiency_cutoff':
min_efficiency = policy.autoparal_priorities[0].get('minimum_efficiency', 1.0)
hints.select_with_condition({'efficiency': {'$gte': min_efficiency}})
hints.sort_by_speedup()
else:
hints = hints.multidimensional_optimization(priorities=policy.autoparal_priorities)
if len(hints) == 0: raise ValueError("len(hints) == 0")
#TODO: make sure that num_cores == 1 is never selected when we have more than one configuration
#if len(hints) > 1:
# hints.select_with_condition(dict(num_cores={"$eq": 1)))
        # Return the final (ordered) list of configurations (best first).
return hints
class TaskPolicy(object):
"""
This object stores the parameters used by the :class:`TaskManager` to
create the submission script and/or to modify the ABINIT variables
governing the parallel execution. A `TaskPolicy` object contains
a set of variables that specify the launcher, as well as the options
and the conditions used to select the optimal configuration for the parallel run
"""
@classmethod
def as_policy(cls, obj):
"""
Converts an object obj into a `:class:`TaskPolicy. Accepts:
* None
* TaskPolicy
* dict-like object
"""
if obj is None:
# Use default policy.
return TaskPolicy()
else:
if isinstance(obj, cls):
return obj
elif isinstance(obj, collections.Mapping):
return cls(**obj)
else:
raise TypeError("Don't know how to convert type %s to %s" % (type(obj), cls))
@classmethod
def autodoc(cls):
return """
autoparal: # (integer). 0 to disable the autoparal feature (DEFAULT: 1 i.e. autoparal is on)
condition: # condition used to filter the autoparal configurations (Mongodb-like syntax).
# DEFAULT: empty i.e. ignored.
vars_condition: # Condition used to filter the list of ABINIT variables reported by autoparal
# (Mongodb-like syntax). DEFAULT: empty i.e. ignored.
frozen_timeout: # A job is considered frozen and its status is set to ERROR if no change to
# the output file has been done for `frozen_timeout` seconds. Accepts int with seconds or
# string in slurm form i.e. days-hours:minutes:seconds. DEFAULT: 1 hour.
precedence: # Under development.
autoparal_priorities: # Under development.
"""
def __init__(self, **kwargs):
"""
See autodoc
"""
self.autoparal = kwargs.pop("autoparal", 1)
self.condition = Condition(kwargs.pop("condition", {}))
self.vars_condition = Condition(kwargs.pop("vars_condition", {}))
self.precedence = kwargs.pop("precedence", "autoparal_conf")
self.autoparal_priorities = kwargs.pop("autoparal_priorities", ["speedup"])
#self.autoparal_priorities = kwargs.pop("autoparal_priorities", ["speedup", "efficiecy", "memory"]
# TODO frozen_timeout could be computed as a fraction of the timelimit of the qadapter!
self.frozen_timeout = qu.slurm_parse_timestr(kwargs.pop("frozen_timeout", "0-1:00:00"))
if kwargs:
raise ValueError("Found invalid keywords in policy section:\n %s" % str(kwargs.keys()))
# Consistency check.
if self.precedence not in ("qadapter", "autoparal_conf"):
raise ValueError("Wrong value for policy.precedence, should be qadapter or autoparal_conf")
def __str__(self):
lines = []
app = lines.append
for k, v in self.__dict__.items():
if k.startswith("_"): continue
app("%s: %s" % (k, v))
return "\n".join(lines)
class ManagerIncreaseError(Exception):
"""
Exception raised by the manager if the increase request failed
"""
class FixQueueCriticalError(Exception):
"""
    Error raised when a problem could not be fixed at the task level.
"""
# Global variable used to store the task manager returned by `from_user_config`.
_USER_CONFIG_TASKMANAGER = None
class TaskManager(MSONable):
"""
A `TaskManager` is responsible for the generation of the job script and the submission
of the task, as well as for the specification of the parameters passed to the resource manager
(e.g. Slurm, PBS ...) and/or the run-time specification of the ABINIT variables governing the parallel execution.
A `TaskManager` delegates the generation of the submission script and the submission of the task to the :class:`QueueAdapter`.
A `TaskManager` has a :class:`TaskPolicy` that governs the specification of the parameters for the parallel executions.
Ideally, the TaskManager should be the **main entry point** used by the task to deal with job submission/optimization
"""
YAML_FILE = "manager.yml"
USER_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")
ENTRIES = {"policy", "qadapters", "db_connector", "batch_adapter"}
@classmethod
def autodoc(cls):
from .db import DBConnector
s = """
# TaskManager configuration file (YAML Format)
policy:
# Dictionary with options used to control the execution of the tasks.
qadapters:
# List of qadapters objects (mandatory)
- # qadapter_1
- # qadapter_2
db_connector:
# Connection to MongoDB database (optional)
batch_adapter:
# Adapter used to submit flows with batch script. (optional)
##########################################
# Individual entries are documented below:
##########################################
"""
s += "policy: " + TaskPolicy.autodoc() + "\n"
s += "qadapter: " + QueueAdapter.autodoc() + "\n"
#s += "db_connector: " + DBConnector.autodoc()
return s
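    # A minimal manager.yml for running jobs in the shell might look like this
    # (illustrative sketch; adapt the queue/hardware sections to your machine):
    #
    #   qadapters:
    #     - priority: 1
    #       queue: {qtype: shell, qname: localhost}
    #       job: {mpi_runner: mpirun}
    #       limits: {timelimit: "1:00:00", max_cores: 2}
    #       hardware: {num_nodes: 1, sockets_per_node: 1, cores_per_socket: 2, mem_per_node: 4Gb}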
@classmethod
def from_user_config(cls):
"""
        Initialize the :class:`TaskManager` from the YAML file 'manager.yml'.
Search first in the working directory and then in the abipy configuration directory.
Raises:
RuntimeError if file is not found.
"""
global _USER_CONFIG_TASKMANAGER
if _USER_CONFIG_TASKMANAGER is not None:
return _USER_CONFIG_TASKMANAGER
# Try in the current directory then in user configuration directory.
path = os.path.join(os.getcwd(), cls.YAML_FILE)
if not os.path.exists(path):
path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)
if not os.path.exists(path):
raise RuntimeError(colored(
"\nCannot locate %s neither in current directory nor in %s\n"
"\nCannot locate %s neither in current directory nor in %s\n"
"!!! PLEASE READ THIS: !!!\n"
"To use abipy to run jobs this file must be present\n"
"It provides a description of the cluster/computer you are running on\n"
"Examples are provided in abipy/data/managers." % (cls.YAML_FILE, path), color="red"))
_USER_CONFIG_TASKMANAGER = cls.from_file(path)
return _USER_CONFIG_TASKMANAGER
@classmethod
def from_file(cls, filename):
"""Read the configuration parameters from the Yaml file filename."""
try:
with open(filename, "r") as fh:
return cls.from_dict(yaml.load(fh))
except Exception as exc:
print("Error while reading TaskManager parameters from %s\n" % filename)
raise
@classmethod
def from_string(cls, s):
"""Create an instance from string s containing a YAML dictionary."""
return cls.from_dict(yaml.load(s))
@classmethod
def as_manager(cls, obj):
"""
Convert obj into TaskManager instance. Accepts string, filepath, dictionary, `TaskManager` object.
If obj is None, the manager is initialized from the user config file.
"""
if isinstance(obj, cls): return obj
if obj is None: return cls.from_user_config()
if is_string(obj):
if os.path.exists(obj):
return cls.from_file(obj)
else:
return cls.from_string(obj)
elif isinstance(obj, collections.Mapping):
return cls.from_dict(obj)
else:
raise TypeError("Don't know how to convert type %s to TaskManager" % type(obj))
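    # Illustrative conversions handled by as_manager:
    #
    #   TaskManager.as_manager(None)                    # user config file (manager.yml)
    #   TaskManager.as_manager("/path/to/manager.yml")  # YAML file (hypothetical path)
    #   TaskManager.as_manager("qadapters: [...]")      # YAML string (hypothetical)
    #   TaskManager.as_manager({"qadapters": [...]})    # dict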
@classmethod
def from_dict(cls, d):
"""Create an instance from a dictionary."""
return cls(**{k: v for k, v in d.items() if k in cls.ENTRIES})
@pmg_serialize
def as_dict(self):
return self._kwargs
def __init__(self, **kwargs):
"""
        Args:
            policy: :class:`TaskPolicy` object, or None to use the default policy.
            qadapters: List of qadapters in YAML format.
            db_connector: Dictionary with data used to connect to the database (optional).
"""
# Keep a copy of kwargs
self._kwargs = copy.deepcopy(kwargs)
self.policy = TaskPolicy.as_policy(kwargs.pop("policy", None))
# Initialize database connector (if specified)
self.db_connector = DBConnector(**kwargs.pop("db_connector", {}))
# Build list of QAdapters. Neglect entry if priority == 0 or `enabled: no"
qads = []
for d in kwargs.pop("qadapters"):
            if not d.get("enabled", True): continue
qad = make_qadapter(**d)
if qad.priority > 0:
qads.append(qad)
elif qad.priority < 0:
raise ValueError("qadapter cannot have negative priority:\n %s" % qad)
if not qads:
raise ValueError("Received emtpy list of qadapters")
#if len(qads) != 1:
# raise NotImplementedError("For the time being multiple qadapters are not supported! Please use one adapter")
# Order qdapters according to priority.
qads = sorted(qads, key=lambda q: q.priority)
priorities = [q.priority for q in qads]
if len(priorities) != len(set(priorities)):
raise ValueError("Two or more qadapters have same priority. This is not allowed. Check taskmanager.yml")
self._qads, self._qadpos = tuple(qads), 0
# Initialize the qadapter for batch script submission.
d = kwargs.pop("batch_adapter", None)
self.batch_adapter = None
if d: self.batch_adapter = make_qadapter(**d)
#print("batch_adapter", self.batch_adapter)
if kwargs:
raise ValueError("Found invalid keywords in the taskmanager file:\n %s" % str(list(kwargs.keys())))
def to_shell_manager(self, mpi_procs=1):
"""
Returns a new `TaskManager` with the same parameters as self but replace the :class:`QueueAdapter`
with a :class:`ShellAdapter` with mpi_procs so that we can submit the job without passing through the queue.
"""
my_kwargs = copy.deepcopy(self._kwargs)
my_kwargs["policy"] = TaskPolicy(autoparal=0)
# On BlueGene we need at least two qadapters.
# One for running jobs on the computing nodes and another one
# for running small jobs on the fronted. These two qadapters
        # will have different environments and different executables.
        # If none of the q-adapters has qtype == shell, we change qtype to shell
# and we return a new Manager for sequential jobs with the same parameters as self.
# If the list contains a qadapter with qtype == shell, we ignore the remaining qadapters
# when we build the new Manager.
has_shell_qad = False
for d in my_kwargs["qadapters"]:
if d["queue"]["qtype"] == "shell": has_shell_qad = True
if has_shell_qad:
my_kwargs["qadapters"] = [d for d in my_kwargs["qadapters"] if d["queue"]["qtype"] == "shell"]
for d in my_kwargs["qadapters"]:
d["queue"]["qtype"] = "shell"
d["limits"]["min_cores"] = mpi_procs
d["limits"]["max_cores"] = mpi_procs
# If shell_runner is specified, replace mpi_runner with shell_runner
# in the script used to run jobs on the frontend.
        # On some machines based on Slurm, indeed, mpirun/mpiexec is not available
# and jobs should be executed with `srun -n4 exec` when running on the computing nodes
# or with `exec` when running in sequential on the frontend.
if "job" in d and "shell_runner" in d["job"]:
shell_runner = d["job"]["shell_runner"]
#print("shell_runner:", shell_runner, type(shell_runner))
if not shell_runner or shell_runner == "None": shell_runner = ""
d["job"]["mpi_runner"] = shell_runner
#print("shell_runner:", shell_runner)
#print(my_kwargs)
new = self.__class__(**my_kwargs)
new.set_mpi_procs(mpi_procs)
return new
def new_with_fixed_mpi_omp(self, mpi_procs, omp_threads):
"""
Return a new `TaskManager` in which autoparal has been disabled.
The jobs will be executed with `mpi_procs` MPI processes and `omp_threads` OpenMP threads.
Useful for generating input files for benchmarks.
"""
new = self.deepcopy()
new.policy.autoparal = 0
new.set_mpi_procs(mpi_procs)
new.set_omp_threads(omp_threads)
return new
@property
def has_queue(self):
"""True if we are submitting jobs via a queue manager."""
return self.qadapter.QTYPE.lower() != "shell"
@property
def qads(self):
"""List of :class:`QueueAdapter` objects sorted according to priorities (highest comes first)"""
return self._qads
@property
def qadapter(self):
"""The qadapter used to submit jobs."""
return self._qads[self._qadpos]
def select_qadapter(self, pconfs):
"""
        Given a list of parallel configurations, pconfs, this method selects an `optimal` configuration
according to some criterion as well as the :class:`QueueAdapter` to use.
Args:
pconfs: :class:`ParalHints` object with the list of parallel configurations
Returns:
            :class:`ParalConf` object with the `optimal` configuration.
"""
# Order the list of configurations according to policy.
policy, max_ncpus = self.policy, self.max_cores
pconfs = pconfs.get_ordered_with_policy(policy, max_ncpus)
if policy.precedence == "qadapter":
# Try to run on the qadapter with the highest priority.
for qadpos, qad in enumerate(self.qads):
possible_pconfs = [pc for pc in pconfs if qad.can_run_pconf(pc)]
if qad.allocation == "nodes":
# Select the configuration divisible by nodes if possible.
for pconf in possible_pconfs:
if pconf.num_cores % qad.hw.cores_per_node == 0:
return self._use_qadpos_pconf(qadpos, pconf)
# Here we select the first one.
if possible_pconfs:
return self._use_qadpos_pconf(qadpos, possible_pconfs[0])
elif policy.precedence == "autoparal_conf":
# Try to run on the first pconf irrespectively of the priority of the qadapter.
for pconf in pconfs:
for qadpos, qad in enumerate(self.qads):
if qad.allocation == "nodes" and not pconf.num_cores % qad.hw.cores_per_node == 0:
continue # Ignore it. not very clean
if qad.can_run_pconf(pconf):
return self._use_qadpos_pconf(qadpos, pconf)
else:
raise ValueError("Wrong value of policy.precedence = %s" % policy.precedence)
# No qadapter could be found
raise RuntimeError("Cannot find qadapter for this run!")
def _use_qadpos_pconf(self, qadpos, pconf):
"""
This function is called when we have accepted the :class:`ParalConf` pconf.
Returns pconf
"""
self._qadpos = qadpos
# Change the number of MPI/OMP cores.
self.set_mpi_procs(pconf.mpi_procs)
if self.has_omp: self.set_omp_threads(pconf.omp_threads)
# Set memory per proc.
        #FIXME: Fixer may have changed the memory per proc and it should not be reset by ParalConf
#self.set_mem_per_proc(pconf.mem_per_proc)
return pconf
def __str__(self):
"""String representation."""
lines = []
app = lines.append
#app("[Task policy]\n%s" % str(self.policy))
for i, qad in enumerate(self.qads):
app("[Qadapter %d]\n%s" % (i, str(qad)))
app("Qadapter selected: %d" % self._qadpos)
if self.has_db:
app("[MongoDB database]:")
app(str(self.db_connector))
return "\n".join(lines)
@property
def has_db(self):
"""True if we are using MongoDB database"""
return bool(self.db_connector)
@property
def has_omp(self):
"""True if we are using OpenMP parallelization."""
return self.qadapter.has_omp
@property
def num_cores(self):
"""Total number of CPUs used to run the task."""
return self.qadapter.num_cores
@property
def mpi_procs(self):
"""Number of MPI processes."""
return self.qadapter.mpi_procs
@property
def mem_per_proc(self):
"""Memory per MPI process."""
return self.qadapter.mem_per_proc
@property
def omp_threads(self):
"""Number of OpenMP threads"""
return self.qadapter.omp_threads
def deepcopy(self):
"""Deep copy of self."""
return copy.deepcopy(self)
def set_mpi_procs(self, mpi_procs):
"""Set the number of MPI processes to use."""
self.qadapter.set_mpi_procs(mpi_procs)
def set_omp_threads(self, omp_threads):
"""Set the number of OpenMp threads to use."""
self.qadapter.set_omp_threads(omp_threads)
def set_mem_per_proc(self, mem_mb):
"""Set the memory (in Megabytes) per CPU."""
self.qadapter.set_mem_per_proc(mem_mb)
@property
def max_cores(self):
"""
Maximum number of cores that can be used.
This value is mainly used in the autoparal part to get the list of possible configurations.
"""
return max(q.hint_cores for q in self.qads)
def get_njobs_in_queue(self, username=None):
"""
returns the number of jobs in the queue,
returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
return self.qadapter.get_njobs_in_queue(username=username)
def cancel(self, job_id):
"""Cancel the job. Returns exit status."""
return self.qadapter.cancel(job_id)
def write_jobfile(self, task, **kwargs):
"""
Write the submission script. Return the path of the script
================ ============================================
kwargs Meaning
================ ============================================
exec_args List of arguments passed to task.executable.
Default: no arguments.
================ ============================================
"""
script = self.qadapter.get_script_str(
job_name=task.name,
launch_dir=task.workdir,
executable=task.executable,
qout_path=task.qout_file.path,
qerr_path=task.qerr_file.path,
stdin=task.files_file.path,
stdout=task.log_file.path,
stderr=task.stderr_file.path,
exec_args=kwargs.pop("exec_args", []),
)
# Write the script.
with open(task.job_file.path, "w") as fh:
fh.write(script)
task.job_file.chmod(0o740)
return task.job_file.path
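    # Illustrative call (``task`` is any Task providing the file attributes used
    # above; the flag shown is hypothetical):
    #
    #   script_path = manager.write_jobfile(task, exec_args=["--some-flag"])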
def launch(self, task, **kwargs):
"""
Build the input files and submit the task via the :class:`Qadapter`
Args:
task: :class:`TaskObject`
Returns:
Process object.
"""
if task.status == task.S_LOCKED:
raise ValueError("You shall not submit a locked task!")
# Build the task
task.build()
# Pass information on the time limit to Abinit (we always assume ndtset == 1)
#if False and isinstance(task, AbinitTask):
if isinstance(task, AbinitTask):
args = kwargs.get("exec_args", [])
if args is None: args = []
args = args[:]
args.append("--timelimit %s" % qu.time2slurm(self.qadapter.timelimit))
kwargs["exec_args"] = args
logger.info("Will pass timelimit option to abinit %s:" % args)
# Write the submission script
script_file = self.write_jobfile(task, **kwargs)
# Submit the task and save the queue id.
try:
qjob, process = self.qadapter.submit_to_queue(script_file)
task.set_status(task.S_SUB, msg='submitted to queue')
task.set_qjob(qjob)
return process
except self.qadapter.MaxNumLaunchesError as exc:
# TODO: Here we should try to switch to another qadapter
# 1) Find a new parallel configuration in those stored in task.pconfs
# 2) Change the input file.
# 3) Regenerate the submission script
# 4) Relaunch
task.set_status(task.S_ERROR, msg="max_num_launches reached: %s" % str(exc))
raise
def get_collection(self, **kwargs):
"""Return the MongoDB collection used to store the results."""
return self.db_connector.get_collection(**kwargs)
def increase_mem(self):
# OLD
# with GW calculations in mind with GW mem = 10,
        # the response function is in memory and not distributed
# we need to increase memory if jobs fail ...
# return self.qadapter.more_mem_per_proc()
try:
self.qadapter.more_mem_per_proc()
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to increase mem')
def increase_ncpus(self):
"""
        Increase the number of cpus: first ask the current qadapter; if it raises a QueueAdapterError,
        switch to the next qadapter. If all of them fail, raise a ManagerIncreaseError.
"""
try:
self.qadapter.more_cores()
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to increase ncpu')
def increase_resources(self):
try:
self.qadapter.more_cores()
return
except QueueAdapterError:
pass
try:
self.qadapter.more_mem_per_proc()
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to increase resources')
def exclude_nodes(self, nodes):
try:
self.qadapter.exclude_nodes(nodes=nodes)
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to exclude nodes')
def increase_time(self):
try:
self.qadapter.more_time()
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to increase time')
class AbinitBuild(object):
"""
This object stores information on the options used to build Abinit
.. attribute:: info
String with build information as produced by `abinit -b`
.. attribute:: version
Abinit version number e.g 8.0.1 (string)
.. attribute:: has_netcdf
True if netcdf is enabled.
.. attribute:: has_etsfio
True if etsf-io is enabled.
.. attribute:: has_omp
True if OpenMP is enabled.
.. attribute:: has_mpi
True if MPI is enabled.
.. attribute:: has_mpiio
True if MPI-IO is supported.
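Example (hypothetical usage sketch; assumes the `abinit` executable is in $PATH):
build = AbinitBuild()
print(build)
if not build.has_netcdf: print("netcdf support is not available")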
"""
def __init__(self, workdir=None, manager=None):
# Build a simple manager to run the job in a shell subprocess.
manager = TaskManager.as_manager(manager).to_shell_manager(mpi_procs=1)
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
# Generate a shell script to execute `abinit -b`
stdout = os.path.join(workdir, "run.abo")
script = manager.qadapter.get_script_str(
job_name="abinit_b",
launch_dir=workdir,
executable="abinit",
qout_path=os.path.join(workdir, "queue.qout"),
qerr_path=os.path.join(workdir, "queue.qerr"),
#stdin=os.path.join(workdir, "run.files"),
stdout=stdout,
stderr=os.path.join(workdir, "run.err"),
exec_args=["-b"],
)
# Execute the script.
script_file = os.path.join(workdir, "job.sh")
with open(script_file, "wt") as fh:
fh.write(script)
qjob, process = manager.qadapter.submit_to_queue(script_file)
process.wait()
if process.returncode != 0:
logger.critical("Error while executing %s" % script_file)
with open(stdout, "r") as fh:
self.info = fh.read()
# info string has the following format.
"""
=== Build Information ===
Version : 8.0.1
Build target : x86_64_darwin15.0.0_gnu5.3
Build date : 20160122
=== Compiler Suite ===
C compiler : gnu
C++ compiler : gnuApple
Fortran compiler : gnu5.3
CFLAGS : -g -O2 -mtune=native -march=native
CXXFLAGS : -g -O2 -mtune=native -march=native
FCFLAGS : -g -ffree-line-length-none
FC_LDFLAGS :
=== Optimizations ===
Debug level : basic
Optimization level : standard
Architecture : unknown_unknown
=== Multicore ===
Parallel build : yes
Parallel I/O : yes
openMP support : no
GPU support : no
=== Connectors / Fallbacks ===
Connectors on : yes
Fallbacks on : yes
DFT flavor : libxc-fallback+atompaw-fallback+wannier90-fallback
FFT flavor : none
LINALG flavor : netlib
MATH flavor : none
TIMER flavor : abinit
TRIO flavor : netcdf+etsf_io-fallback
=== Experimental features ===
Bindings : @enable_bindings@
Exports : no
GW double-precision : yes
=== Bazaar branch information ===
Branch ID : gmatteo@gmac-20160112110440-lf6exhneqim9082h
Revision : 1226
Committed : 0
"""
self.has_netcdf = False
self.has_etsfio = False
self.has_omp = False
self.has_mpi, self.has_mpiio = False, False
def yesno2bool(line):
ans = line.split()[-1]
return dict(yes=True, no=False)[ans]
# Parse info.
for line in self.info.splitlines():
if "Version" in line: self.version = line.split()[-1]
if "TRIO flavor" in line:
self.has_netcdf = "netcdf" in line
self.has_etsfio = "etsf_io" in line
if "openMP support" in line: self.has_omp = yesno2bool(line)
if "Parallel build" in line: self.has_mpi = yesno2bool(line)
if "Parallel I/O" in line: self.has_mpiio = yesno2bool(line)
def __str__(self):
lines = []
app = lines.append
app("Abinit Build Information:")
app(" Abinit version: %s" % self.version)
app(" MPI: %s, MPI-IO: %s, OpenMP: %s" % (self.has_mpi, self.has_mpiio, self.has_omp))
app(" Netcdf: %s, ETSF-IO: %s" % (self.has_netcdf, self.has_etsfio))
return "\n".join(lines)
class FakeProcess(object):
"""
This object is attached to a :class:`Task` instance if the task has not been submitted yet.
This trick allows us to simulate a process that is still running so that
we can safely poll task.process.
"""
def poll(self):
return None
def wait(self):
raise RuntimeError("Cannot wait a FakeProcess")
def communicate(self, input=None):
raise RuntimeError("Cannot communicate with a FakeProcess")
def kill(self):
raise RuntimeError("Cannot kill a FakeProcess")
@property
def returncode(self):
return None
class MyTimedelta(datetime.timedelta):
"""A customized version of timedelta whose __str__ method doesn't print microseconds."""
def __new__(cls, days, seconds, microseconds):
return datetime.timedelta.__new__(cls, days, seconds, microseconds)
def __str__(self):
"""Remove microseconds from timedelta default __str__"""
s = super(MyTimedelta, self).__str__()
microsec = s.find(".")
if microsec != -1: s = s[:microsec]
return s
@classmethod
def as_timedelta(cls, delta):
"""Convert delta into a MyTimedelta object."""
# Cannot monkey patch the __class__ and must pass through __new__ as the object is immutable.
if isinstance(delta, cls): return delta
return cls(delta.days, delta.seconds, delta.microseconds)
class TaskDateTimes(object):
"""
Small object containing useful :class:`datetime.datetime` objects associated to important events.
.. attributes:
init: initialization datetime
submission: submission datetime
start: Begin of execution.
end: End of execution.
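Example (illustrative; the attributes are normally set by the framework):
times = TaskDateTimes()
print(times)                  # only `init` is set at this point
print(times.get_runtime())    # None until `start` is set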
"""
def __init__(self):
self.init = datetime.datetime.now()
self.submission, self.start, self.end = None, None, None
def __str__(self):
lines = []
app = lines.append
app("Initialization done on: %s" % self.init)
if self.submission is not None: app("Submitted on: %s" % self.submission)
if self.start is not None: app("Started on: %s" % self.start)
if self.end is not None: app("Completed on: %s" % self.end)
return "\n".join(lines)
def reset(self):
"""Reinitialize the counters."""
self.init = datetime.datetime.now()
self.submission, self.start, self.end = None, None, None
def get_runtime(self):
""":class:`timedelta` with the run-time, None if the Task is not running"""
if self.start is None: return None
if self.end is None:
delta = datetime.datetime.now() - self.start
else:
delta = self.end - self.start
return MyTimedelta.as_timedelta(delta)
def get_time_inqueue(self):
"""
:class:`timedelta` with the time spent in the Queue, None if the Task is not running
.. note::
This value is always greater than the real value computed by the resource manager
as we start to count only when check_status sets the `Task` status to S_RUN.
"""
if self.submission is None: return None
if self.start is None:
delta = datetime.datetime.now() - self.submission
else:
delta = self.start - self.submission
# This happens when we read the exact start datetime from the ABINIT log file.
if delta.total_seconds() < 0: delta = datetime.timedelta(seconds=0)
return MyTimedelta.as_timedelta(delta)
class TaskError(NodeError):
"""Base Exception for :class:`Task` methods"""
class TaskRestartError(TaskError):
"""Exception raised while trying to restart the :class:`Task`."""
class Task(six.with_metaclass(abc.ABCMeta, Node)):
"""A Task is a node that performs some kind of calculation."""
# Use class attributes for TaskErrors so that we don't have to import them.
Error = TaskError
RestartError = TaskRestartError
# List of `AbinitEvent` subclasses that are tested in the check_status method.
# Subclasses should provide their own list if they need to check the converge status.
CRITICAL_EVENTS = []
# Prefixes for Abinit (input, output, temporary) files.
Prefix = collections.namedtuple("Prefix", "idata odata tdata")
pj = os.path.join
prefix = Prefix(pj("indata", "in"), pj("outdata", "out"), pj("tmpdata", "tmp"))
del Prefix, pj
def __init__(self, input, workdir=None, manager=None, deps=None):
"""
Args:
input: :class:`AbinitInput` object.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
deps: Dictionary specifying the dependency of this node.
None means that this Task has no dependency.
"""
# Init the node
super(Task, self).__init__()
self._input = input
if workdir is not None:
self.set_workdir(workdir)
if manager is not None:
self.set_manager(manager)
# Handle possible dependencies.
if deps:
self.add_deps(deps)
# Date-time associated to submission, start and end.
self.datetimes = TaskDateTimes()
# Count the number of restarts.
self.num_restarts = 0
self._qjob = None
self.queue_errors = []
self.abi_errors = []
# Two flags that provide, dynamically, information on the scaling behaviour of a task.
# If any fixing procedure finds non-scaling behaviour, or if a task type is clearly
# not scaling, these flags should be switched off.
self.mem_scales = True
self.load_scales = True
def __getstate__(self):
"""
Return the state that is pickled as the contents of the instance.
In this case we just remove the process since Subprocess objects cannot be pickled.
This is the reason why we have to store the returncode in self._returncode instead
of using self.process.returncode.
"""
return {k: v for k, v in self.__dict__.items() if k not in ["_process"]}
#@check_spectator
def set_workdir(self, workdir, chroot=False):
"""Set the working directory. Cannot be set more than once unless chroot is True"""
if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
self.workdir = os.path.abspath(workdir)
# Files required for the execution.
self.input_file = File(os.path.join(self.workdir, "run.abi"))
self.output_file = File(os.path.join(self.workdir, "run.abo"))
self.files_file = File(os.path.join(self.workdir, "run.files"))
self.job_file = File(os.path.join(self.workdir, "job.sh"))
self.log_file = File(os.path.join(self.workdir, "run.log"))
self.stderr_file = File(os.path.join(self.workdir, "run.err"))
self.start_lockfile = File(os.path.join(self.workdir, "__startlock__"))
# This file is produced by Abinit if nprocs > 1 and MPI_ABORT.
self.mpiabort_file = File(os.path.join(self.workdir, "__ABI_MPIABORTFILE__"))
# Directories with input|output|temporary data.
self.indir = Directory(os.path.join(self.workdir, "indata"))
self.outdir = Directory(os.path.join(self.workdir, "outdata"))
self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
# stderr and output file of the queue manager. Note extensions.
self.qerr_file = File(os.path.join(self.workdir, "queue.qerr"))
self.qout_file = File(os.path.join(self.workdir, "queue.qout"))
def set_manager(self, manager):
"""Set the :class:`TaskManager` used to launch the Task."""
self.manager = manager.deepcopy()
@property
def work(self):
"""The :class:`Work` containing this `Task`."""
return self._work
def set_work(self, work):
"""Set the :class:`Work` associated to this `Task`."""
if not hasattr(self, "_work"):
self._work = work
else:
if self._work != work:
raise ValueError("self._work != work")
@property
def flow(self):
"""The :class:`Flow` containing this `Task`."""
return self.work.flow
@lazy_property
def pos(self):
"""The position of the task in the :class:`Flow`"""
for i, task in enumerate(self.work):
if self == task:
return self.work.pos, i
raise ValueError("Cannot find the position of %s in flow %s" % (self, self.flow))
@property
def pos_str(self):
"""String representation of self.pos"""
return "w" + str(self.pos[0]) + "_t" + str(self.pos[1])
@property
def num_launches(self):
"""
Number of launches performed. This number includes both possible ABINIT restarts
as well as possible launches done due to errors encountered with the resource manager
or the hardware/software."""
return sum(q.num_launches for q in self.manager.qads)
@property
def input(self):
"""AbinitInput object."""
return self._input
def get_inpvar(self, varname, default=None):
"""Return the value of the ABINIT variable varname, None if not present."""
return self.input.get(varname, default)
@deprecated(message="_set_inpvars is deprecated. Use set_vars")
def _set_inpvars(self, *args, **kwargs):
return self.set_vars(*args, **kwargs)
def set_vars(self, *args, **kwargs):
"""
Set the values of the ABINIT variables in the input file. Return dict with old values.
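Example (hypothetical variable names/values, for illustration only):
old_values = task.set_vars(ecut=10, nband=8)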
"""
kwargs.update(dict(*args))
old_values = {vname: self.input.get(vname) for vname in kwargs}
self.input.set_vars(**kwargs)
if kwargs or old_values:
self.history.info("Setting input variables: %s" % str(kwargs))
self.history.info("Old values: %s" % str(old_values))
return old_values
@property
def initial_structure(self):
"""Initial structure of the task."""
return self.input.structure
def make_input(self, with_header=False):
"""Construct the input file of the calculation."""
s = str(self.input)
if with_header: s = str(self) + "\n" + s
return s
def ipath_from_ext(self, ext):
"""
Returns the path of the input file with extension ext.
Use it when the file does not exist yet.
"""
return os.path.join(self.workdir, self.prefix.idata + "_" + ext)
def opath_from_ext(self, ext):
"""
Returns the path of the output file with extension ext.
Use it when the file does not exist yet.
"""
return os.path.join(self.workdir, self.prefix.odata + "_" + ext)
@abc.abstractproperty
def executable(self):
"""
Path to the executable associated to the task (internally stored in self._executable).
"""
def set_executable(self, executable):
"""Set the executable associate to this task."""
self._executable = executable
@property
def process(self):
try:
return self._process
except AttributeError:
# Attach a fake process so that we can poll it.
return FakeProcess()
@property
def is_completed(self):
"""True if the task has been executed."""
return self.status >= self.S_DONE
@property
def can_run(self):
"""The task can run if its status is < S_SUB and all the other dependencies (if any) are done!"""
all_ok = all(stat == self.S_OK for stat in self.deps_status)
return self.status < self.S_SUB and self.status != self.S_LOCKED and all_ok
#@check_spectator
def cancel(self):
"""Cancel the job. Returns 1 if job was cancelled."""
if self.queue_id is None: return 0
if self.status >= self.S_DONE: return 0
exit_status = self.manager.cancel(self.queue_id)
if exit_status != 0:
logger.warning("manager.cancel returned exit_status: %s" % exit_status)
return 0
# Remove output files and reset the status.
self.history.info("Job %s cancelled by user" % self.queue_id)
self.reset()
return 1
def with_fixed_mpi_omp(self, mpi_procs, omp_threads):
"""
Disable autoparal and force execution with `mpi_procs` MPI processes
and `omp_threads` OpenMP threads. Useful for generating benchmarks.
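Example (illustrative benchmark setup):
task.with_fixed_mpi_omp(mpi_procs=4, omp_threads=1)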
"""
manager = self.manager if hasattr(self, "manager") else self.flow.manager
self.manager = manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
#@check_spectator
def _on_done(self):
self.fix_ofiles()
#@check_spectator
def _on_ok(self):
# Fix output file names.
self.fix_ofiles()
# Get results
results = self.on_ok()
self.finalized = True
return results
#@check_spectator
def on_ok(self):
"""
This method is called once the `Task` has reached status S_OK.
Subclasses should provide their own implementation
Returns:
Dictionary that must contain at least the following entries:
returncode:
0 on success.
message:
a string that should provide a human-readable description of what has been performed.
"""
return dict(returncode=0, message="Calling on_ok of the base class!")
#@check_spectator
def fix_ofiles(self):
"""
This method is called when the task reaches S_OK.
It changes the extension of particular output files
produced by Abinit so that the 'official' extension
is preserved e.g. out_1WF14 --> out_1WF
"""
filepaths = self.outdir.list_filepaths()
logger.info("in fix_ofiles with filepaths %s" % list(filepaths))
old2new = FilepathFixer().fix_paths(filepaths)
for old, new in old2new.items():
self.history.info("will rename old %s to new %s" % (old, new))
os.rename(old, new)
#@check_spectator
def _restart(self, submit=True):
"""
Called by restart once we have finished preparing the task for restarting.
Return:
True if task has been restarted
"""
self.set_status(self.S_READY, msg="Restarted on %s" % time.asctime())
# Increase the counter.
self.num_restarts += 1
self.history.info("Restarted, num_restarts %d" % self.num_restarts)
# Reset datetimes
self.datetimes.reset()
if submit:
# Remove the lock file
self.start_lockfile.remove()
# Relaunch the task.
fired = self.start()
if not fired: self.history.warning("Restart failed")
else:
fired = False
return fired
#@check_spectator
def restart(self):
"""
Restart the calculation. Subclasses should provide a concrete version that
performs all the actions needed for preparing the restart and then calls self._restart
to restart the task. The default implementation is empty.
Returns:
1 if job was restarted, 0 otherwise.
"""
logger.debug("Calling the **empty** restart method of the base class")
return 0
def poll(self):
"""Check if child process has terminated. Set and return returncode attribute."""
self._returncode = self.process.poll()
if self._returncode is not None:
self.set_status(self.S_DONE, "status set to Done")
return self._returncode
def wait(self):
"""Wait for child process to terminate. Set and return returncode attribute."""
self._returncode = self.process.wait()
self.set_status(self.S_DONE, "status set to Done")
return self._returncode
def communicate(self, input=None):
"""
Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached.
Wait for process to terminate. The optional input argument should be a string to be sent to the
child process, or None, if no data should be sent to the child.
communicate() returns a tuple (stdoutdata, stderrdata).
"""
stdoutdata, stderrdata = self.process.communicate(input=input)
self._returncode = self.process.returncode
self.set_status(self.S_DONE, "status set to Done")
return stdoutdata, stderrdata
def kill(self):
"""Kill the child."""
self.process.kill()
self.set_status(self.S_ERROR, "status set to Error by task.kill")
self._returncode = self.process.returncode
@property
def returncode(self):
"""
The child return code, set by poll() and wait() (and indirectly by communicate()).
A None value indicates that the process hasn't terminated yet.
A negative value -N indicates that the child was terminated by signal N (Unix only).
"""
try:
return self._returncode
except AttributeError:
return 0
def reset(self):
"""
Reset the task status. Mainly used if we made a silly mistake in the initial
setup of the queue manager and we want to fix it and rerun the task.
Returns:
0 on success, 1 if reset failed.
"""
# Can only reset tasks that are done.
# One should be able to reset 'Submitted' tasks (sometimes, they are not in the queue
# and we want to restart them)
if self.status != self.S_SUB and self.status < self.S_DONE: return 1
# Remove output files otherwise the EventParser will think the job is still running
self.output_file.remove()
self.log_file.remove()
self.stderr_file.remove()
self.start_lockfile.remove()
self.qerr_file.remove()
self.qout_file.remove()
self.set_status(self.S_INIT, msg="Reset on %s" % time.asctime())
self.set_qjob(None)
return 0
@property
@return_none_if_raise(AttributeError)
def queue_id(self):
"""Queue identifier returned by the Queue manager. None if not set"""
return self.qjob.qid
@property
@return_none_if_raise(AttributeError)
def qname(self):
"""Queue name identifier returned by the Queue manager. None if not set"""
return self.qjob.qname
@property
def qjob(self):
return self._qjob
def set_qjob(self, qjob):
"""Set info on queue after submission."""
self._qjob = qjob
@property
def has_queue(self):
"""True if we are submitting jobs via a queue manager."""
return self.manager.qadapter.QTYPE.lower() != "shell"
@property
def num_cores(self):
"""Total number of CPUs used to run the task."""
return self.manager.num_cores
@property
def mpi_procs(self):
"""Number of CPUs used for MPI."""
return self.manager.mpi_procs
@property
def omp_threads(self):
"""Number of CPUs used for OpenMP."""
return self.manager.omp_threads
@property
def mem_per_proc(self):
"""Memory per MPI process."""
return Memory(self.manager.mem_per_proc, "Mb")
@property
def status(self):
"""Gives the status of the task."""
return self._status
def lock(self, source_node):
"""Lock the task, source is the :class:`Node` that applies the lock."""
if self.status != self.S_INIT:
raise ValueError("Trying to lock a task with status %s" % self.status)
self._status = self.S_LOCKED
self.history.info("Locked by node %s", source_node)
def unlock(self, source_node, check_status=True):
"""
Unlock the task, set its status to `S_READY` so that the scheduler can submit it.
source_node is the :class:`Node` that removed the lock
Call task.check_status if check_status is True.
"""
if self.status != self.S_LOCKED:
raise RuntimeError("Trying to unlock a task with status %s" % self.status)
self._status = self.S_READY
if check_status: self.check_status()
self.history.info("Unlocked by %s", source_node)
#@check_spectator
def set_status(self, status, msg):
"""
Set and return the status of the task.
Args:
status: Status object or string representation of the status
msg: string with human-readable message used in the case of errors.
"""
# truncate string if it's long. msg will be logged in the object and we don't want to waste memory.
if len(msg) > 2000:
msg = msg[:2000]
msg += "\n... snip ...\n"
# Locked files must be explicitly unlocked
if self.status == self.S_LOCKED or status == self.S_LOCKED:
err_msg = (
"Locked files must be explicitly unlocked before calling set_status but\n"
"task.status = %s, input status = %s" % (self.status, status))
raise RuntimeError(err_msg)
status = Status.as_status(status)
changed = True
if hasattr(self, "_status"):
changed = (status != self._status)
self._status = status
if status == self.S_RUN:
# Set datetimes.start when the task enters S_RUN
if self.datetimes.start is None:
self.datetimes.start = datetime.datetime.now()
# Add new entry to history only if the status has changed.
if changed:
if status == self.S_SUB:
self.datetimes.submission = datetime.datetime.now()
self.history.info("Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s " % (
self.mpi_procs, self.omp_threads, self.mem_per_proc.to("Gb"), msg))
elif status == self.S_OK:
self.history.info("Task completed %s", msg)
elif status == self.S_ABICRITICAL:
self.history.info("Status set to S_ABI_CRITICAL due to: %s", msg)
else:
self.history.info("Status changed to %s. msg: %s", status, msg)
#######################################################
# The section below contains callbacks that should not
# be executed if we are in spectator_mode
#######################################################
if status == self.S_DONE:
# Execute the callback
self._on_done()
if status == self.S_OK:
# Finalize the task.
if not self.finalized:
self._on_ok()
# here we remove the output files of the task and of its parents.
if self.gc is not None and self.gc.policy == "task":
self.clean_output_files()
self.send_signal(self.S_OK)
return status
def check_status(self):
"""
This function checks the status of the task by inspecting the output and the
error files produced by the application and by the queue manager.
"""
# 1) see if the job is blocked
# 2) see if an error occurred while submitting the job. TODO: these problems can be solved
# 3) see if there is output
# 4) see if abinit reports problems
# 5) see if both err files exist and are empty
# 6) no output and no err files: the job must still be running
# 7) try to find out what caused the problems
# 8) there is a problem but we did not figure out what ...
# 9) the only way of landing here is if there is an output file but no err files...
# 1) A locked task can only be unlocked by calling set_status explicitly.
# an errored task, should not end up here but just to be sure
black_list = (self.S_LOCKED, self.S_ERROR)
#if self.status in black_list: return self.status
# 2) Check the returncode of the process (the process of submitting the job) first.
# This type of problem should also be handled by the scheduler error parser.
if self.returncode != 0:
# The job was not submitted properly
return self.set_status(self.S_QCRITICAL, msg="return code %s" % self.returncode)
# If we have an abort file produced by Abinit
if self.mpiabort_file.exists:
return self.set_status(self.S_ABICRITICAL, msg="Found ABINIT abort file")
# Analyze the stderr file for Fortran runtime errors.
# getsize is 0 if the file is empty or it does not exist.
err_msg = None
if self.stderr_file.getsize() != 0:
#if self.stderr_file.exists:
err_msg = self.stderr_file.read()
# Analyze the stderr file of the resource manager for runtime errors.
# TODO: Why are we looking for errors in queue.qerr?
qerr_info = None
if self.qerr_file.getsize() != 0:
#if self.qerr_file.exists:
qerr_info = self.qerr_file.read()
# Analyze the stdout file of the resource manager (needed for PBS !)
qout_info = None
if self.qout_file.getsize():
#if self.qout_file.exists:
qout_info = self.qout_file.read()
# Start to check ABINIT status if the output file has been created.
#if self.output_file.getsize() != 0:
if self.output_file.exists:
try:
report = self.get_event_report()
except Exception as exc:
msg = "%s exception while parsing event_report:\n%s" % (self, exc)
return self.set_status(self.S_ABICRITICAL, msg=msg)
if report is None:
return self.set_status(self.S_ERROR, msg="got None report!")
if report.run_completed:
# Here we set the correct timing data reported by Abinit
self.datetimes.start = report.start_datetime
self.datetimes.end = report.end_datetime
# Check if the calculation converged.
not_ok = report.filter_types(self.CRITICAL_EVENTS)
if not_ok:
return self.set_status(self.S_UNCONVERGED, msg='status set to unconverged based on abiout')
else:
return self.set_status(self.S_OK, msg="status set to ok based on abiout")
# Calculation still running or errors?
if report.errors:
# Abinit reported problems
logger.debug('Found errors in report')
for error in report.errors:
logger.debug(str(error))
try:
self.abi_errors.append(error)
except AttributeError:
self.abi_errors = [error]
# The job is unfixable due to ABINIT errors
logger.debug("%s: Found Errors or Bugs in ABINIT main output!" % self)
msg = "\n".join(map(repr, report.errors))
return self.set_status(self.S_ABICRITICAL, msg=msg)
# 5)
if self.stderr_file.exists and not err_msg:
if self.qerr_file.exists and not qerr_info:
# there is output and no errors
# The job still seems to be running
return self.set_status(self.S_RUN, msg='there is output and no errors: job still seems to be running')
# 6)
if not self.output_file.exists:
logger.debug("output_file does not exists")
if not self.stderr_file.exists and not self.qerr_file.exists:
# No output at all. The job is still in the queue.
return self.status
# 7) Analyze the files of the resource manager and abinit and execution err (mvs)
if qerr_info or qout_info:
from pymatgen.io.abinit.scheduler_error_parsers import get_parser
scheduler_parser = get_parser(self.manager.qadapter.QTYPE, err_file=self.qerr_file.path,
out_file=self.qout_file.path, run_err_file=self.stderr_file.path)
if scheduler_parser is None:
return self.set_status(self.S_QCRITICAL,
msg="Cannot find scheduler_parser for qtype %s" % self.manager.qadapter.QTYPE)
scheduler_parser.parse()
if scheduler_parser.errors:
self.queue_errors = scheduler_parser.errors
# Store the queue errors in the task.
msg = "scheduler errors found:\n%s" % str(scheduler_parser.errors)
# self.history.critical(msg)
return self.set_status(self.S_QCRITICAL, msg=msg)
# The job is killed or crashed and we know what happened
elif lennone(qerr_info) > 0:
# if only qout_info, we are not necessarily in QCRITICAL state,
# since there will always be info in the qout file
msg = 'found unknown messages in the queue error: %s' % str(qerr_info)
self.history.info(msg)
print(msg)
# self.num_waiting += 1
# if self.num_waiting > 1000:
rt = self.datetimes.get_runtime().seconds
tl = self.manager.qadapter.timelimit
if rt > tl:
msg += 'set to error: runtime (%s) exceeded walltime (%s)' % (rt, tl)
print(msg)
return self.set_status(self.S_ERROR, msg=msg)
# The job may be killed or crashed but we don't know what happened
# It may also be that an innocent message was written to qerr, so we wait for a while
# it is set to QCritical, we will attempt to fix it by running on more resources
# 8) analyzing the err files and abinit output did not identify a problem
# but if the files are not empty we do have a problem but no way of solving it:
if lennone(err_msg) > 0:
msg = 'found error message:\n %s' % str(err_msg)
return self.set_status(self.S_QCRITICAL, msg=msg)
# The job is killed or crashed but we don't know what happened
# it is set to QCritical, we will attempt to fix it by running on more resources
# 9) if we still haven't returned there is no indication of any error and the job can only still be running
# but we should actually never land here, or we have delays in the file system ....
# print('the job still seems to be running maybe it is hanging without producing output... ')
# Check time of last modification.
if self.output_file.exists and \
(time.time() - self.output_file.get_stat().st_mtime > self.manager.policy.frozen_timeout):
msg = "Task seems to be frozen, last change more than %s [s] ago" % self.manager.policy.frozen_timeout
return self.set_status(self.S_ERROR, msg=msg)
# Handle weird case in which either run.abo, or run.log have not been produced
#if self.status not in (self.S_INIT, self.S_READY) and (not self.output.file.exists or not self.log_file.exits):
# msg = "Task have been submitted but cannot find the log file or the output file"
# return self.set_status(self.S_ERROR, msg)
return self.set_status(self.S_RUN, msg='final option: nothing seems to be wrong, the job must still be running')
def reduce_memory_demand(self):
"""
Method that can be called by the scheduler to decrease the memory demand of a specific task.
Returns True in case of success, False in case of Failure.
Should be overwritten by specific tasks.
"""
return False
def speed_up(self):
"""
Method that can be called by the flow to decrease the time needed for a specific task.
Returns True in case of success, False in case of Failure
Should be overwritten by specific tasks.
"""
return False
def out_to_in(self, out_file):
"""
Move an output file to the input data directory of the `Task`
and rename the file so that ABINIT will read it as an input data file.
Returns:
The absolute path of the new file in the indata directory.
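Example (illustrative; "out_WFK" is a hypothetical filename):
new_path = task.out_to_in(task.outdir.path_in("out_WFK"))
# new_path now points to indata/in_WFK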
"""
in_file = os.path.basename(out_file).replace("out", "in", 1)
dest = os.path.join(self.indir.path, in_file)
if os.path.exists(dest) and not os.path.islink(dest):
logger.warning("Will overwrite %s with %s" % (dest, out_file))
os.rename(out_file, dest)
return dest
def inlink_file(self, filepath):
"""
Create a symbolic link to the specified file in the
directory containing the input files of the task.
"""
if not os.path.exists(filepath):
logger.debug("Creating symbolic link to not existent file %s" % filepath)
# Extract the Abinit extension and add the prefix for input files.
root, abiext = abi_splitext(filepath)
infile = "in_" + abiext
infile = self.indir.path_in(infile)
# Link path to dest if dest link does not exist.
# else check that it points to the expected file.
self.history.info("Linking path %s --> %s" % (filepath, infile))
if not os.path.exists(infile):
os.symlink(filepath, infile)
else:
if os.path.realpath(infile) != filepath:
raise self.Error("infile %s does not point to filepath %s" % (infile, filepath))
def make_links(self):
"""
Create symbolic links to the output files produced by the other tasks.
.. warning::
This method should be called only when the calculation is READY because
it uses a heuristic approach to find the file to link.
"""
for dep in self.deps:
filepaths, exts = dep.get_filepaths_and_exts()
for path, ext in zip(filepaths, exts):
logger.info("Need path %s with ext %s" % (path, ext))
dest = self.ipath_from_ext(ext)
if not os.path.exists(path):
# Try netcdf file. TODO: this case should be treated in a cleaner way.
path += ".nc"
if os.path.exists(path): dest += ".nc"
if not os.path.exists(path):
raise self.Error("%s: %s is needed by this task but it does not exist" % (self, path))
# Link path to dest if dest link does not exist.
# else check that it points to the expected file.
logger.debug("Linking path %s --> %s" % (path, dest))
if not os.path.exists(dest):
os.symlink(path, dest)
else:
# check links but only if we haven't performed the restart.
# in this case, indeed we may have replaced the file pointer with the
# previous output file of the present task.
if os.path.realpath(dest) != path and self.num_restarts == 0:
raise self.Error("dest %s does not point to path %s" % (dest, path))
@abc.abstractmethod
def setup(self):
"""Public method called before submitting the task."""
def _setup(self):
"""
This method calls self.setup after having performed additional operations
such as the creation of the symbolic links needed to connect different tasks.
"""
self.make_links()
self.setup()
def get_event_report(self, source="log"):
"""
Analyzes the main logfile of the calculation for possible Errors or Warnings.
If the ABINIT abort file is found, the errors found in this file are added to
the output report.
Args:
source: "output" for the main output file, "log" for the log file.
Returns:
:class:`EventReport` instance or None if the source file does not exist.
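Example (illustrative):
report = task.get_event_report(source="log")
if report is not None and report.errors:
print("\n".join(map(repr, report.errors)))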
"""
# By default, we inspect the main log file.
ofile = {
"output": self.output_file,
"log": self.log_file}[source]
parser = events.EventsParser()
if not ofile.exists:
if not self.mpiabort_file.exists:
return None
else:
# ABINIT abort file without log!
abort_report = parser.parse(self.mpiabort_file.path)
return abort_report
try:
report = parser.parse(ofile.path)
#self._prev_reports[source] = report
# Add events found in the ABI_MPIABORTFILE.
if self.mpiabort_file.exists:
logger.critical("Found ABI_MPIABORTFILE!!!!!")
abort_report = parser.parse(self.mpiabort_file.path)
if len(abort_report) != 1:
logger.critical("Found more than one event in ABI_MPIABORTFILE")
# Weird case: empty abort file, let's skip the part
# below and hope that the log file contains the error message.
#if not len(abort_report): return report
# Add it to the initial report only if it differs
# from the last one found in the main log file.
last_abort_event = abort_report[-1]
if report and last_abort_event != report[-1]:
report.append(last_abort_event)
else:
report.append(last_abort_event)
return report
#except parser.Error as exc:
except Exception as exc:
# Return a report with an error entry with info on the exception.
msg = "%s: Exception while parsing ABINIT events:\n %s" % (ofile, str(exc))
self.set_status(self.S_ABICRITICAL, msg=msg)
return parser.report_exception(ofile.path, exc)
def get_results(self, **kwargs):
"""
Returns :class:`NodeResults` instance.
Subclasses should extend this method (if needed) by adding
specialized code that performs some kind of post-processing.
"""
# Check whether the process completed.
if self.returncode is None:
raise self.Error("return code is None, you should call wait, communitate or poll")
if self.status is None or self.status < self.S_DONE:
raise self.Error("Task is not completed")
return self.Results.from_node(self)
def move(self, dest, is_abspath=False):
"""
Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
The destination path must not already exist. If the destination already exists
but is not a directory, it may be overwritten depending on os.rename() semantics.
By default, dest is located in the parent directory of self.workdir.
Use is_abspath=True to specify an absolute path.
"""
if not is_abspath:
dest = os.path.join(os.path.dirname(self.workdir), dest)
shutil.move(self.workdir, dest)
def in_files(self):
"""Return all the input data files used."""
return self.indir.list_filepaths()
def out_files(self):
"""Return all the output data files produced."""
return self.outdir.list_filepaths()
def tmp_files(self):
"""Return all the input data files produced."""
return self.tmpdir.list_filepaths()
def path_in_workdir(self, filename):
"""Create the absolute path of filename in the top-level working directory."""
return os.path.join(self.workdir, filename)
def rename(self, src_basename, dest_basename, datadir="outdir"):
"""
Rename a file located in datadir.
src_basename and dest_basename are the basename of the source file
and of the destination file, respectively.
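Example (illustrative basenames):
task.rename("out_DEN", "out_DEN_backup", datadir="outdir")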
"""
directory = {
"indir": self.indir,
"outdir": self.outdir,
"tmpdir": self.tmpdir,
}[datadir]
src = directory.path_in(src_basename)
dest = directory.path_in(dest_basename)
os.rename(src, dest)
#@check_spectator
def build(self, *args, **kwargs):
"""
Creates the working directory and the input files of the :class:`Task`.
It does not overwrite files if they already exist.
"""
# Create dirs for input, output and tmp data.
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Write files file and input file.
if not self.files_file.exists:
self.files_file.write(self.filesfile_string)
self.input_file.write(self.make_input())
self.manager.write_jobfile(self)
#@check_spectator
def rmtree(self, exclude_wildcard=""):
"""
Remove all files and directories in the working directory
Args:
exclude_wildcard: Optional string with regular expressions separated by |.
Files matching one of the regular expressions will be preserved.
example: exclude_wildcard="*.nc|*.txt" preserves all the files whose extension is in ["nc", "txt"].
"""
if not exclude_wildcard:
shutil.rmtree(self.workdir)
else:
w = WildCard(exclude_wildcard)
for dirpath, dirnames, filenames in os.walk(self.workdir):
for fname in filenames:
filepath = os.path.join(dirpath, fname)
if not w.match(fname):
os.remove(filepath)
def remove_files(self, *filenames):
"""Remove all the files listed in filenames."""
filenames = list_strings(filenames)
for dirpath, dirnames, fnames in os.walk(self.workdir):
for fname in fnames:
if fname in filenames:
filepath = os.path.join(dirpath, fname)
os.remove(filepath)
def clean_output_files(self, follow_parents=True):
"""
This method is called when the task reaches S_OK. It removes all the output files
produced by the task that are not needed by its children as well as the output files
produced by its parents if no other node needs them.
Args:
follow_parents: If true, the output files of the parent nodes will be removed if possible.
Return:
list with the absolute paths of the files that have been removed.
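Example (illustrative; normally invoked by the framework when the garbage
collector policy is "task"):
removed_paths = task.clean_output_files(follow_parents=False)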
"""
paths = []
if self.status != self.S_OK:
logger.warning("Calling task.clean_output_files on a task whose status != S_OK")
# Remove all files in tmpdir.
self.tmpdir.clean()
# Find the file extensions that should be preserved since these files are still
# needed by the children who haven't reached S_OK
except_exts = set()
for child in self.get_children():
if child.status == self.S_OK: continue
# Find the position of self in child.deps and add the extensions.
i = [dep.node for dep in child.deps].index(self)
except_exts.update(child.deps[i].exts)
# Remove the files in the outdir of the task but keep except_exts.
exts = self.gc.exts.difference(except_exts)
#print("Will remove its extensions: ", exts)
paths += self.outdir.remove_exts(exts)
if not follow_parents: return paths
# Remove the files in the outdir of my parents if all the possible dependencies have been fulfilled.
for parent in self.get_parents():
# Here we build a dictionary file extension --> list of child nodes requiring this file from parent
# e.g {"WFK": [node1, node2]}
ext2nodes = collections.defaultdict(list)
for child in parent.get_children():
if child.status == child.S_OK: continue
i = [d.node for d in child.deps].index(parent)
for ext in child.deps[i].exts:
ext2nodes[ext].append(child)
# Remove extension only if no node depends on it!
except_exts = [k for k, lst in ext2nodes.items() if lst]
exts = self.gc.exts.difference(except_exts)
#print("%s removes extensions %s from parent node %s" % (self, exts, parent))
paths += parent.outdir.remove_exts(exts)
self.history.info("Removed files: %s" % paths)
return paths
def setup(self):
"""Base class does not provide any hook."""
#@check_spectator
def start(self, **kwargs):
"""
Starts the calculation by performing the following steps:
- build dirs and files
- call the _setup method
- execute the job file by executing/submitting the job script.
Main entry point for the `Launcher`.
============== ==============================================================
kwargs Meaning
============== ==============================================================
autoparal False to skip the autoparal step (default True)
exec_args List of arguments passed to executable.
============== ==============================================================
Returns:
1 if task was started, 0 otherwise.
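Example (illustrative sketch):
fired = task.start(autoparal=False)
if not fired: print("task was not started")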
"""
if self.status >= self.S_SUB:
raise self.Error("Task status: %s" % str(self.status))
if self.start_lockfile.exists:
self.history.warning("Found lock file: %s" % self.start_lockfile.path)
return 0
self.start_lockfile.write("Started on %s" % time.asctime())
self.build()
self._setup()
# Add the variables needed to connect the node.
for d in self.deps:
cvars = d.connecting_vars()
self.history.info("Adding connecting vars %s" % cvars)
self.set_vars(cvars)
# Get (python) data from other nodes
d.apply_getters(self)
# Automatic parallelization
if kwargs.pop("autoparal", True) and hasattr(self, "autoparal_run"):
try:
self.autoparal_run()
except QueueAdapterError as exc:
# If autoparal cannot find a qadapter to run the calculation raises an Exception
self.history.critical(exc)
msg = "Error trying to find a running configuration:\n%s" % straceback()
self.set_status(self.S_QCRITICAL, msg=msg)
return 0
except Exception as exc:
# Sometimes autoparal_run fails because Abinit aborts
# at the level of the parser e.g. cannot find the spacegroup
# due to some numerical noise in the structure.
# In this case we call fix_abicritical and then we try to run autoparal again.
self.history.critical("First call to autoparal failed with `%s`. Will try fix_abicritical" % exc)
msg = "autoparal_fake_run raised:\n%s" % straceback()
logger.critical(msg)
fixed = self.fix_abicritical()
if not fixed:
self.set_status(self.S_ABICRITICAL, msg="fix_abicritical could not solve the problem")
return 0
try:
self.autoparal_run()
self.history.info("Second call to autoparal succeeded!")
except Exception as exc:
self.history.critical("Second call to autoparal failed with %s. Cannot recover!", exc)
msg = "Tried autoparal again but got:\n%s" % straceback()
# logger.critical(msg)
self.set_status(self.S_ABICRITICAL, msg=msg)
return 0
# Start the calculation in a subprocess and return.
self._process = self.manager.launch(self, **kwargs)
return 1
def start_and_wait(self, *args, **kwargs):
"""
Helper method to start the task and wait for completion.
Mainly used when we are submitting the task via the shell without passing through a queue manager.
"""
self.start(*args, **kwargs)
retcode = self.wait()
return retcode
class DecreaseDemandsError(Exception):
"""
Exception raised by a task when a request to decrease some demand (load or memory) could not be fulfilled.
"""
class AbinitTask(Task):
"""
Base class defining an ABINIT calculation
"""
Results = TaskResults
@classmethod
def from_input(cls, input, workdir=None, manager=None):
"""
Create an instance of `AbinitTask` from an ABINIT input.
Args:
input: `AbinitInput` object.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
return cls(input, workdir=workdir, manager=manager)
@classmethod
def temp_shell_task(cls, inp, workdir=None, manager=None):
"""
Build a Task with a temporary workdir. The task is executed via the shell with 1 MPI proc.
Mainly used for invoking Abinit to get important parameters needed to prepare the real task.
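Example (hypothetical sketch; `inp` is an AbinitInput built elsewhere):
task = AbinitTask.temp_shell_task(inp)
task.start_and_wait()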
"""
# Build a simple manager to run the job in a shell subprocess
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
# Construct the task and run it
task = cls.from_input(inp, workdir=workdir, manager=manager.to_shell_manager(mpi_procs=1))
task.set_name('temp_shell_task')
return task
def setup(self):
"""
Abinit has the very *bad* habit of changing the file extension by appending the characters in [A,B ..., Z]
to the output file, and this breaks a lot of code that relies on the use of a unique file extension.
Here we fix this issue by renaming run.abo to run.abo_[number] if the output file "run.abo" already
exists. A few lines of code in python, a lot of problems if you try to implement this trick in Fortran90.
"""
def rename_file(afile):
"""Helper function to rename :class:`File` objects. Return string for logging purpose."""
# Find the index of the last file (if any).
# TODO: Maybe it's better to use run.abo --> run(1).abo
fnames = [f for f in os.listdir(self.workdir) if f.startswith(afile.basename)]
nums = [int(f) for f in [f.split("_")[-1] for f in fnames] if f.isdigit()]
last = max(nums) if nums else 0
new_path = afile.path + "_" + str(last+1)
os.rename(afile.path, new_path)
return "Will rename %s to %s" % (afile.path, new_path)
logs = []
if self.output_file.exists: logs.append(rename_file(self.output_file))
if self.log_file.exists: logs.append(rename_file(self.log_file))
if logs:
self.history.info("\n".join(logs))
@property
def executable(self):
"""Path to the executable required for running the Task."""
try:
return self._executable
except AttributeError:
return "abinit"
@property
def pseudos(self):
"""List of pseudos used in the calculation."""
return self.input.pseudos
@property
def isnc(self):
"""True if norm-conserving calculation."""
return self.input.isnc
@property
def ispaw(self):
"""True if PAW calculation"""
return self.input.ispaw
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
pj = os.path.join
app(self.input_file.path) # Path to the input file
app(self.output_file.path) # Path to the output file
app(pj(self.workdir, self.prefix.idata)) # Prefix for input data
app(pj(self.workdir, self.prefix.odata)) # Prefix for output data
app(pj(self.workdir, self.prefix.tdata)) # Prefix for temporary data
# Paths to the pseudopotential files.
# Note that here the pseudos **must** be sorted according to znucl.
# Here we reorder the pseudos if the order is wrong.
ord_pseudos = []
znucl = [specie.number for specie in
self.input.structure.types_of_specie]
for z in znucl:
for p in self.pseudos:
if p.Z == z:
ord_pseudos.append(p)
break
else:
raise ValueError("Cannot find pseudo with znucl %s in pseudos:\n%s" % (z, self.pseudos))
for pseudo in ord_pseudos:
app(pseudo.path)
return "\n".join(lines)
def set_pconfs(self, pconfs):
"""Set the list of autoparal configurations."""
self._pconfs = pconfs
@property
def pconfs(self):
"""List of autoparal configurations."""
try:
return self._pconfs
except AttributeError:
return None
def uses_paral_kgb(self, value=1):
"""True if the task is a GS Task and uses paral_kgb with the given value."""
paral_kgb = self.get_inpvar("paral_kgb", 0)
# paral_kgb is used only in the GS part.
return paral_kgb == value and isinstance(self, GsTask)
def _change_structure(self, new_structure):
"""Change the input structure."""
# Compare new and old structure for logging purpose.
# TODO: Write method of structure to compare self and other and return a dictionary
old_structure = self.input.structure
old_lattice = old_structure.lattice
abc_diff = np.array(new_structure.lattice.abc) - np.array(old_lattice.abc)
angles_diff = np.array(new_structure.lattice.angles) - np.array(old_lattice.angles)
cart_diff = new_structure.cart_coords - old_structure.cart_coords
displs = np.array([np.sqrt(np.dot(v, v)) for v in cart_diff])
recs, tol_angle, tol_length = [], 10**-2, 10**-5
if np.any(np.abs(angles_diff) > tol_angle):
recs.append("new_agles - old_angles = %s" % angles_diff)
if np.any(np.abs(abc_diff) > tol_length):
recs.append("new_abc - old_abc = %s" % abc_diff)
if np.any(np.abs(displs) > tol_length):
min_pos, max_pos = displs.argmin(), displs.argmax()
recs.append("Mean displ: %.2E, Max_displ: %.2E (site %d), min_displ: %.2E (site %d)" %
(displs.mean(), displs[max_pos], max_pos, displs[min_pos], min_pos))
self.history.info("Changing structure (only significant diffs are shown):")
if not recs:
self.history.info("Input and output structure seems to be equal within the given tolerances")
else:
for rec in recs:
self.history.info(rec)
self.input.set_structure(new_structure)
#assert self.input.structure == new_structure
def autoparal_run(self):
"""
Find an optimal set of parameters for the execution of the task.
This method can change the ABINIT input variables and/or the
submission parameters, e.g. the number of CPUs for MPI and OpenMP.
Set:
self.pconfs where pconfs is a :class:`ParalHints` object with the configurations reported by
autoparal; the optimal configuration is selected according to the manager policy.
Returns: 0 if success.
"""
policy = self.manager.policy
if policy.autoparal == 0: # or policy.max_ncpus in [None, 1]:
logger.info("Nothing to do in autoparal, returning (None, None)")
return 0
if policy.autoparal != 1:
raise NotImplementedError("autoparal != 1")
############################################################################
# Run ABINIT in sequential to get the possible configurations with max_ncpus
############################################################################
# Set the variables for automatic parallelization
# Will get all the possible configurations up to max_ncpus
# Return immediately if max_ncpus == 1
max_ncpus = self.manager.max_cores
if max_ncpus == 1: return 0
autoparal_vars = dict(autoparal=policy.autoparal, max_ncpus=max_ncpus)
self.set_vars(autoparal_vars)
# Run the job in a shell subprocess with mpi_procs = 1
# we don't want to make a request to the queue manager for this simple job!
# Return code is always != 0
process = self.manager.to_shell_manager(mpi_procs=1).launch(self)
self.history.pop()
retcode = process.wait()
# Remove the variables added for the automatic parallelization
self.input.remove_vars(autoparal_vars.keys())
##############################################################
# Parse the autoparal configurations from the main output file
##############################################################
parser = ParalHintsParser()
try:
pconfs = parser.parse(self.output_file.path)
except parser.Error:
logger.critical("Error while parsing Autoparal section:\n%s" % straceback())
return 2
######################################################
# Select the optimal configuration according to policy
######################################################
optconf = self.find_optconf(pconfs)
####################################################
# Change the input file and/or the submission script
####################################################
self.set_vars(optconf.vars)
# Write autoparal configurations to JSON file.
d = pconfs.as_dict()
d["optimal_conf"] = optconf
json_pretty_dump(d, os.path.join(self.workdir, "autoparal.json"))
##############
# Finalization
##############
# Reset the status, remove garbage files ...
self.set_status(self.S_INIT, msg='finished autoparallel run')
# Remove the output file since Abinit likes to create new files
# with extension .outA, .outB if the file already exists.
os.remove(self.output_file.path)
os.remove(self.log_file.path)
os.remove(self.stderr_file.path)
return 0
def find_optconf(self, pconfs):
"""Find the optimal Parallel configuration."""
# Save pconfs for future reference.
self.set_pconfs(pconfs)
# Select the partition on which we'll be running and set MPI/OMP cores.
optconf = self.manager.select_qadapter(pconfs)
return optconf
def select_files(self, what="o"):
"""
Helper function used to select the files of a task.
Args:
what: string with the list of characters selecting the file type
Possible choices:
i ==> input_file,
o ==> output_file,
f ==> files_file,
j ==> job_file,
l ==> log_file,
e ==> stderr_file,
q ==> qout_file,
all ==> all files.
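Example (illustrative):
paths = task.select_files("ol")   # paths of output_file and log_file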
"""
choices = collections.OrderedDict([
("i", self.input_file),
("o", self.output_file),
("f", self.files_file),
("j", self.job_file),
("l", self.log_file),
("e", self.stderr_file),
("q", self.qout_file),
])
if what == "all":
return [getattr(v, "path") for v in choices.values()]
selected = []
for c in what:
try:
selected.append(getattr(choices[c], "path"))
except KeyError:
logger.warning("Wrong keyword %s" % c)
return selected
def restart(self):
"""
General restart used once scheduler problems have been taken care of.
"""
return self._restart()
#@check_spectator
def reset_from_scratch(self):
"""
Restart from scratch. This is to be used when a job is restarted with more resources after a crash.
Output files produced in workdir are moved to _reset, otherwise check_status would continue
to see the task as crashed even if the job did not run.
"""
# Create reset directory if not already done.
reset_dir = os.path.join(self.workdir, "_reset")
reset_file = os.path.join(reset_dir, "_counter")
if not os.path.exists(reset_dir):
os.mkdir(reset_dir)
num_reset = 1
else:
with open(reset_file, "rt") as fh:
num_reset = 1 + int(fh.read())
# Move files to reset and append digit with reset index.
def move_file(f):
if not f.exists: return
try:
f.move(os.path.join(reset_dir, f.basename + "_" + str(num_reset)))
except OSError as exc:
logger.warning("Couldn't move file {}. exc: {}".format(f, str(exc)))
for fname in ("output_file", "log_file", "stderr_file", "qout_file", "qerr_file"):
move_file(getattr(self, fname))
with open(reset_file, "wt") as fh:
fh.write(str(num_reset))
self.start_lockfile.remove()
# Reset datetimes
self.datetimes.reset()
return self._restart(submit=False)
#@check_spectator
def fix_abicritical(self):
"""
Method to fix crashes/errors caused by ABINIT.
Returns:
1 if task has been fixed else 0.
"""
event_handlers = self.event_handlers
if not event_handlers:
self.set_status(status=self.S_ERROR, msg='Empty list of event handlers. Cannot fix abi_critical errors')
return 0
count, done = 0, len(event_handlers) * [0]
report = self.get_event_report()
if report is None:
self.set_status(status=self.S_ERROR, msg='get_event_report returned None')
return 0
# Note we have to loop over all possible events (slow, I know)
# because we can have handlers for Error, Bug or Warning
# (ideally only for CriticalWarnings but this is not done yet)
for event in report:
for i, handler in enumerate(self.event_handlers):
if handler.can_handle(event) and not done[i]:
logger.info("handler %s will try to fix event %s" % (handler, event))
try:
d = handler.handle_task_event(self, event)
if d:
done[i] += 1
count += 1
except Exception as exc:
logger.critical(str(exc))
if count:
self.reset_from_scratch()
return 1
self.set_status(status=self.S_ERROR, msg='We encountered AbiCritical events that could not be fixed')
return 0
#@check_spectator
def fix_queue_critical(self):
"""
This function tries to fix critical events originating from the queue submission system.
General strategy, first try to increase resources in order to fix the problem,
if this is not possible, call a task specific method to attempt to decrease the demands.
Returns:
1 if task has been fixed else 0.
"""
from pymatgen.io.abinit.scheduler_error_parsers import NodeFailureError, MemoryCancelError, TimeCancelError
#assert isinstance(self.manager, TaskManager)
self.history.info('fixing queue critical')
ret = "task.fix_queue_critical: "
if not self.queue_errors:
# TODO
# paral_kgb = 1 leads to nasty SIGSEGVs that are seen as Qcritical errors!
# Try to fallback to the conjugate gradient.
#if self.uses_paral_kgb(1):
# logger.critical("QCRITICAL with PARAL_KGB==1. Will try CG!")
# self.set_vars(paral_kgb=0)
# self.reset_from_scratch()
# return
# Queue error but no errors detected: try to solve it by increasing ncpus if the task scales.
# If resources are already at the maximum, the task is definitively marked as errored.
if self.mem_scales or self.load_scales:
try:
self.manager.increase_resources() # acts either on the policy or on the qadapter
self.reset_from_scratch()
ret += "increased resources"
return ret
except ManagerIncreaseError:
self.set_status(self.S_ERROR, msg='unknown queue error, could not increase resources any further')
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='unknown queue error, no options left')
raise FixQueueCriticalError
else:
print("Fix_qcritical: received %d queue_errors" % len(self.queue_errors))
print("type_list: %s" % list(type(qe) for qe in self.queue_errors))
for error in self.queue_errors:
self.history.info('fixing: %s' % str(error))
ret += str(error)
if isinstance(error, NodeFailureError):
# if the problematic node is known, exclude it
if error.nodes is not None:
try:
self.manager.exclude_nodes(error.nodes)
self.reset_from_scratch()
self.set_status(self.S_READY, msg='excluding nodes')
except:
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='Node error but no node identified.')
raise FixQueueCriticalError
elif isinstance(error, MemoryCancelError):
# Ask the qadapter to provide more resources, i.e. more CPUs and hence more total memory;
# if the code scales, this should fix the memory problem.
# Increase both max and min ncpus of autoparal and rerun autoparal.
if self.mem_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased ncpus to solve memory problem')
return
except ManagerIncreaseError:
self.history.warning('increasing ncpus failed')
# if the max is reached, try to increase the memory per cpu:
try:
self.manager.increase_mem()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased mem')
return
except ManagerIncreaseError:
self.history.warning('increasing mem failed')
# if this failed ask the task to provide a method to reduce the memory demand
try:
self.reduce_memory_demand()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='decreased mem demand')
return
except DecreaseDemandsError:
self.history.warning('decreasing demands failed')
msg = ('Memory error detected but the memory could not be increased, neither could the\n'
'memory demand be decreased. Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
raise FixQueueCriticalError
elif isinstance(error, TimeCancelError):
# ask the qadapter to provide more time
print('trying to increase time')
try:
self.manager.increase_time()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased wall time')
return
except ManagerIncreaseError:
self.history.warning('increasing the walltime failed')
# if this fails ask the qadapter to increase the number of cpus
if self.load_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased number of cpus')
return
except ManagerIncreaseError:
self.history.warning('increasing ncpus to speed up the calculation and stay within the walltime failed')
# if this failed ask the task to provide a method to speed up the task
try:
self.speed_up()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='task speedup')
return
except DecreaseDemandsError:
self.history.warning('decreasing demands failed')
msg = ('Time cancel error detected but the time could not be increased, neither could\n'
'the time demand be decreased by speeding up the task or increasing the number of cpus.\n'
'Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
else:
msg = 'No solution provided for error %s. Unrecoverable error.' % error.name
self.set_status(self.S_ERROR, msg)
return 0
def parse_timing(self):
"""
Parse the timer data in the main output file of Abinit.
Requires timopt != 0 in the input file (usually timopt = -1).
Return: :class:`AbinitTimerParser` instance, None if error.
"""
from .abitimer import AbinitTimerParser
parser = AbinitTimerParser()
read_ok = parser.parse(self.output_file.path)
if read_ok:
return parser
return None
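# --------------------------------------------------------------------------
# Editor's note -- minimal usage sketch for ``parse_timing`` (not part of the
# original module). ``task`` is assumed to be a completed AbinitTask whose
# input had timopt = -1 so that Abinit writes the timer section:
#
#     parser = task.parse_timing()
#     if parser is None:
#         print("No timing data found. Did you set timopt in the input?")
#     else:
#         print(parser.filenames)  # files successfully parsed (assumed attribute)
# --------------------------------------------------------------------------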
class ProduceHist(object):
"""
Mixin class for an :class:`AbinitTask` producing a HIST file.
Provide the method `open_hist` that reads and returns a HIST file.
"""
@property
def hist_path(self):
"""Absolute path of the HIST file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._hist_path
except AttributeError:
path = self.outdir.has_abiext("HIST")
if path: self._hist_path = path
return path
def open_hist(self):
"""
Open the HIST file located in self.outdir.
Returns :class:`HistFile` object, None if file could not be found or file is not readable.
"""
if not self.hist_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a HIST file in %s" % (self, self.outdir))
return None
# Open the HIST file
from abipy.dynamics.hist import HistFile
try:
return HistFile(self.hist_path)
except Exception as exc:
logger.critical("Exception while reading HIST file at %s:\n%s" % (self.hist_path, str(exc)))
return None
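# Editor's note -- hedged usage sketch for ``open_hist`` (not original code).
# We assume HistFile supports the context-manager protocol like the other
# abipy file objects, and that it exposes the relaxation trajectory:
#
#     hist = relax_task.open_hist()
#     if hist is not None:
#         with hist:
#             print(hist.final_structure)  # assumed accessor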
class GsTask(AbinitTask):
"""
Base class for ground-state tasks. A ground state task produces a GSR file
Provides the method `open_gsr` that reads and returns a GSR file.
"""
@property
def gsr_path(self):
"""Absolute path of the GSR file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._gsr_path
except AttributeError:
path = self.outdir.has_abiext("GSR")
if path: self._gsr_path = path
return path
def open_gsr(self):
"""
Open the GSR file located in self.outdir.
Returns :class:`GsrFile` object, None if file could not be found or file is not readable.
"""
gsr_path = self.gsr_path
if not gsr_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
return None
# Open the GSR file.
from abipy.electrons.gsr import GsrFile
try:
return GsrFile(gsr_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (gsr_path, str(exc)))
return None
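# Editor's note -- minimal sketch of the ``open_gsr`` pattern that the
# concrete tasks below rely on (see ScfTask.get_results). ``energy`` is an
# assumed accessor, used here only for illustration:
#
#     gsr = gs_task.open_gsr()
#     if gsr is not None:
#         with gsr:
#             print(gsr.energy)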
class ScfTask(GsTask):
"""
Self-consistent ground-state calculations.
Provide support for in-place restart via (WFK|DEN) files
"""
CRITICAL_EVENTS = [
events.ScfConvergenceWarning,
]
color_rgb = np.array((255, 0, 0)) / 255
def restart(self):
"""SCF calculations can be restarted if we have either the WFK file or the DEN file."""
# Prefer WFK over DEN files since we can reuse the wavefunctions.
for ext in ("WFK", "DEN"):
restart_file = self.outdir.has_abiext(ext)
irdvars = irdvars_for_ext(ext)
if restart_file: break
else:
raise self.RestartError("%s: Cannot find WFK or DEN file to restart from." % self)
# Move out --> in.
self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
# Now we can resubmit the job.
self.history.info("Will restart from %s", restart_file)
return self._restart()
def inspect(self, **kwargs):
"""
Plot the SCF cycle results with matplotlib.
Returns
`matplotlib` figure, None if some error occurred.
"""
try:
scf_cycle = abiinspect.GroundStateScfCycle.from_file(self.output_file.path)
except IOError:
return None
if scf_cycle is not None:
if "title" not in kwargs: kwargs["title"] = str(self)
return scf_cycle.plot(**kwargs)
return None
def get_results(self, **kwargs):
results = super(ScfTask, self).get_results(**kwargs)
# Open the GSR file and add its data to results.out
with self.open_gsr() as gsr:
results["out"].update(gsr.as_dict())
# Add files to GridFS
results.register_gridfs_files(GSR=gsr.filepath)
return results
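# Editor's note -- hedged sketch of the restart bookkeeping performed by
# ``ScfTask.restart`` above. irdvars_for_ext maps a file extension to the
# corresponding Abinit "ird" input variable, e.g. (assumed values):
#
#     irdvars_for_ext("WFK")  # -> {"irdwfk": 1}
#     irdvars_for_ext("DEN")  # -> {"irdden": 1}
#
# so that the restarted job reads in_WFK (preferred) or in_DEN from indata.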
class CollinearThenNonCollinearScfTask(ScfTask):
"""
A specialized ScfTask that performs an initial SCF run with nsppol = 2.
The spin polarized WFK file is then used to start a non-collinear SCF run (nspinor == 2)
initialized from the previous WFK file.
"""
def __init__(self, input, workdir=None, manager=None, deps=None):
super(CollinearThenNonCollinearScfTask, self).__init__(input, workdir=workdir, manager=manager, deps=deps)
# Enforce nspinor = 1, nsppol = 2 and prtwf = 1.
self._input = self.input.deepcopy()
self.input.set_spin_mode("polarized")
self.input.set_vars(prtwf=1)
self.collinear_done = False
def _on_ok(self):
results = super(CollinearThenNonCollinearScfTask, self)._on_ok()
if not self.collinear_done:
self.input.set_spin_mode("spinor")
self.collinear_done = True
self.finalized = False
self.restart()
return results
class NscfTask(GsTask):
"""
Non-Self-consistent GS calculation. Provide in-place restart via WFK files
"""
CRITICAL_EVENTS = [
events.NscfConvergenceWarning,
]
color_rgb = np.array((255, 122, 122)) / 255
def restart(self):
"""NSCF calculations can be restarted only if we have the WFK file."""
ext = "WFK"
restart_file = self.outdir.has_abiext(ext)
if not restart_file:
raise self.RestartError("%s: Cannot find the WFK file to restart from." % self)
# Move out --> in.
self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
irdvars = irdvars_for_ext(ext)
self.set_vars(irdvars)
# Now we can resubmit the job.
self.history.info("Will restart from %s", restart_file)
return self._restart()
def get_results(self, **kwargs):
results = super(NscfTask, self).get_results(**kwargs)
# Read the GSR file.
with self.open_gsr() as gsr:
results["out"].update(gsr.as_dict())
# Add files to GridFS
results.register_gridfs_files(GSR=gsr.filepath)
return results
class RelaxTask(GsTask, ProduceHist):
"""
Task for structural optimizations.
"""
# TODO possible ScfConvergenceWarning?
CRITICAL_EVENTS = [
events.RelaxConvergenceWarning,
]
color_rgb = np.array((255, 61, 255)) / 255
def get_final_structure(self):
"""Read the final structure from the GSR file."""
try:
with self.open_gsr() as gsr:
return gsr.structure
except AttributeError:
raise RuntimeError("Cannot find the GSR file with the final structure to restart from.")
def restart(self):
"""
Restart the structural relaxation.
Structure relaxations can be restarted only if we have the WFK file or the DEN or the GSR file
from which we can read the last structure (mandatory) and the wavefunctions (not mandatory but useful).
Prefer WFK over other files since we can reuse the wavefunctions.
.. note::
The problem in the present approach is that some parameters in the input
are computed from the initial structure and may not be consistent with
the modification of the structure done during the structure relaxation.
"""
restart_file = None
# Try to restart from the WFK file if possible.
# FIXME: This part has been disabled because WFK=IO is a mess if paral_kgb == 1
# This is also the reason why I wrote my own MPI-IO code for the GW part!
wfk_file = self.outdir.has_abiext("WFK")
if False and wfk_file:
irdvars = irdvars_for_ext("WFK")
restart_file = self.out_to_in(wfk_file)
# Fallback to DEN file. Note that here we look for out_DEN instead of out_TIM?_DEN
# This happens when the previous run completed and task.on_done has been performed.
# ********************************************************************************
# Note that it's possible to have an undetected error if we have multiple restarts
# and the last relax died badly. In this case indeed out_DEN is the file produced
# by the last run that has executed on_done.
# ********************************************************************************
if restart_file is None:
out_den = self.outdir.path_in("out_DEN")
if os.path.exists(out_den):
irdvars = irdvars_for_ext("DEN")
restart_file = self.out_to_in(out_den)
if restart_file is None:
# Try to restart from the last TIM?_DEN file.
# This should happen if the previous run didn't complete in clean way.
# Find the last TIM?_DEN file.
last_timden = self.outdir.find_last_timden_file()
if last_timden is not None:
ofile = self.outdir.path_in("out_DEN")
os.rename(last_timden.path, ofile)
restart_file = self.out_to_in(ofile)
irdvars = irdvars_for_ext("DEN")
if restart_file is None:
# Don't raise RestartError as we can still change the structure.
self.history.warning("Cannot find the WFK|DEN|TIM?_DEN file to restart from.")
else:
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
self.history.info("Will restart from %s", restart_file)
# FIXME Here we should read the HIST file but restartxf is broken!
#self.set_vars({"restartxf": -1})
# Read the relaxed structure from the GSR file and change the input.
self._change_structure(self.get_final_structure())
# Now we can resubmit the job.
return self._restart()
def inspect(self, **kwargs):
"""
Plot the evolution of the structural relaxation with matplotlib.
Args:
what: Either "hist" or "scf". The first option (default) extracts data
from the HIST file and plot the evolution of the structural
parameters, forces, pressures and energies.
The second option, extracts data from the main output file and
plot the evolution of the SCF cycles (etotal, residuals, etc).
Returns:
`matplotlib` figure, None if some error occurred.
"""
what = kwargs.pop("what", "hist")
if what == "hist":
# Read the hist file to get access to the structure.
with self.open_hist() as hist:
return hist.plot(**kwargs) if hist else None
elif what == "scf":
# Get info on the different SCF cycles
relaxation = abiinspect.Relaxation.from_file(self.output_file.path)
if "title" not in kwargs: kwargs["title"] = str(self)
return relaxation.plot(**kwargs) if relaxation is not None else None
else:
raise ValueError("Wrong value for what %s" % what)
def get_results(self, **kwargs):
results = super(RelaxTask, self).get_results(**kwargs)
# Open the GSR file and add its data to results.out
with self.open_gsr() as gsr:
results["out"].update(gsr.as_dict())
# Add files to GridFS
results.register_gridfs_files(GSR=gsr.filepath)
return results
def reduce_dilatmx(self, target=1.01):
actual_dilatmx = self.get_inpvar('dilatmx', 1.)
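# Editor's note: step towards ``target`` by at most 5% of the current value.
# Worked example (assumed input): dilatmx = 1.10, target = 1.01
# -> min(0.09, 0.055) = 0.055 -> new dilatmx = 1.045.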
new_dilatmx = actual_dilatmx - min((actual_dilatmx-target), actual_dilatmx*0.05)
self.set_vars(dilatmx=new_dilatmx)
def fix_ofiles(self):
"""
Note that ABINIT produces lots of out_TIM?_DEN files, one for each relaxation step.
Here we list all TIM*_DEN files, select the last one and rename it to out_DEN.
This change is needed so that we can specify dependencies with the syntax {node: "DEN"}
without having to know the number of iterations needed to converge the run in node!
"""
super(RelaxTask, self).fix_ofiles()
# Find the last TIM?_DEN file.
last_timden = self.outdir.find_last_timden_file()
if last_timden is None:
logger.warning("Cannot find TIM?_DEN files")
return
# Rename last TIMDEN with out_DEN.
ofile = self.outdir.path_in("out_DEN")
self.history.info("Renaming last_denfile %s --> %s" % (last_timden.path, ofile))
os.rename(last_timden.path, ofile)
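# Editor's note: after ``fix_ofiles`` the dependency syntax {node: "DEN"}
# resolves to the renamed out_DEN regardless of how many relaxation steps
# (and hence TIM?_DEN files) the run actually needed.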
class DfptTask(AbinitTask):
"""
Base class for DFPT tasks (Phonons, ...)
Mainly used to implement methods that are common to DFPT calculations with Abinit.
Provide the method `open_ddb` that reads and returns a DDB file.
.. warning::
This class should not be instantiated directly.
"""
@property
def ddb_path(self):
"""Absolute path of the DDB file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._ddb_path
except AttributeError:
path = self.outdir.has_abiext("DDB")
if path: self._ddb_path = path
return path
def open_ddb(self):
"""
Open the DDB file located in self.outdir.
Returns :class:`DdbFile` object, None if file could not be found or file is not readable.
"""
ddb_path = self.ddb_path
if not ddb_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a DDB file in %s" % (self, self.outdir))
return None
# Open the DDB file.
from abipy.dfpt.ddb import DdbFile
try:
return DdbFile(ddb_path)
except Exception as exc:
logger.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc)))
return None
# TODO Remove
class DdeTask(DfptTask):
"""Task for DDE calculations."""
def get_results(self, **kwargs):
results = super(DdeTask, self).get_results(**kwargs)
return results.register_gridfs_file(DDB=(self.outdir.has_abiext("DDE"), "t"))
class DdkTask(DfptTask):
"""Task for DDK calculations."""
color_rgb = np.array((61, 158, 255)) / 255
#@check_spectator
def _on_ok(self):
super(DdkTask, self)._on_ok()
# Copy instead of removing, otherwise optic tests fail
# Fixing this problem requires a rationalization of file extensions.
#if self.outdir.rename_abiext('1WF', 'DDK') > 0:
#if self.outdir.copy_abiext('1WF', 'DDK') > 0:
self.outdir.symlink_abiext('1WF', 'DDK')
def get_results(self, **kwargs):
results = super(DdkTask, self).get_results(**kwargs)
return results.register_gridfs_file(DDK=(self.outdir.has_abiext("DDK"), "t"))
class BecTask(DfptTask):
"""
Task for the calculation of Born effective charges.
bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
bec_deps.update({scf_task: "WFK"})
"""
color_rgb = np.array((122, 122, 255)) / 255
def make_links(self):
"""Replace the default behaviour of make_links"""
#print("In BEC make_links")
for dep in self.deps:
if dep.exts == ["DDK"]:
ddk_task = dep.node
out_ddk = ddk_task.outdir.has_abiext("DDK")
if not out_ddk:
raise RuntimeError("%s didn't produce the DDK file" % ddk_task)
# Get (fortran) idir and construct the name of the 1WF file expected by Abinit
rfdir = list(ddk_task.input["rfdir"])
if rfdir.count(1) != 1:
raise RuntimeError("Only one direction should be specifned in rfdir but rfdir = %s" % rfdir)
idir = rfdir.index(1) + 1
ddk_case = idir + 3 * len(ddk_task.input.structure)
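# Editor's worked example (assumed input): rfdir = [0, 1, 0] and a structure
# with 2 atoms -> idir = 2, ddk_case = 2 + 3*2 = 8, i.e. we link "in_1WF8".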
infile = self.indir.path_in("in_1WF%d" % ddk_case)
os.symlink(out_ddk, infile)
elif dep.exts == ["WFK"]:
gs_task = dep.node
out_wfk = gs_task.outdir.has_abiext("WFK")
if not out_wfk:
raise RuntimeError("%s didn't produce the WFK file" % gs_task)
os.symlink(out_wfk, self.indir.path_in("in_WFK"))
else:
raise ValueError("Don't know how to handle extension: %s" % dep.exts)
class PhononTask(DfptTask):
"""
DFPT calculations for a single atomic perturbation.
Provide support for in-place restart via (1WF|1DEN) files
"""
# TODO:
# for the time being we don't discern between GS and PhononCalculations.
CRITICAL_EVENTS = [
events.ScfConvergenceWarning,
]
color_rgb = np.array((0, 0, 255)) / 255
def restart(self):
"""
Phonon calculations can be restarted only if we have the 1WF file or the 1DEN file
from which we can read the first-order wavefunctions or the first-order density.
Prefer 1WF over 1DEN since we can reuse the wavefunctions.
"""
# Abinit adds the idir-ipert index at the end of the file and this breaks the extension
# e.g. out_1WF4, out_DEN4. find_1wf_files and find_1den_files return the list of files found.
restart_file, irdvars = None, None
# Highest priority to the 1WF file because restart is more efficient.
wf_files = self.outdir.find_1wf_files()
if wf_files is not None:
restart_file = wf_files[0].path
irdvars = irdvars_for_ext("1WF")
if len(wf_files) != 1:
restart_file = None
logger.critical("Found more than one 1WF file. Restart is ambiguous!")
if restart_file is None:
den_files = self.outdir.find_1den_files()
if den_files is not None:
restart_file = den_files[0].path
irdvars = {"ird1den": 1}
if len(den_files) != 1:
restart_file = None
logger.critical("Found more than one 1DEN file. Restart is ambiguous!")
if restart_file is None:
# Raise because otherwise restart is equivalent to a run from scratch --> infinite loop!
raise self.RestartError("%s: Cannot find the 1WF|1DEN file to restart from." % self)
# Move file.
self.history.info("Will restart from %s", restart_file)
restart_file = self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
# Now we can resubmit the job.
return self._restart()
def inspect(self, **kwargs):
"""
Plot the Phonon SCF cycle results with matplotlib.
Returns:
`matplotlib` figure, None if some error occurred.
"""
scf_cycle = abiinspect.PhononScfCycle.from_file(self.output_file.path)
if scf_cycle is not None:
if "title" not in kwargs: kwargs["title"] = str(self)
return scf_cycle.plot(**kwargs)
def get_results(self, **kwargs):
results = super(PhononTask, self).get_results(**kwargs)
return results.register_gridfs_files(DDB=(self.outdir.has_abiext("DDB"), "t"))
def make_links(self):
super(PhononTask, self).make_links()
# fix the problem that abinit uses the 1WF extension for the DDK output file but reads it with the irdddk flag
#if self.indir.has_abiext('DDK'):
# self.indir.rename_abiext('DDK', '1WF')
class EphTask(AbinitTask):
"""
Class for electron-phonon calculations.
"""
color_rgb = np.array((255, 128, 0)) / 255
class ManyBodyTask(AbinitTask):
"""
Base class for Many-body tasks (Screening, Sigma, Bethe-Salpeter)
Mainly used to implement methods that are common to MBPT calculations with Abinit.
.. warning::
This class should not be instantiated directly.
"""
def reduce_memory_demand(self):
"""
Method that can be called by the scheduler to decrease the memory demand of a specific task.
Returns True in case of success, False in case of Failure.
"""
# The first digit governs the storage of W(q), the second digit the storage of u(r)
# Try to avoid the storage of u(r) first since reading W(q) from file will lead to a dramatic slowdown.
prev_gwmem = int(self.get_inpvar("gwmem", default=11))
first_dig, second_dig = prev_gwmem // 10, prev_gwmem % 10
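# Editor's worked example: gwmem = 11 -> first call sets "10" (u(r) no longer
# kept in memory); a second call sets "00" (W(q) read from file as well);
# at "00" nothing is left to reduce and the method returns False.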
if second_dig == 1:
self.set_vars(gwmem="%.2d" % (10 * first_dig))
return True
if first_dig == 1:
self.set_vars(gwmem="%.2d" % 00)
return True
# gwmem 00 d'oh!
return False
class ScrTask(ManyBodyTask):
"""Tasks for SCREENING calculations """
color_rgb = np.array((255, 128, 0)) / 255
#def inspect(self, **kwargs):
# """Plot graph showing the number of q-points computed and the wall-time used"""
@property
def scr_path(self):
"""Absolute path of the SCR file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._scr_path
except AttributeError:
path = self.outdir.has_abiext("SCR.nc")
if path: self._scr_path = path
return path
def open_scr(self):
"""
Open the SCR file located in self.outdir.
Returns :class:`ScrFile` object, None if file could not be found or file is not readable.
"""
scr_path = self.scr_path
if not scr_path:
logger.critical("%s didn't produce a SCR.nc file in %s" % (self, self.outdir))
return None
# Open the SCR file.
from abipy.electrons.scr import ScrFile
try:
return ScrFile(scr_path)
except Exception as exc:
logger.critical("Exception while reading SCR file at %s:\n%s" % (scr_path, str(exc)))
return None
class SigmaTask(ManyBodyTask):
"""
Tasks for SIGMA calculations. Provides support for in-place restart via QPS files
"""
CRITICAL_EVENTS = [
events.QPSConvergenceWarning,
]
color_rgb = np.array((0, 255, 0)) / 255
def restart(self):
# Sigma (GW) calculations can be restarted only if we have the QPS file
# from which we can read the results of the previous step.
ext = "QPS"
restart_file = self.outdir.has_abiext(ext)
if not restart_file:
raise self.RestartError("%s: Cannot find the QPS file to restart from." % self)
self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
irdvars = irdvars_for_ext(ext)
self.set_vars(irdvars)
# Now we can resubmit the job.
self.history.info("Will restart from %s", restart_file)
return self._restart()
#def inspect(self, **kwargs):
# """Plot graph showing the number of k-points computed and the wall-time used"""
@property
def sigres_path(self):
"""Absolute path of the SIGRES file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._sigres_path
except AttributeError:
path = self.outdir.has_abiext("SIGRES")
if path: self._sigres_path = path
return path
def open_sigres(self):
"""
Open the SIGRES file located in self.outdir.
Returns :class:`SigresFile` object, None if file could not be found or file is not readable.
"""
sigres_path = self.sigres_path
if not sigres_path:
logger.critical("%s didn't produce a SIGRES file in %s" % (self, self.outdir))
return None
# Open the SIGRES file.
from abipy.electrons.gw import SigresFile
try:
return SigresFile(sigres_path)
except Exception as exc:
logger.critical("Exception while reading SIGRES file at %s:\n%s" % (sigres_path, str(exc)))
return None
def get_scissors_builder(self):
"""
Returns an instance of :class:`ScissorsBuilder` from the SIGRES file.
Raise:
`RuntimeError` if SIGRES file is not found.
"""
from abipy.electrons.scissors import ScissorsBuilder
if self.sigres_path:
return ScissorsBuilder.from_file(self.sigres_path)
else:
raise RuntimeError("Cannot find SIGRES file!")
def get_results(self, **kwargs):
results = super(SigmaTask, self).get_results(**kwargs)
# Open the SIGRES file and add its data to results.out
with self.open_sigres() as sigres:
#results["out"].update(sigres.as_dict())
results.register_gridfs_files(SIGRES=sigres.filepath)
return results
class BseTask(ManyBodyTask):
"""
Task for Bethe-Salpeter calculations.
.. note::
The BSE code provides both iterative and direct schemes for the computation of the dielectric function.
The direct diagonalization cannot be restarted whereas Haydock and CG support restarting.
"""
CRITICAL_EVENTS = [
events.HaydockConvergenceWarning,
#events.BseIterativeDiagoConvergenceWarning,
]
color_rgb = np.array((128, 0, 255)) / 255
def restart(self):
"""
BSE calculations with Haydock can be restarted only if we have the
excitonic Hamiltonian and the HAYDR_SAVE file.
"""
# TODO: This version seems to work but the main output file is truncated
# TODO: Handle restart if CG method is used
# TODO: restart should receive a list of critical events
# the log file is complete though.
irdvars = {}
# Move the BSE blocks to indata.
# This is done only once at the end of the first run.
# Successive restarts will use the BSR|BSC files in the indir directory
# to initialize the excitonic Hamiltonian
count = 0
for ext in ("BSR", "BSC"):
ofile = self.outdir.has_abiext(ext)
if ofile:
count += 1
irdvars.update(irdvars_for_ext(ext))
self.out_to_in(ofile)
if not count:
# outdir does not contain the BSR|BSC file.
# This means that num_restart > 1 and the files should be in task.indir
count = 0
for ext in ("BSR", "BSC"):
ifile = self.indir.has_abiext(ext)
if ifile:
count += 1
if not count:
raise self.RestartError("%s: Cannot find BSR|BSC files in %s" % (self, self.indir))
# Rename HAYDR_SAVE files
count = 0
for ext in ("HAYDR_SAVE", "HAYDC_SAVE"):
ofile = self.outdir.has_abiext(ext)
if ofile:
count += 1
irdvars.update(irdvars_for_ext(ext))
self.out_to_in(ofile)
if not count:
raise self.RestartError("%s: Cannot find the HAYDR_SAVE file to restart from." % self)
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
# Now we can resubmit the job.
#self.history.info("Will restart from %s", restart_file)
return self._restart()
#def inspect(self, **kwargs):
# """
# Plot the Haydock iterations with matplotlib.
#
# Returns
# `matplotlib` figure, None if some error occurred.
# """
# haydock_cycle = abiinspect.HaydockIterations.from_file(self.output_file.path)
# if haydock_cycle is not None:
# if "title" not in kwargs: kwargs["title"] = str(self)
# return haydock_cycle.plot(**kwargs)
@property
def mdf_path(self):
"""Absolute path of the MDF file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._mdf_path
except AttributeError:
path = self.outdir.has_abiext("MDF.nc")
if path: self._mdf_path = path
return path
def open_mdf(self):
"""
Open the MDF file located in self.outdir.
Returns :class:`MdfFile` object, None if file could not be found or file is not readable.
"""
mdf_path = self.mdf_path
if not mdf_path:
logger.critical("%s didn't produce a MDF file in %s" % (self, self.outdir))
return None
# Open the MDF file.
from abipy.electrons.bse import MdfFile
try:
return MdfFile(mdf_path)
except Exception as exc:
logger.critical("Exception while reading MDF file at %s:\n%s" % (mdf_path, str(exc)))
return None
def get_results(self, **kwargs):
results = super(BseTask, self).get_results(**kwargs)
with self.open_mdf() as mdf:
#results["out"].update(mdf.as_dict())
#epsilon_infinity optical_gap
results.register_gridfs_files(MDF=mdf.filepath)
return results
class OpticTask(Task):
"""
Task for the computation of optical spectra with optic i.e.
RPA without local-field effects and velocity operator computed from DDK files.
"""
color_rgb = np.array((255, 204, 102)) / 255
def __init__(self, optic_input, nscf_node, ddk_nodes, workdir=None, manager=None):
"""
Create an instance of :class:`OpticTask` from a string containing the input.
Args:
optic_input: string with the optic variables (filepaths will be added at run time).
nscf_node: The NSCF task that will produce the WFK file or string with the path of the WFK file.
ddk_nodes: List of :class:`DdkTask` nodes that will produce the DDK files or list of DDK paths.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
# Convert paths to FileNodes
self.nscf_node = Node.as_node(nscf_node)
self.ddk_nodes = [Node.as_node(n) for n in ddk_nodes]
assert len(ddk_nodes) == 3
#print(self.nscf_node, self.ddk_nodes)
# Use DDK extension instead of 1WF
deps = {n: "1WF" for n in self.ddk_nodes}
#deps = {n: "DDK" for n in self.ddk_nodes}
deps.update({self.nscf_node: "WFK"})
super(OpticTask, self).__init__(optic_input, workdir=workdir, manager=manager, deps=deps)
def set_workdir(self, workdir, chroot=False):
"""Set the working directory of the task."""
super(OpticTask, self).set_workdir(workdir, chroot=chroot)
# Small hack: the log file of optics is actually the main output file.
self.output_file = self.log_file
@deprecated(message="_set_inpvars is deprecated. Use set_vars")
def _set_inpvars(self, *args, **kwargs):
return self.set_vars(*args, **kwargs)
def set_vars(self, *args, **kwargs):
"""
Optic does not use `get` or `ird` variables, hence we should never try
to change the input when we connect this task.
"""
kwargs.update(dict(*args))
self.history.info("OpticTask intercepted set_vars with args %s" % kwargs)
if "autoparal" in kwargs: self.input.set_vars(autoparal=kwargs["autoparal"])
if "max_ncpus" in kwargs: self.input.set_vars(max_ncpus=kwargs["max_ncpus"])
@property
def executable(self):
"""Path to the executable required for running the :class:`OpticTask`."""
try:
return self._executable
except AttributeError:
return "optic"
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
#optic.in ! Name of input file
#optic.out ! Unused
#optic ! Root name for all files that will be produced
app(self.input_file.path) # Path to the input file
app(os.path.join(self.workdir, "unused")) # Path to the output file
app(os.path.join(self.workdir, self.prefix.odata)) # Prefix for output data
return "\n".join(lines)
@property
def wfk_filepath(self):
"""Returns (at runtime) the absolute path of the WFK file produced by the NSCF run."""
return self.nscf_node.outdir.has_abiext("WFK")
@property
def ddk_filepaths(self):
"""Returns (at runtime) the absolute path of the DDK files produced by the DDK runs."""
return [ddk_task.outdir.has_abiext("1WF") for ddk_task in self.ddk_nodes]
def make_input(self):
"""Construct and write the input file of the calculation."""
# Set the file paths.
all_files ={"ddkfile_"+str(n+1) : ddk for n,ddk in enumerate(self.ddk_filepaths)}
all_files.update({"wfkfile" : self.wfk_filepath})
files_nml = {"FILES" : all_files}
files= nmltostring(files_nml)
# Get the input specified by the user
user_file = nmltostring(self.input.as_dict())
# Join them.
return files + user_file
def setup(self):
"""Public method called before submitting the task."""
def make_links(self):
"""
Optic allows the user to specify the paths of the input files,
hence we don't need to create symbolic links.
"""
def get_results(self, **kwargs):
results = super(OpticTask, self).get_results(**kwargs)
#results.update(
#"epsilon_infinity":
#))
return results
def fix_abicritical(self):
"""
Cannot fix abicritical errors for optic
"""
return 0
#@check_spectator
def reset_from_scratch(self):
"""
Restart from scratch. This is to be used if a job is restarted with more resources after a crash.
"""
# Move output files produced in workdir to _reset otherwise check_status continues
# to see the task as crashed even if the job did not run
# Create reset directory if not already done.
reset_dir = os.path.join(self.workdir, "_reset")
reset_file = os.path.join(reset_dir, "_counter")
if not os.path.exists(reset_dir):
os.mkdir(reset_dir)
num_reset = 1
else:
with open(reset_file, "rt") as fh:
num_reset = 1 + int(fh.read())
# Move files to reset and append digit with reset index.
def move_file(f):
if not f.exists: return
try:
f.move(os.path.join(reset_dir, f.basename + "_" + str(num_reset)))
except OSError as exc:
logger.warning("Couldn't move file {}. exc: {}".format(f, str(exc)))
for fname in ("output_file", "log_file", "stderr_file", "qout_file", "qerr_file", "mpiabort_file"):
move_file(getattr(self, fname))
with open(reset_file, "wt") as fh:
fh.write(str(num_reset))
self.start_lockfile.remove()
# Reset datetimes
self.datetimes.reset()
return self._restart(submit=False)
def fix_queue_critical(self):
"""
This function tries to fix critical events originating from the queue submission system.
General strategy: first try to increase resources in order to fix the problem;
if this is not possible, call a task-specific method to attempt to decrease the demands.
Returns:
1 if task has been fixed else 0.
"""
from pymatgen.io.abinit.scheduler_error_parsers import NodeFailureError, MemoryCancelError, TimeCancelError
#assert isinstance(self.manager, TaskManager)
if not self.queue_errors:
if self.mem_scales or self.load_scales:
try:
self.manager.increase_resources() # acts either on the policy or on the qadapter
self.reset_from_scratch()
return
except ManagerIncreaseError:
self.set_status(self.S_ERROR, msg='unknown queue error, could not increase resources any further')
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='unknown queue error, no options left')
raise FixQueueCriticalError
else:
for error in self.queue_errors:
logger.info('fixing: %s' % str(error))
if isinstance(error, NodeFailureError):
# if the problematic node is known, exclude it
if error.nodes is not None:
try:
self.manager.exclude_nodes(error.nodes)
self.reset_from_scratch()
self.set_status(self.S_READY, msg='excluding nodes')
except Exception:
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='Node error but no node identified.')
raise FixQueueCriticalError
elif isinstance(error, MemoryCancelError):
# Ask the qadapter to provide more resources, i.e. more cpus so more total memory.
# If the code scales, this should fix the memory problem.
# Increase both max and min ncpus of the autoparal and rerun autoparal.
if self.mem_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased ncpus to solve memory problem')
return
except ManagerIncreaseError:
logger.warning('increasing ncpus failed')
# if the max is reached, try to increase the memory per cpu:
try:
self.manager.increase_mem()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased mem')
return
except ManagerIncreaseError:
logger.warning('increasing mem failed')
# if this failed ask the task to provide a method to reduce the memory demand
try:
self.reduce_memory_demand()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='decreased mem demand')
return
except DecreaseDemandsError:
logger.warning('decreasing demands failed')
msg = ('Memory error detected but the memory could not be increased, neither could the\n'
'memory demand be decreased. Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
raise FixQueueCriticalError
elif isinstance(error, TimeCancelError):
# ask the qadapter to provide more time
try:
self.manager.increase_time()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased wall time')
return
except ManagerIncreaseError:
logger.warning('increasing the walltime failed')
# if this fails ask the qadapter to increase the number of cpus
if self.load_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased number of cpus')
return
except ManagerIncreaseError:
logger.warning('increasing ncpus to speed up the calculation and stay within the walltime failed')
# if this failed ask the task to provide a method to speed up the task
try:
self.speed_up()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='task speedup')
return
except DecreaseDemandsError:
logger.warning('decreasing demands failed')
msg = ('Time cancel error detected but the time could not be increased, neither could\n'
'the time demand be decreased by speeding up the task or increasing the number of cpus.\n'
'Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
else:
msg = 'No solution provided for error %s. Unrecoverable error.' % error.name
self.set_status(self.S_ERROR, msg)
return 0
def autoparal_run(self):
"""
Find an optimal set of parameters for the execution of the Optic task
This method can change the submission parameters e.g. the number of CPUs for MPI and OpenMP.
Returns 0 on success.
"""
policy = self.manager.policy
if policy.autoparal == 0: # or policy.max_ncpus in [None, 1]:
logger.info("Nothing to do in autoparal, returning (None, None)")
return 0
if policy.autoparal != 1:
raise NotImplementedError("autoparal != 1")
############################################################################
# Run ABINIT in sequential to get the possible configurations with max_ncpus
############################################################################
# Set the variables for automatic parallelization
# Will get all the possible configurations up to max_ncpus
# Return immediately if max_ncpus == 1
max_ncpus = self.manager.max_cores
if max_ncpus == 1: return 0
autoparal_vars = dict(autoparal=policy.autoparal, max_ncpus=max_ncpus)
self.set_vars(autoparal_vars)
# Run the job in a shell subprocess with mpi_procs = 1
# we don't want to make a request to the queue manager for this simple job!
# Return code is always != 0
process = self.manager.to_shell_manager(mpi_procs=1).launch(self)
self.history.pop()
retcode = process.wait()
# Remove the variables added for the automatic parallelization
self.input.remove_vars(autoparal_vars.keys())
##############################################################
# Parse the autoparal configurations from the main output file
##############################################################
parser = ParalHintsParser()
try:
pconfs = parser.parse(self.output_file.path)
except parser.Error:
logger.critical("Error while parsing Autoparal section:\n%s" % straceback())
return 2
######################################################
# Select the optimal configuration according to policy
######################################################
#optconf = self.find_optconf(pconfs)
# Select the partition on which we'll be running and set MPI/OMP cores.
optconf = self.manager.select_qadapter(pconfs)
####################################################
# Change the input file and/or the submission script
####################################################
self.set_vars(optconf.vars)
# Write autoparal configurations to JSON file.
d = pconfs.as_dict()
d["optimal_conf"] = optconf
json_pretty_dump(d, os.path.join(self.workdir, "autoparal.json"))
##############
# Finalization
##############
# Reset the status, remove garbage files ...
self.set_status(self.S_INIT, msg='finished autoparal')
# Remove the output file since Abinit likes to create new files
# with extension .outA, .outB if the file already exists.
os.remove(self.output_file.path)
#os.remove(self.log_file.path)
os.remove(self.stderr_file.path)
return 0
class AnaddbTask(Task):
"""Task for Anaddb runs (post-processing of DFPT calculations)."""
color_rgb = np.array((204, 102, 255)) / 255
def __init__(self, anaddb_input, ddb_node,
gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None):
"""
Create an instance of :class:`AnaddbTask` from a string containing the input.
Args:
anaddb_input: string with the anaddb variables.
ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
gkk_node: The node that will produce the GKK file (optional). Accept :class:`Task`, :class:`Work` or filepath.
md_node: The node that will produce the MD file (optional). Accept `Task`, `Work` or filepath.
ddk_node: The node that will produce the DDK file (optional). Accept `Task`, `Work` or filepath.
workdir: Path to the working directory (optional).
manager: :class:`TaskManager` object (optional).
"""
# Keep a reference to the nodes.
self.ddb_node = Node.as_node(ddb_node)
deps = {self.ddb_node: "DDB"}
self.gkk_node = Node.as_node(gkk_node)
if self.gkk_node is not None:
deps.update({self.gkk_node: "GKK"})
# I never used it!
self.md_node = Node.as_node(md_node)
if self.md_node is not None:
deps.update({self.md_node: "MD"})
self.ddk_node = Node.as_node(ddk_node)
if self.ddk_node is not None:
deps.update({self.ddk_node: "DDK"})
super(AnaddbTask, self).__init__(input=anaddb_input, workdir=workdir, manager=manager, deps=deps)
@classmethod
def temp_shell_task(cls, inp, ddb_node,
gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None):
"""
Build a :class:`AnaddbTask` with a temporary workdir. The task is executed via
the shell with 1 MPI proc. Mainly used for post-processing the DDB files.
Args:
inp: string with the anaddb variables.
ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
See `AnaddbInit` for the meaning of the other arguments.
"""
# Build a simple manager to run the job in a shell subprocess
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
# Construct the task and run it
return cls(inp, ddb_node,
gkk_node=gkk_node, md_node=md_node, ddk_node=ddk_node,
workdir=workdir, manager=manager.to_shell_manager(mpi_procs=1))
@property
def executable(self):
"""Path to the executable required for running the :class:`AnaddbTask`."""
try:
return self._executable
except AttributeError:
return "anaddb"
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
app(self.input_file.path) # 1) Path of the input file
app(self.output_file.path) # 2) Path of the output file
app(self.ddb_filepath) # 3) Input derivative database e.g. t13.ddb.in
app(self.md_filepath) # 4) Output molecular dynamics e.g. t13.md
app(self.gkk_filepath) # 5) Input elphon matrix elements (GKK file)
app(self.outdir.path_join("out")) # 6) Base name for elphon output files e.g. t13
app(self.ddk_filepath) # 7) File containing ddk filenames for elphon/transport.
return "\n".join(lines)
@property
def ddb_filepath(self):
"""Returns (at runtime) the absolute path of the input DDB file."""
# This is not very elegant! A possible approach could be to use self.ddb_node.outdir!
if isinstance(self.ddb_node, FileNode): return self.ddb_node.filepath
path = self.ddb_node.outdir.has_abiext("DDB")
return path if path else "DDB_FILE_DOES_NOT_EXIST"
@property
def md_filepath(self):
"""Returns (at runtime) the absolute path of the input MD file."""
if self.md_node is None: return "MD_FILE_DOES_NOT_EXIST"
if isinstance(self.md_node, FileNode): return self.md_node.filepath
path = self.md_node.outdir.has_abiext("MD")
return path if path else "MD_FILE_DOES_NOT_EXIST"
@property
def gkk_filepath(self):
"""Returns (at runtime) the absolute path of the input GKK file."""
if self.gkk_node is None: return "GKK_FILE_DOES_NOT_EXIST"
if isinstance(self.gkk_node, FileNode): return self.gkk_node.filepath
path = self.gkk_node.outdir.has_abiext("GKK")
return path if path else "GKK_FILE_DOES_NOT_EXIST"
@property
def ddk_filepath(self):
"""Returns (at runtime) the absolute path of the input DKK file."""
if self.ddk_node is None: return "DDK_FILE_DOES_NOT_EXIST"
if isinstance(self.ddk_node, FileNode): return self.ddk_node.filepath
path = self.ddk_node.outdir.has_abiext("DDK")
return path if path else "DDK_FILE_DOES_NOT_EXIST"
def setup(self):
"""Public method called before submitting the task."""
def make_links(self):
"""
Anaddb allows the user to specify the paths of the input files,
hence we don't need to create symbolic links.
"""
def open_phbst(self):
"""Open PHBST file produced by Anaddb and returns :class:`PhbstFile` object."""
from abipy.dfpt.phonons import PhbstFile
phbst_path = os.path.join(self.workdir, "run.abo_PHBST.nc")
if not os.path.exists(phbst_path):
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a PHBST file in %s" % (self, self.outdir))
return None
try:
return PhbstFile(phbst_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (phbst_path, str(exc)))
return None
def open_phdos(self):
"""Open PHDOS file produced by Anaddb and returns :class:`PhdosFile` object."""
from abipy.dfpt.phonons import PhdosFile
phdos_path = os.path.join(self.workdir, "run.abo_PHDOS.nc")
if not os.path.exists(phdos_path):
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a PHBST file in %s" % (self, self.outdir))
return None
try:
return PhdosFile(phdos_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (phdos_path, str(exc)))
return None
def get_results(self, **kwargs):
results = super(AnaddbTask, self).get_results(**kwargs)
return results
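# Editor's note -- hedged usage sketch for ``AnaddbTask.temp_shell_task``
# (the input constructor and the DDB path below are assumptions, not part of
# the original module):
#
#     from abipy.abio.inputs import AnaddbInput  # assumed import path
#     inp = AnaddbInput.phbands_and_dos(structure, ngqpt=(4, 4, 4), ndivsm=10)
#     task = AnaddbTask.temp_shell_task(inp, ddb_node="/path/to/out_DDB")
#     task.start_and_wait()
#     phbst = task.open_phbst()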
| mit |
tmeits/pybrain | pybrain/auxiliary/gaussprocess.py | 25 | 9240 | from __future__ import print_function
__author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de; Christian Osendorfer, osendorf@in.tum.de'
from scipy import r_, exp, zeros, eye, array, asarray, random, ravel, diag, sqrt, sin, cos, sort, mgrid, dot, floor
from scipy import c_ #@UnusedImport
from scipy.linalg import solve, inv
from pybrain.datasets import SupervisedDataSet
from scipy.linalg import norm
class GaussianProcess:
""" This class represents a basic n-dimensional Gaussian Process. The implementation
follows the book 'Gaussian Processes for Machine Learning' by Carl E. Rasmussen
(an online version is available at: http://www.gaussianprocess.org/gpml/chapters/).
The hyperparameters of the GP can be adjusted by setting the self.hyper variable,
which must be a tuple of size 3.
"""
def __init__(self, indim, start=0, stop=1, step=0.1):
""" initializes the gaussian process object.
:arg indim: input dimension
:key start: start of interval for sampling the GP.
:key stop: stop of interval for sampling the GP.
:key step: stepsize for sampling interval.
:note: start, stop, step can either be scalars or tuples of size 'indim'.
"""
self.mean = 0
self.start = start
self.stop = stop
self.step = step
self.indim = indim
self.trainx = zeros((0, indim), float)
self.trainy = zeros((0), float)
self.noise = zeros((0), float)
self.testx = self._buildGrid()
self.calculated = True
self.pred_mean = zeros(len(self.testx))
self.pred_cov = eye(len(self.testx))
self.autonoise = False
self.hyper = (0.5, 2.0, 0.1)
def _kernel(self, a, b):
""" kernel function, here RBF kernel """
(l, sigma_f, _sigma_n) = self.hyper
r = sigma_f ** 2 * exp(-1.0 / (2 * l ** 2) * norm(a - b, 2) ** 2)
# if a == b:
# r += sigma_n**2
return r
def _buildGrid(self):
""" returns a mgrid type of array for 'dim' dimensions """
(start, stop, step) = (self.start, self.stop, self.step)
if isinstance(start, (int, float, complex)):
dimstr = 'start:stop:step, '*self.indim
else:
assert len(start) == len(stop) == len(step)
dimstr = ["start[%i]:stop[%i]:step[%i], " % (i, i, i) for i in range(len(start))]
dimstr = ''.join(dimstr)
return eval('c_[map(ravel, mgrid[' + dimstr + '])]').T
def _buildCov(self, a, b):
K = zeros((len(a), len(b)), float)
for i in range(len(a)):
for j in range(len(b)):
K[i, j] = self._kernel(a[i, :], b[j, :])
return K
def reset(self):
self.trainx = zeros((0, self.indim), float)
self.trainy = zeros((0), float)
self.noise = zeros((0), float)
self.pred_mean = zeros(len(self.testx))
self.pred_cov = eye(len(self.testx))
def trainOnDataset(self, dataset):
""" takes a SequentialDataSet with indim input dimension and scalar target """
assert (dataset.getDimension('input') == self.indim)
assert (dataset.getDimension('target') == 1)
self.trainx = dataset.getField('input')
self.trainy = ravel(dataset.getField('target'))
self.noise = array([0.001] * len(self.trainx))
# print(self.trainx, self.trainy)
self.calculated = False
def addDataset(self, dataset):
""" adds the points from the dataset to the training set """
assert (dataset.getDimension('input') == self.indim)
assert (dataset.getDimension('target') == 1)
self.trainx = r_[self.trainx, dataset.getField('input')]
self.trainy = r_[self.trainy, ravel(dataset.getField('target'))]
self.noise = array([0.001] * len(self.trainx))
self.calculated = False
def addSample(self, train, target):
self.trainx = r_[self.trainx, asarray([train])]
self.trainy = r_[self.trainy, asarray(target)]
self.noise = r_[self.noise, array([0.001])]
self.calculated = False
def testOnArray(self, arr):
self.testx = arr
self._calculate()
return self.pred_mean
def _calculate(self):
# calculate only if necessary
if len(self.trainx) == 0:
return
# build covariance matrices
train_train = self._buildCov(self.trainx, self.trainx)
train_test = self._buildCov(self.trainx, self.testx)
test_train = train_test.T
test_test = self._buildCov(self.testx, self.testx)
# calculate predictive mean and covariance
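# Editor's note: standard GP regression equations (Rasmussen & Williams,
# eqs. 2.23-2.24), with K = K(X, X) + noise * I assembled below:
#   mu_*    = m + K(X_*, X) K^{-1} (y - m)
#   Sigma_* = K(X_*, X_*) - K(X_*, X) K^{-1} K(X, X_*)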
K = train_train + self.noise * eye(len(self.trainx))
if self.autonoise:
# calculate average neighboring distance for auto-noise
avgdist = 0
sort_trainx = sort(self.trainx)
for i, d in enumerate(sort_trainx):
if i == 0:
continue
avgdist += d - sort_trainx[i - 1]
avgdist /= len(sort_trainx) - 1
# sort(self.trainx)
# add auto-noise from neighbouring samples (not standard gp)
for i in range(len(self.trainx)):
for j in range(len(self.trainx)):
if norm(self.trainx[i] - self.trainx[j]) > avgdist:
continue
d = norm(self.trainy[i] - self.trainy[j]) / (exp(norm(self.trainx[i] - self.trainx[j])))
K[i, i] += d
self.pred_mean = self.mean + dot(test_train, solve(K, self.trainy - self.mean, sym_pos=0))
self.pred_cov = test_test - dot(test_train, dot(inv(K), train_test))
self.calculated = True
def draw(self):
if not self.calculated:
self._calculate()
return self.pred_mean + random.multivariate_normal(zeros(len(self.testx)), self.pred_cov)
def plotCurves(self, showSamples=False, force2D=True):
from pylab import clf, hold, plot, fill, title, gcf, pcolor, gray
if not self.calculated:
self._calculate()
if self.indim == 1:
clf()
hold(True)
if showSamples:
# plot samples (gray)
for _ in range(5):
plot(self.testx, self.pred_mean + random.multivariate_normal(zeros(len(self.testx)), self.pred_cov), color='gray')
# plot training set
plot(self.trainx, self.trainy, 'bx')
# plot mean (blue)
plot(self.testx, self.pred_mean, 'b', linewidth=1)
# plot variance (as "polygon" going from left to right for upper half and back for lower half)
fillx = r_[ravel(self.testx), ravel(self.testx[::-1])]
filly = r_[self.pred_mean + 2 * diag(self.pred_cov), self.pred_mean[::-1] - 2 * diag(self.pred_cov)[::-1]]
fill(fillx, filly, facecolor='gray', edgecolor='white', alpha=0.3)
title('1D Gaussian Process with mean and variance')
elif self.indim == 2 and not force2D:
from matplotlib import axes3d as a3
fig = gcf()
fig.clear()
ax = a3.Axes3D(fig) #@UndefinedVariable
# plot training set
ax.plot3D(ravel(self.trainx[:, 0]), ravel(self.trainx[:, 1]), ravel(self.trainy), 'ro')
# plot mean
(x, y, z) = [m.reshape(int(sqrt(len(m))), int(sqrt(len(m)))) for m in (self.testx[:, 0], self.testx[:, 1], self.pred_mean)]
ax.plot_wireframe(x, y, z, colors='gray')
return ax
elif self.indim == 2 and force2D:
# plot mean on pcolor map
gray()
# (x, y, z) = map(lambda m: m.reshape(sqrt(len(m)), sqrt(len(m))), (self.testx[:,0], self.testx[:,1], self.pred_mean))
m = int(floor(sqrt(len(self.pred_mean))))
pcolor(self.pred_mean.reshape(m, m)[::-1, :])
else: print("plotting only supported for indim=1 or indim=2.")
if __name__ == '__main__':
from pylab import figure, show
# --- example on how to use the GP in 1 dimension
ds = SupervisedDataSet(1, 1)
gp = GaussianProcess(indim=1, start= -3, stop=3, step=0.05)
figure()
x = mgrid[-3:3:0.2]
y = 0.1 * x ** 2 + x + 1
z = sin(x) + 0.5 * cos(y)
ds.addSample(-2.5, -1)
ds.addSample(-1.0, 3)
gp.mean = 0
# new feature "autonoise" adds uncertainty to data depending on
# its distance to other points in the dataset. Not tested much yet.
# gp.autonoise = True
gp.trainOnDataset(ds)
gp.plotCurves(showSamples=True)
# you can also test the gp on single points, but this deletes the
# original testing grid. It can be restored with a call to _buildGrid()
print((gp.testOnArray(array([[0.4]]))))
# --- example on how to use the GP in 2 dimensions
ds = SupervisedDataSet(2, 1)
gp = GaussianProcess(indim=2, start=0, stop=5, step=0.2)
figure()
x, y = mgrid[0:5:4j, 0:5:4j]
z = cos(x) * sin(y)
(x, y, z) = list(map(ravel, [x, y, z]))
for i, j, k in zip(x, y, z):
ds.addSample([i, j], [k])
gp.trainOnDataset(ds)
gp.plotCurves()
show()
| bsd-3-clause |