Dataset schema (one record per source file):
  repo_name : string, 7-92 chars
  path      : string, 5-129 chars
  copies    : string, 201 distinct values
  size      : string, 4-6 chars
  content   : string, 1.03k-375k chars
  license   : string, 15 distinct values
JoeBartelmo/PyDetect
gui/img_proc/GlobalSurveyor.py
2
10667
# Copyright (c) 2016, Jeffrey Maggio and Joseph Bartelmo # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and # associated documentation files (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial # portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT # LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import Tkinter as tk from PIL import Image, ImageTk import cv2 import sys import numpy import time import matplotlib.path as matpath class GlobalSurveyor(object): def __init__(self, root, ideal_image, number_polys = 2, thresholds = (50, 200), polys = None): self.ideal_im = ideal_image self.parent = root self.done = False self.points = [] self.warningColor = (128, 128, 0) self.errorColor = (255, 0, 0) self.number_polys = number_polys #self.number_sides = number_sides if polys == None: self.polys = [] else: self.polys = polys self.thresh = thresholds self.ideal_counts = [] self.calibrates_polygons() def calibrates_polygons(self): polys = self.define_polygons(self.ideal_im) ideal_kp = self.apply_fast(self.ideal_im) self.ideal_counts = self.keypoints_to_counts(ideal_kp) def _on_left_mouse(self, event): # CITE THIS FROM STACKEXCHANGE # Mouse callback that gets called for every mouse event (i.e. moving, clicking, etc.) if self.done: # Nothing more to do return # Left click means adding a point at current position to the list of points print("Adding point #%d with position(%d,%d)" % (len(self.points), event.x, event.y)) self.points.append((event.x, event.y)) def _on_right_mouse(self, event): # CITE THIS FROM STACKEXCHANGE if len(self.points) > 2: # Right click means we're done print("Completing polygon with %d points." 
% len(self.points)) self.done = True def define_polygons(self, image): # use this to automate point picking and/or display point picker window in gui FINAL_LINE_COLOR = (255, 0, 0) WORKING_LINE_COLOR = (0, 0, 255) top = tk.Toplevel(self.parent) top.title("Select bounding points for the target area") print image.shape top.update_idletasks() width = image.shape[1] height = image.shape[0] x = (top.winfo_screenwidth() // 2) - (width // 2) y = (top.winfo_screenheight() // 2) - (height // 2) top.geometry('{}x{}+{}+{}'.format(width, height, x, y)) #we are defaulting to the image size, i don't want to deal wiht resizing initial_im = tk.PhotoImage() image_label = tk.Label(top, image=initial_im) image_label.grid(row = 0, column = 0) image_label.bind("<Button-1>", self._on_left_mouse) image_label.bind("<Button-3>", self._on_right_mouse) top.grid() copy = image.copy() def display_image(): imageFromArray = Image.fromarray(copy) try: tkImage = ImageTk.PhotoImage(image=imageFromArray) image_label.configure(image=tkImage) image_label._image_cache = tkImage # avoid garbage collection top.update() return True except RuntimeError: print('Unable to update image frame. Assuming application has been killed unexpectidly.') return False for num in range(self.number_polys): if display_image() == False: top.destroy() return self.points = [] self.done = False while (not self.done): if (len(self.points) > 0): cv2.polylines(copy, numpy.asarray([self.points]), False, FINAL_LINE_COLOR, 1) imageFromArray = Image.fromarray(copy) if display_image() == False: top.destroy() return self.polys.append(self.points) print(self.polys) update = numpy.asarray(self.polys[num]).astype(numpy.int32) if (len(self.points) > 0): cv2.fillPoly(copy, [update], (0,0,255)) display_image() time.sleep(0.5) top.destroy() return self.polys def apply_fast(self, image): fast = cv2.FastFeatureDetector() kp = fast.detect(image, None) return kp def keypoints_to_counts(self, im_keypoints): #polys = numpy.asarray[[[0,0],[0,ideal_im.shape[0]//2],[ideal_im.shape[0]//2,ideal_im.shape[1]],[ideal_im.shape[0]//2, 0]]]): #pass in im_keypoints and count how many are in each polygon #polygon points should also be an input im_keys = [] for i in range(len(im_keypoints)): im_keys.append(im_keypoints[i].pt) # NOTE:: POLYS MUST BE IN X (COL), Y (ROW) FORMAT FOR FILLPOLY TO WORK # NOTE:: POLYS MUST ALSO BE OF DTYPE FLOAT data = self.polys polys = map(lambda data: map(lambda data: map(float, data), data), data) counts = [] for poly in polys: # MATPLOTLIB PATH EXAMPLE GOES HERE bbPath = matpath.Path(poly) # sum boolean array to get number of kp within current poly try: truth_vector = bbPath.contains_points(im_keys) # check input for contains_points. 
might only like tuples except: truth_vector = [1] * len(im_keys) counts.append(numpy.sum(truth_vector)) return numpy.asarray(counts) def threshold(self, counts, thresh_tuple): # do the thresholding thing delta = abs(counts - self.ideal_counts) threshed_polys = numpy.where((delta >= thresh_tuple[0]) & (delta < thresh_tuple[1]))[0] # ok to index like this because tuple represents vector # save which polys get marked true out so these can be used as overlay in fillpolys # this can be done by indexing into polys with truth vector saved here return threshed_polys.astype(int) # a true/false vector of length polys def generate_output(self, image, threshed_polys, color, alpha = 0.5): #, colorAlert = (0,0,255)): # use polyfill to blend im_in and overlay (which is just the polygons we want colored in) colored_polys = [] for threshed_poly in threshed_polys: colored_polys.append(numpy.array(self.polys[threshed_poly]).astype(numpy.int32)) overlay = image.copy() im_out = image.copy() for cp in range(len(colored_polys)): cv2.fillPoly(overlay, [colored_polys[cp]], color) cv2.addWeighted(overlay, alpha, im_out, 1 - alpha, 0, im_out) return im_out def run_basic_fod(self, current_im): if current_im is None: return None # load ideal image im_in_kp = self.apply_fast(current_im) counts = self.keypoints_to_counts(im_keypoints = im_in_kp) warning_polys = self.threshold(counts, self.thresh) error_polys = self.threshold(counts, (self.thresh[1], sys.maxint)) warning_overlay = self.generate_output(current_im, warning_polys, self.warningColor) return self.generate_output(warning_overlay, error_polys, self.errorColor) def get_thresholds_widget(parent, values_list): top = tk.Toplevel(parent) top.title("Select your desired thresholds") top.update_idletasks() thresh1 = tk.IntVar() thresh2 = tk.IntVar() polys = tk.IntVar() #3 labels and 3 textbox selectors label1 = tk.Label(top, text = "Warning (Maybe an object)") label1.grid(row = 1, column = 0, sticky ="nsew") threshold1 = tk.Entry(top, textvariable=thresh1) threshold1.grid(row = 1, column = 1, sticky ="nsew") label2 = tk.Label(top, text = "Error (Most defintely an object)") label2.grid(row = 2, column = 0, sticky ="nsew") threshold2 = tk.Entry(top, textvariable = thresh2) threshold2.grid(row = 2, column = 1, sticky ="nsew") label3 = tk.Label(top, text = "Number of Polygons") label3.grid(row = 3, column = 0, sticky ="nsew") threshold3 = tk.Entry(top, textvariable = polys) threshold3.grid(row = 3, column = 1, sticky ="nsew") def returnVars(): top.destroy() if len(values_list) != 3: values_list.append(thresh1.get()) values_list.append(thresh2.get()) values_list.append(polys.get()) else: values_list[0] = thresh1.get() values_list[1] = thresh2.get() values_list[2] = polys.get() button = tk.Button(top, text = "Set Values", command = returnVars) button.grid(row = 4, column = 0, columnspan = 2, sticky ="nsew") threshold1.delete(0, "end") threshold2.delete(0, "end") threshold3.delete(0, "end") if len(values_list) == 3: threshold1.insert(0, values_list[0]) threshold2.insert(0, values_list[1]) threshold3.insert(0, values_list[2]) else: threshold1.insert(0, 50) threshold2.insert(0, 200) threshold3.insert(0, 2) width = 300 height = 100 x = (top.winfo_screenwidth() // 2) - (width // 2) y = (top.winfo_screenheight() // 2) - (height // 2) top.geometry('{}x{}+{}+{}'.format(width, height, x, y)) top.grid_columnconfigure(1, weight=1) top.grid_rowconfigure(0, weight=1) top.grid_rowconfigure(1, weight=1) top.grid() top.update() parent.wait_window(top) if __name__=='__main__': root = tk.Tk() 
vals = [] get_thresholds_widget(root, vals) print 'obtained values', vals get_thresholds_widget(root, vals) print 'obtained values', vals root.mainloop()
mit
wataash/Instr
instr/ke2636a.py
1
3794
import numpy as np
import unittest2

from instr.base import SourceMeter


class Keithley2636A(SourceMeter):
    def __init__(self, rsrc=None, timeout_sec=600, reset=True):
        self._smu = 'a'
        idn = 'Keithley Instruments Inc., Model 2636A'
        super().__init__(rsrc, idn, timeout_sec, reset)

    @property
    def smu(self):
        return self._smu

    @smu.setter
    def smu(self, value):
        if value not in ['a', 'b']:
            raise ValueError
        self._smu = value

    def check_error(self):
        if self._debug_mode:
            super().check_error()
        tmp = self.q('print(errorqueue.next())')
        if tmp != '0.00000e+00\tQueue Is Empty\t0.00000e+00\n':
            raise RuntimeError('Error on Keithley 2636A.')

    def reset(self):
        self.w('reset()', True)
        # self.w('smua.reset(); smub.reset()', True)

    def iv_sweep(self, v_start=0.0, v_end=10e-3, v_step=1e-3, v_points=None,
                 i_limit=1e-6, settle_time=0.0, reset=True):
        """
        Reference manual 3-31

        TODO: when aborted?

        :return: vis, is_aborted
        """
        if reset:
            self.reset()
        if v_points is None:
            v_points = self._v_step_to_points(v_start, v_end, v_step)

        lim = 'smu{}.source.limiti = {}'.format(self.smu, i_limit)
        self.w(lim, True)

        meas = 'SweepVLinMeasureI(smu{}, {}, {}, {}, {})'. \
            format(self.smu, v_start, v_end, settle_time, v_points)
        self.w(meas, True)

        prnt = 'printbuffer(1, {}, smu{}.nvbuffer1.readings)'. \
            format(v_points, self.smu)
        resp = self.q(prnt, True)

        Is = resp.split(', ')
        Is = np.asarray(Is, np.float64)
        if len(Is) != v_points:
            aborted = True
            v_points = len(Is)
        else:
            aborted = False

        vs = np.linspace(v_start, v_end, v_points)
        vis = np.array([vs, Is]).transpose()
        return vis, aborted

    def iv_sweep_double(self, v_max, v_step=1e-3, v_points=None,
                        i_limit=1e-3, settle_time=0.0, reset=True):
        vis1, aborted = self.iv_sweep(0, v_max, v_step, v_points, i_limit,
                                      settle_time, reset)
        if aborted:
            return vis1, aborted
        vis2, aborted = self.iv_sweep(v_max, 0, v_step, v_points, i_limit,
                                      settle_time, reset)
        ret = np.concatenate((vis1, vis2))
        return ret, aborted


class TestKeithley2636A(unittest2.TestCase):
    def test_iv_sweep(self):
        import matplotlib.pyplot as plt

        ke2636a.reset()
        v_start = 0.0
        v_end = 1e-3

        self.smu = 'a'

        vis, aborted = \
            ke2636a.iv_sweep(v_start, v_end, v_step=v_end / 10, i_limit=1e-9)
        plt.plot(*vis.transpose(), 'o-')
        plt.show()

        vis, aborted = \
            ke2636a.iv_sweep(v_start, v_end, v_points=101, i_limit=1e-6)
        plt.plot(*vis.transpose(), 'o-')
        plt.show()

        # v_step ignored
        vis, aborted = \
            ke2636a.iv_sweep(v_start, v_end, v_step=1, v_points=11)
        plt.plot(*vis.transpose(), 'o-')
        plt.show()

        vis, aborted = ke2636a.iv_sweep_double(10e-3)
        plt.plot(*vis.transpose(), 'o-')
        plt.show()

        self.smu = 'b'
        vis, aborted = ke2636a.iv_sweep(v_start, v_end, v_points=11)
        plt.plot(*vis.transpose(), 'o-')
        plt.show()


if __name__ == '__main__':
    import visa

    rm = visa.ResourceManager()
    # ke2636a_rsrc = rm.open_resource('visa://169.254.136.196/GPIB0::20::INSTR')
    ke2636a_rsrc = rm.open_resource('TCPIP::169.254.000.001::INSTR')
    ke2636a = Keithley2636A(ke2636a_rsrc)
    unittest2.main()
    pass
mit
arank/mxnet
example/reinforcement-learning/ddpg/strategies.py
15
1705
import numpy as np


class BaseStrategy(object):
    """
    Base class of exploration strategy.
    """

    def get_action(self, obs, policy):
        raise NotImplementedError

    def reset(self):
        pass


class OUStrategy(BaseStrategy):
    """
    Ornstein-Uhlenbeck process: dxt = theta * (mu - xt) * dt + sigma * dWt
    where Wt denotes the Wiener process.
    """

    def __init__(self, env_spec, mu=0, theta=0.15, sigma=0.3):
        self.mu = mu
        self.theta = theta
        self.sigma = sigma
        self.action_space = env_spec.action_space
        self.state = np.ones(self.action_space.flat_dim) * self.mu

    def evolve_state(self):
        x = self.state
        dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
        self.state = x + dx
        return self.state

    def reset(self):
        self.state = np.ones(self.action_space.flat_dim) * self.mu

    def get_action(self, obs, policy):
        # get_action accepts a 2D tensor with one row
        obs = obs.reshape((1, -1))
        action = policy.get_action(obs)
        increment = self.evolve_state()
        return np.clip(action + increment,
                       self.action_space.low,
                       self.action_space.high)


if __name__ == "__main__":
    class Env1(object):
        def __init__(self):
            self.action_space = Env2()

    class Env2(object):
        def __init__(self):
            self.flat_dim = 2

    env_spec = Env1()
    test = OUStrategy(env_spec)
    states = []
    for i in range(1000):
        states.append(test.evolve_state()[0])

    import matplotlib.pyplot as plt
    plt.plot(states)
    plt.show()
apache-2.0
frank-tancf/scikit-learn
benchmarks/bench_multilabel_metrics.py
276
7138
#!/usr/bin/env python """ A comparison of multilabel target formats and metrics over them """ from __future__ import division from __future__ import print_function from timeit import timeit from functools import partial import itertools import argparse import sys import matplotlib.pyplot as plt import scipy.sparse as sp import numpy as np from sklearn.datasets import make_multilabel_classification from sklearn.metrics import (f1_score, accuracy_score, hamming_loss, jaccard_similarity_score) from sklearn.utils.testing import ignore_warnings METRICS = { 'f1': partial(f1_score, average='micro'), 'f1-by-sample': partial(f1_score, average='samples'), 'accuracy': accuracy_score, 'hamming': hamming_loss, 'jaccard': jaccard_similarity_score, } FORMATS = { 'sequences': lambda y: [list(np.flatnonzero(s)) for s in y], 'dense': lambda y: y, 'csr': lambda y: sp.csr_matrix(y), 'csc': lambda y: sp.csc_matrix(y), } @ignore_warnings def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())), formats=tuple(v for k, v in sorted(FORMATS.items())), samples=1000, classes=4, density=.2, n_times=5): """Times metric calculations for a number of inputs Parameters ---------- metrics : array-like of callables (1d or 0d) The metric functions to time. formats : array-like of callables (1d or 0d) These may transform a dense indicator matrix into multilabel representation. samples : array-like of ints (1d or 0d) The number of samples to generate as input. classes : array-like of ints (1d or 0d) The number of classes in the input. density : array-like of ints (1d or 0d) The density of positive labels in the input. n_times : int Time calling the metric n_times times. Returns ------- array of floats shaped like (metrics, formats, samples, classes, density) Time in seconds. """ metrics = np.atleast_1d(metrics) samples = np.atleast_1d(samples) classes = np.atleast_1d(classes) density = np.atleast_1d(density) formats = np.atleast_1d(formats) out = np.zeros((len(metrics), len(formats), len(samples), len(classes), len(density)), dtype=float) it = itertools.product(samples, classes, density) for i, (s, c, d) in enumerate(it): _, y_true = make_multilabel_classification(n_samples=s, n_features=1, n_classes=c, n_labels=d * c, random_state=42) _, y_pred = make_multilabel_classification(n_samples=s, n_features=1, n_classes=c, n_labels=d * c, random_state=84) for j, f in enumerate(formats): f_true = f(y_true) f_pred = f(y_pred) for k, metric in enumerate(metrics): t = timeit(partial(metric, f_true, f_pred), number=n_times) out[k, j].flat[i] = t return out def _tabulate(results, metrics, formats): """Prints results by metric and format Uses the last ([-1]) value of other fields """ column_width = max(max(len(k) for k in formats) + 1, 8) first_width = max(len(k) for k in metrics) head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats)) row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats)) print(head_fmt.format('Metric', *formats, cw=column_width, fw=first_width)) for metric, row in zip(metrics, results[:, :, -1, -1, -1]): print(row_fmt.format(metric, *row, cw=column_width, fw=first_width)) def _plot(results, metrics, formats, title, x_ticks, x_label, format_markers=('x', '|', 'o', '+'), metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')): """ Plot the results by metric, format and some other variable given by x_label """ fig = plt.figure('scikit-learn multilabel metrics benchmarks') plt.title(title) ax = fig.add_subplot(111) for i, metric in enumerate(metrics): for j, format in enumerate(formats): ax.plot(x_ticks, results[i, 
j].flat, label='{}, {}'.format(metric, format), marker=format_markers[j], color=metric_colors[i % len(metric_colors)]) ax.set_xlabel(x_label) ax.set_ylabel('Time (s)') ax.legend() plt.show() if __name__ == "__main__": ap = argparse.ArgumentParser() ap.add_argument('metrics', nargs='*', default=sorted(METRICS), help='Specifies metrics to benchmark, defaults to all. ' 'Choices are: {}'.format(sorted(METRICS))) ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS), help='Specifies multilabel formats to benchmark ' '(defaults to all).') ap.add_argument('--samples', type=int, default=1000, help='The number of samples to generate') ap.add_argument('--classes', type=int, default=10, help='The number of classes') ap.add_argument('--density', type=float, default=.2, help='The average density of labels per sample') ap.add_argument('--plot', choices=['classes', 'density', 'samples'], default=None, help='Plot time with respect to this parameter varying ' 'up to the specified value') ap.add_argument('--n-steps', default=10, type=int, help='Plot this many points for each metric') ap.add_argument('--n-times', default=5, type=int, help="Time performance over n_times trials") args = ap.parse_args() if args.plot is not None: max_val = getattr(args, args.plot) if args.plot in ('classes', 'samples'): min_val = 2 else: min_val = 0 steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:] if args.plot in ('classes', 'samples'): steps = np.unique(np.round(steps).astype(int)) setattr(args, args.plot, steps) if args.metrics is None: args.metrics = sorted(METRICS) if args.formats is None: args.formats = sorted(FORMATS) results = benchmark([METRICS[k] for k in args.metrics], [FORMATS[k] for k in args.formats], args.samples, args.classes, args.density, args.n_times) _tabulate(results, args.metrics, args.formats) if args.plot is not None: print('Displaying plot', file=sys.stderr) title = ('Multilabel metrics with %s' % ', '.join('{0}={1}'.format(field, getattr(args, field)) for field in ['samples', 'classes', 'density'] if args.plot != field)) _plot(results, args.metrics, args.formats, title, steps, args.plot)
bsd-3-clause
JensTimmerman/radical.pilot
src/radical/pilot/utils/analysis.py
1
12671
import os # ------------------------------------------------------------------------------ # def get_experiment_frames(experiments, datadir=None): """ read profiles for all sessions in the given 'experiments' dict. That dict is expected to be like this: { 'test 1' : [ [ 'rp.session.thinkie.merzky.016609.0007', 'stampede popen sleep 1/1/1/1 (?)'] ], 'test 2' : [ [ 'rp.session.ip-10-184-31-85.merzky.016610.0112', 'stampede shell sleep 16/8/8/4' ] ], 'test 3' : [ [ 'rp.session.ip-10-184-31-85.merzky.016611.0013', 'stampede shell mdrun 16/8/8/4' ] ], 'test 4' : [ [ 'rp.session.titan-ext4.marksant1.016607.0005', 'titan shell sleep 1/1/1/1 a' ] ], 'test 5' : [ [ 'rp.session.titan-ext4.marksant1.016607.0006', 'titan shell sleep 1/1/1/1 b' ] ], 'test 6' : [ [ 'rp.session.ip-10-184-31-85.merzky.016611.0013', 'stampede - isolated', ], [ 'rp.session.ip-10-184-31-85.merzky.016612.0012', 'stampede - integrated', ], [ 'rp.session.titan-ext4.marksant1.016607.0006', 'blue waters - integrated' ] ] } name in ie. iname in t is a list of experiment names, and each label has a list of session/label pairs, where the label will be later used to label (duh) plots. we return a similar dict where the session IDs are data frames """ import pandas as pd exp_frames = dict() if not datadir: datadir = os.getcwd() print 'reading profiles in %s' % datadir for exp in experiments: print " - %s" % exp exp_frames[exp] = list() for sid, label in experiments[exp]: print " - %s" % sid import glob for prof in glob.glob ("%s/%s-pilot.*.prof" % (datadir, sid)): print " - %s" % prof frame = get_profile_frame (prof) exp_frames[exp].append ([frame, label]) return exp_frames # ------------------------------------------------------------------------------ # def get_profile_frame (prof): import pandas as pd return pd.read_csv(prof) # ------------------------------------------------------------------------------ # tmp = None def add_concurrency (frame, tgt, spec): """ add a column 'tgt' which is a cumulative sum of conditionals of enother row. The purpose is the following: if a unit enters a component, the tgt row counter is increased by 1, if the unit leaves the component, the counter is decreases by 1. For any time, the resulting row contains the number of units which is in the component. Or state. Or whatever. The arguments are: 'tgt' : name of the new column 'spec' : a set of filters to determine if a unit enters or leaves 'spec' is expected to be a dict of the following format: spec = { 'in' : [{'col1' : 'pat1', 'col2' : 'pat2'}, ...], 'out' : [{'col3' : 'pat3', 'col4' : 'pat4'}, ...] } where: 'in' : filter set to determine the unit entering 'out' : filter set to determine the unit leaving 'col' : name of column for which filter is defined 'event' : event which correlates to entering/leaving 'msg' : qualifier on the event, if event is not unique Example: spec = {'in' : [{'state' :'Executing'}], 'out' : [{'state' :'Done'}, {'state' :'Failed'}, {'state' :'Cancelled'}] } get_concurrency (df, 'concurrently_running', spec) """ import numpy # create a temporary row over which we can do the commulative sum # -------------------------------------------------------------------------- def _conc (row, spec): # row must match any filter dict in 'spec[in/out]' # for any filter dict it must match all col/pat pairs # for each in filter for f in spec['in']: match = 1 # for each col/val in that filter for col, pat in f.iteritems(): if row[col] != pat: match = 0 break if match: # one filter matched! 
# print " + : %-20s : %.2f : %-20s : %s " % (row['uid'], row['time'], row['event'], row['message']) return 1 # for each out filter for f in spec['out']: match = 1 # for each col/val in that filter for col, pat in f.iteritems(): if row[col] != pat: match = 0 break if match: # one filter matched! # print " - : %-20s : %.2f : %-20s : %s " % (row['uid'], row['time'], row['event'], row['message']) return -1 # no filter matched # print " : %-20s : %.2f : %-20s : %s " % (row['uid'], row['time'], row['event'], row['message']) return 0 # -------------------------------------------------------------------------- # we only want to later look at changes of the concurrency -- leading or trailing # idle times are to be ignored. We thus set repeating values of the cumsum to NaN, # so that they can be filtered out when ploting: df.dropna().plot(...). # That specifically will limit the plotted time range to the area of activity. # The full time range can still be plotted when ommitting the dropna() call. # -------------------------------------------------------------------------- def _time (x): global tmp if x != tmp: tmp = x else : x = numpy.NaN return x # -------------------------------------------------------------------------- # sanitize concurrency: negative values indicate incorrect event ordering, # so we set the repesctive values to 0 # -------------------------------------------------------------------------- def _abs (x): if x < 0: return numpy.NaN return x # -------------------------------------------------------------------------- frame[tgt] = frame.apply(lambda row: _conc(row, spec), axis=1).cumsum() frame[tgt] = frame.apply(lambda row: _abs (row[tgt]), axis=1) frame[tgt] = frame.apply(lambda row: _time(row[tgt]), axis=1) # print frame[[tgt, 'time']] # ------------------------------------------------------------------------------ # t0 = None def calibrate_frame(frame, spec): """ move the time axis of a profiling frame so that t_0 is at the first event matching the given 'spec'. 'spec' has the same format as described in 'add_concurrency' (list of dicts with col:pat filters) """ # -------------------------------------------------------------------------- def _find_t0 (row, spec): # row must match any filter dict in 'spec[in/out]' # for any filter dict it must match all col/pat pairs global t0 if t0 is not None: # already found t0 return # for each col/val in that filter for f in spec: match = 1 for col, pat in f.iteritems(): if row[col] != pat: match = 0 break if match: # one filter matched! t0 = row['time'] return # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- def _calibrate (row, t0): if t0 is None: # no t0... return return row['time'] - t0 # -------------------------------------------------------------------------- # we need to iterate twice over the frame: first to find t0, then to # calibrate the time axis global t0 t0 = None # no t0 frame.apply(lambda row: _find_t0 (row, spec), axis=1) if t0 == None: print "Can't recalibrate, no matching timestamp found" return frame['time'] = frame.apply(lambda row: _calibrate(row, t0 ), axis=1) # ------------------------------------------------------------------------------ # def create_plot(): """ create a plot object and tune its layout to our liking. 
""" import matplotlib.pyplot as plt fig, plot = plt.subplots(figsize=(12,6)) plot.xaxis.set_tick_params(width=1, length=7) plot.yaxis.set_tick_params(width=1, length=7) plot.spines['right' ].set_position(('outward', 10)) plot.spines['top' ].set_position(('outward', 10)) plot.spines['bottom'].set_position(('outward', 10)) plot.spines['left' ].set_position(('outward', 10)) plt.xticks(fontsize=14) plt.yticks(fontsize=14) fig.tight_layout() return fig, plot # ------------------------------------------------------------------------------ # def frame_plot (frames, axis, title=None, logx=False, logy=False, legend=True, figdir=None): """ plot the given axis from the give data frame. We create a plot, and plot all frames given in the list. The list is expected to contain [frame,label] pairs frames: list of tuples of dataframes and labels frames = [[stampede_df_1, 'stampede - popen'], [stampede_df_2, 'stampede - shell'], [stampede_df_3, 'stampede - ORTE' ]] axis: tuple of data frame column index and axis label axis = ['time', 'time (s)'] """ # create figure and layout fig, plot = create_plot() # set plot title if title: plot.set_title(title, y=1.05, fontsize=18) # plot the data frames # NOTE: we need to set labels separately, because of # https://github.com/pydata/pandas/issues/9542 labels = list() for frame, label in frames: try: frame.dropna().plot(ax=plot, logx=logx, logy=logy, x=axis[0][0], y=axis[1][0], drawstyle='steps', label=label, legend=False) except Exception as e: print "skipping frame '%s': '%s'" % (label, e) if legend: plot.legend(labels=labels, loc='upper right', fontsize=14, frameon=True) # set axis labels plot.set_xlabel(axis[0][1], fontsize=14) plot.set_ylabel(axis[1][1], fontsize=14) plot.set_frame_on(True) # save as png and pdf. Use the title as base for names if title: base = title else : base = "%s_%s" % (axis[0][1], axis[1][1]) # clean up base name -- only keep alphanum and such import re base = re.sub('[^a-zA-Z0-9\.\-]', '_', base) base = re.sub('_+', '_', base) if not figdir: figdir = os.getcwd() print 'saving %s/%s.png' % (figdir, base) fig.savefig('%s/%s.png' % (figdir, base), bbox_inches='tight') print 'saving %s/%s.pdf' % (figdir, base) fig.savefig('%s/%s.pdf' % (figdir, base), bbox_inches='tight') return fig, plot # ------------------------------------------------------------------------------ # def create_analytical_frame (idx, kind, args, limits, step): """ create an artificial data frame, ie. a data frame which does not contain data gathered from an experiment, but data representing an analytical construct of some 'kind'. idx: data frame column index to fill (a time column is always created) kind: construct to use (only 'rate' is supporte right now) args: construct specific parameters limits: time range for which data are to be created step: time steps for which data are to be created """ import pandas as pd # -------------------------------------------------------------------------- def _frange(start, stop, step): while start <= stop: yield start start += step # -------------------------------------------------------------------------- if kind == 'rate' : t_0 = args.get ('t_0', 0.0) rate = args.get ('rate', 1.0) data = list() for t in _frange(limits[0], limits[1], step): data.append ({'time': t+t_0, idx: t*rate}) return pd.DataFrame (data) else: raise ValueError ("No such frame kind '%s'" % kind) # ------------------------------------------------------------------------------
mit
sbg2133/miscellaneous_projects
carina/ItoNH.py
1
1115
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import aplpy
from astropy.wcs import WCS
import sys, os
from getIQU import IQU
from astropy import coordinates as coord
from astropy.coordinates import SkyCoord
from astropy import units as u
from scipy.interpolate import griddata

plt.ion()

root_dir = '/home/wizwit/miscellaneous_projects/carina/carinaData'
blast250_file = os.path.join(root_dir,
        'smooth/3.0_arcmin/carinaneb_250_smoothed_3.0_rl.fits')

beta = 1.27

def getPsi(path_to_file):
    I, Q, U, __, wcs = IQU(path_to_file)
    Pvals = np.sqrt(Q**2 + U**2)
    pvals = Pvals/I
    # pvals /= pol_eff[band_idx]
    psi = 0.5*np.arctan2(U, Q)
    return I, Q, U, wcs, psi

I, __, __, wcs_250, __ = getPsi(blast250_file)

#tau_d = (nu/nu0)**beta
# See Walker pg. 71
# nu0 = frequency at which dust emission becomes optically thin
#nu0 = 0.103 * Td # 0.103 (THz/K) * Td
#Inu_dust = Bnu(Td)*(1.0 - np.exp(1.0 - e**(-1.0*tau_d)) # See Walker pg. 69
# Av = 1.086*tau_d
# N_H = 1.79e21 * Av # (atoms/cm**2 mag)
# 1) Solve tau_d for temperature
# 2) Plug into Inu_dust equation
gpl-3.0
blink1073/scikit-image
doc/examples/edges/plot_active_contours.py
4
3317
""" ==================== Active Contour Model ==================== The active contour model is a method to fit open or closed splines to lines or edges in an image. It works by minimising an energy that is in part defined by the image and part by the spline's shape: length and smoothness. The minimization is done implicitly in the shape energy and explicitly in the image energy. In the following two examples the active contour model is used (1) to segment the face of a person from the rest of an image by fitting a closed curve to the edges of the face and (2) to find the darkest curve between two fixed points while obeying smoothness considerations. Typically it is a good idea to smooth images a bit before analyzing, as done in the following examples. .. [1] *Snakes: Active contour models*. Kass, M.; Witkin, A.; Terzopoulos, D. International Journal of Computer Vision 1 (4): 321 (1988). We initialize a circle around the astronaut's face and use the default boundary condition ``bc='periodic'`` to fit a closed curve. The default parameters ``w_line=0, w_edge=1`` will make the curve search towards edges, such as the boundaries of the face. """ import numpy as np import matplotlib.pyplot as plt from skimage.color import rgb2gray from skimage import data from skimage.filters import gaussian_filter from skimage.segmentation import active_contour # Test scipy version, since active contour is only possible # with recent scipy version import scipy scipy_version = list(map(int, scipy.__version__.split('.'))) new_scipy = scipy_version[0] > 0 or \ (scipy_version[0] == 0 and scipy_version[1] >= 14) img = data.astronaut() img = rgb2gray(img) s = np.linspace(0, 2*np.pi, 400) x = 220 + 100*np.cos(s) y = 100 + 100*np.sin(s) init = np.array([x, y]).T if not new_scipy: print('You are using an old version of scipy. ' 'Active contours is implemented for scipy versions ' '0.14.0 and above.') if new_scipy: snake = active_contour(gaussian_filter(img, 3), init, alpha=0.015, beta=10, gamma=0.001) fig = plt.figure(figsize=(7, 7)) ax = fig.add_subplot(111) plt.gray() ax.imshow(img) ax.plot(init[:, 0], init[:, 1], '--r', lw=3) ax.plot(snake[:, 0], snake[:, 1], '-b', lw=3) ax.set_xticks([]), ax.set_yticks([]) ax.axis([0, img.shape[1], img.shape[0], 0]) """ .. image:: PLOT2RST.current_figure Here we initialize a straight line between two points, `(5, 136)` and `(424, 50)`, and require that the spline has its end points there by giving the boundary condition `bc='fixed'`. We furthermore make the algorithm search for dark lines by giving a negative `w_line` value. """ img = data.text() x = np.linspace(5, 424, 100) y = np.linspace(136, 50, 100) init = np.array([x, y]).T if new_scipy: snake = active_contour(gaussian_filter(img, 1), init, bc='fixed', alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1) fig = plt.figure(figsize=(9, 5)) ax = fig.add_subplot(111) plt.gray() ax.imshow(img) ax.plot(init[:, 0], init[:, 1], '--r', lw=3) ax.plot(snake[:, 0], snake[:, 1], '-b', lw=3) ax.set_xticks([]), ax.set_yticks([]) ax.axis([0, img.shape[1], img.shape[0], 0]) plt.show() """ .. image:: PLOT2RST.current_figure """
bsd-3-clause
KitwareMedical/ITKTubeTK
examples/archive/SegmentVesselsUsingNeuralNetworks/scripts/PreProcessing.py
4
6399
#!/usr/bin/python ########################################################################### # PreProcessing.py : # # Iterate through the expert labelmap and create 65x65 patches around the # central pixel. All positive pixels are used as positives input cases. # The same amount of negatives is randomly picked. For each input patch, # the corresponding filename and expected output are written to a text file # and will be used later to create the database. # ########################################################################### import os import glob import json import matplotlib.image as mpimg import matplotlib.pyplot as plt import random import math # Create image set and save expected output def createImgSet(expertImg, inputImg, filenamePrefix, fileOutputDir, textFile): w = 32 # Patch size count = 0 # Input count negativeImageIndex = [] # Whole image index giving negative output negativeIndex = [] # Vessel bound index giving negative output # Write filename and expected output textFile = open(textFile, "a") textFile.truncate() # Erase file resample = 1 # WARNING: resample pixels to reduce training size for Debug # Iterate through the expert label map for i in range(0, expertImg.shape[0], resample): for j in range(0, expertImg.shape[1], resample): if j > w and j + w + 1 < inputImg.shape[1]: if i > w and i + w + 1 < inputImg.shape[0]: # Centerline pixel (positive) if expertImg[i, j] > 0.5: count += 1 filename = os.path.join( "1", filenamePrefix + "_" + str(i) + "_" + str(j) + ".png") textFile.write(filename + " " + str(1) + "\n") plt.imsave(os.path.join(fileOutputDir, filename), inputImg[i - w:i + w + 1, j - w:j + w + 1], cmap='Greys_r') # Vessel bound pixel (negative) elif expertImg[i, j] > 0: negativeIndex.append([i, j]) # Background pixel (negative) else: negativeImageIndex.append([i, j]) # Pick random negatives from vessel bound rndmNegativeInd = random.sample(negativeIndex, int(math.ceil(0.8 * count))) for [i, j] in rndmNegativeInd: filename = os.path.join("0", filenamePrefix + "_" + str(i) + "_" + str(j) + ".png") textFile.write(filename + " " + str(0) + "\n") plt.imsave(os.path.join(fileOutputDir, filename), inputImg[i - w:i + w + 1, j - w:j + w + 1], cmap='Greys_r') # Pick random negatives from the entire image rndmNegativeImageInd = random.sample( negativeImageIndex, int(math.ceil(0.2 * count))) for [i, j] in rndmNegativeImageInd: filename = os.path.join("0", filenamePrefix + "_" + str(i) + "_" + str(j) + ".png") textFile.write(filename + " " + str(0) + "\n") plt.imsave(os.path.join(fileOutputDir, filename), inputImg[i - w:i + w + 1, j - w:j + w + 1], cmap='Greys_r') textFile.close() print(count) ######## # Main # ######## # Path variable script_params = json.load(open('params.json')) caffe_root = script_params['CAFFE_SRC_ROOT'] hardDrive_root = script_params['CNN_DATA_ROOT'] proj_rel_path = script_params['PROJECT_REL_PATH'] caffe_proj_root = os.path.join(caffe_root, "data", proj_rel_path) hardDrive_proj_root = os.path.join(hardDrive_root, proj_rel_path) trainDataDir = os.path.join(hardDrive_proj_root, "training") valDataDir = os.path.join(hardDrive_proj_root, "testing") # Text file trainFilename = os.path.join(caffe_proj_root, "train.txt") trainFile = open(trainFilename, "w+") trainFile.truncate() # Erase file trainFile.close() valFilename = os.path.join(caffe_proj_root, "val.txt") valFile = open(valFilename, "w+") valFile.truncate() # Erase file valFile.close() # Output patches directories trainFileOutputDir = os.path.join(trainDataDir, "out") if not 
os.path.exists(trainFileOutputDir): os.mkdir(trainFileOutputDir) for label in range(2): curLabelOutputDir = os.path.join(trainFileOutputDir, str(label)) if not os.path.exists(curLabelOutputDir): os.mkdir(curLabelOutputDir) valFileOutputDir = os.path.join(valDataDir, "out") if not os.path.exists(valFileOutputDir): os.mkdir(valFileOutputDir) for label in range(2): curLabelOutputDir = os.path.join(valFileOutputDir, str(label)) if not os.path.exists(curLabelOutputDir): os.mkdir(curLabelOutputDir) # Images directories trainExpertDir = os.path.join(trainDataDir, "expert") trainImgDir = os.path.join(trainDataDir, "images") valExpertDir = os.path.join(valDataDir, "expert") valImgDir = os.path.join(valDataDir, "images") # Create train set trainImages = glob.glob(os.path.join(trainImgDir, "*.png")) for trainImage in trainImages: print(trainImage) # Get image ID trainImagePrefix = os.path.basename(os.path.splitext(trainImage)[0]) # Set filename trainExpertFile = os.path.join( trainExpertDir, trainImagePrefix + "_expert.png") trainImageFile = os.path.join(trainImgDir, trainImagePrefix + ".png") # Load images trainExpert = mpimg.imread(trainExpertFile) # print trainExpert.shape # trainExpert=trainExpert[:,:,0] trainImg = mpimg.imread(trainImageFile) # trainImg=trainImg[:,:,0] # Write images and text files createImgSet(trainExpert, trainImg, trainImagePrefix, trainFileOutputDir, trainFilename) # Create validation set valImages = glob.glob(os.path.join(valImgDir, "*.png")) for valImage in valImages: print(valImage) # Get image ID valImagePrefix = os.path.basename(os.path.splitext(valImage)[0]) # Set filename valExpertFilename = os.path.join( valExpertDir, valImagePrefix + "_expert.png") valImgFilename = os.path.join(valImgDir, valImagePrefix + ".png") # Load images valExpert = mpimg.imread(valExpertFilename) # valExpert=valExpert[:,:,0] valImg = mpimg.imread(valImgFilename) # valImg=valImg[:,:,0] # Write images and text files createImgSet(valExpert, valImg, valImagePrefix, valFileOutputDir, valFilename)
apache-2.0
mrshu/scikit-learn
examples/plot_permutation_test_for_classification.py
1
2236
""" ================================================================= Test with permutations the significance of a classification score ================================================================= In order to test if a classification score is significative a technique in repeating the classification procedure after randomizing, permuting, the labels. The p-value is then given by the percentage of runs for which the score obtained is greater than the classification score obtained in the first place. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # License: BSD print __doc__ import numpy as np import pylab as pl from sklearn.svm import SVC from sklearn.cross_validation import StratifiedKFold, permutation_test_score from sklearn import datasets from sklearn.metrics import zero_one_score ############################################################################## # Loading a dataset iris = datasets.load_iris() X = iris.data y = iris.target n_classes = np.unique(y).size # Some noisy data not correlated random = np.random.RandomState(seed=0) E = random.normal(size=(len(X), 2200)) # Add noisy data to the informative features for make the task harder X = np.c_[X, E] svm = SVC(kernel='linear') cv = StratifiedKFold(y, 2) score, permutation_scores, pvalue = permutation_test_score( svm, X, y, zero_one_score, cv=cv, n_permutations=100, n_jobs=1) print "Classification score %s (pvalue : %s)" % (score, pvalue) ############################################################################### # View histogram of permutation scores pl.hist(permutation_scores, 20, label='Permutation scores') ylim = pl.ylim() # BUG: vlines(..., linestyle='--') fails on older versions of matplotlib #pl.vlines(score, ylim[0], ylim[1], linestyle='--', # color='g', linewidth=3, label='Classification Score' # ' (pvalue %s)' % pvalue) #pl.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--', # color='k', linewidth=3, label='Luck') pl.plot(2 * [score], ylim, '--g', linewidth=3, label='Classification Score' ' (pvalue %s)' % pvalue) pl.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck') pl.ylim(ylim) pl.legend() pl.xlabel('Score') pl.show()
bsd-3-clause
aabadie/scikit-learn
examples/mixture/plot_concentration_prior.py
25
5631
""" ======================================================================== Concentration Prior Type Analysis of Variation Bayesian Gaussian Mixture ======================================================================== This example plots the ellipsoids obtained from a toy dataset (mixture of three Gaussians) fitted by the ``BayesianGaussianMixture`` class models with a Dirichlet distribution prior (``weight_concentration_prior_type='dirichlet_distribution'``) and a Dirichlet process prior (``weight_concentration_prior_type='dirichlet_process'``). On each figure, we plot the results for three different values of the weight concentration prior. The ``BayesianGaussianMixture`` class can adapt its number of mixture componentsautomatically. The parameter ``weight_concentration_prior`` has a direct link with the resulting number of components with non-zero weights. Specifying a low value for the concentration prior will make the model put most of the weight on few components set the remaining components weights very close to zero. High values of the concentration prior will allow a larger number of components to be active in the mixture. The Dirichlet process prior allows to define an infinite number of components and automatically selects the correct number of components: it activates a component only if it is necessary. On the contrary the classical finite mixture model with a Dirichlet distribution prior will favor more uniformly weighted components and therefore tends to divide natural clusters into unnecessary sub-components. """ # Author: Thierry Guillemot <thierry.guillemot.work@gmail.com> # License: BSD 3 clause import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from sklearn.mixture import BayesianGaussianMixture print(__doc__) def plot_ellipses(ax, weights, means, covars): for n in range(means.shape[0]): eig_vals, eig_vecs = np.linalg.eigh(covars[n]) unit_eig_vec = eig_vecs[0] / np.linalg.norm(eig_vecs[0]) angle = np.arctan2(unit_eig_vec[1], unit_eig_vec[0]) # Ellipse needs degrees angle = 180 * angle / np.pi # eigenvector normalization eig_vals = 2 * np.sqrt(2) * np.sqrt(eig_vals) ell = mpl.patches.Ellipse(means[n], eig_vals[0], eig_vals[1], 180 + angle) ell.set_clip_box(ax.bbox) ell.set_alpha(weights[n]) ell.set_facecolor('#56B4E9') ax.add_artist(ell) def plot_results(ax1, ax2, estimator, X, y, title, plot_title=False): ax1.set_title(title) ax1.scatter(X[:, 0], X[:, 1], s=5, marker='o', color=colors[y], alpha=0.8) ax1.set_xlim(-2., 2.) ax1.set_ylim(-3., 3.) 
ax1.set_xticks(()) ax1.set_yticks(()) plot_ellipses(ax1, estimator.weights_, estimator.means_, estimator.covariances_) ax2.get_xaxis().set_tick_params(direction='out') ax2.yaxis.grid(True, alpha=0.7) for k, w in enumerate(estimator.weights_): ax2.bar(k - .45, w, width=0.9, color='#56B4E9', zorder=3) ax2.text(k, w + 0.007, "%.1f%%" % (w * 100.), horizontalalignment='center') ax2.set_xlim(-.6, 2 * n_components - .4) ax2.set_ylim(0., 1.1) ax2.tick_params(axis='y', which='both', left='off', right='off', labelleft='off') ax2.tick_params(axis='x', which='both', top='off') if plot_title: ax1.set_ylabel('Estimated Mixtures') ax2.set_ylabel('Weight of each component') # Parameters of the dataset random_state, n_components, n_features = 2, 3, 2 colors = np.array(['#0072B2', '#F0E442', '#D55E00']) covars = np.array([[[.7, .0], [.0, .1]], [[.5, .0], [.0, .1]], [[.5, .0], [.0, .1]]]) samples = np.array([200, 500, 200]) means = np.array([[.0, -.70], [.0, .0], [.0, .70]]) # mean_precision_prior= 0.8 to minimize the influence of the prior estimators = [ ("Finite mixture with a Dirichlet distribution\nprior and " r"$\gamma_0=$", BayesianGaussianMixture( weight_concentration_prior_type="dirichlet_distribution", n_components=2 * n_components, reg_covar=0, init_params='random', max_iter=1500, mean_precision_prior=.8, random_state=random_state), [0.001, 1, 1000]), ("Infinite mixture with a Dirichlet process\n prior and" r"$\gamma_0=$", BayesianGaussianMixture( weight_concentration_prior_type="dirichlet_process", n_components=2 * n_components, reg_covar=0, init_params='random', max_iter=1500, mean_precision_prior=.8, random_state=random_state), [1, 1000, 100000])] # Generate data rng = np.random.RandomState(random_state) X = np.vstack([ rng.multivariate_normal(means[j], covars[j], samples[j]) for j in range(n_components)]) y = np.concatenate([j * np.ones(samples[j], dtype=int) for j in range(n_components)]) # Plot results in two different figures for (title, estimator, concentrations_prior) in estimators: plt.figure(figsize=(4.7 * 3, 8)) plt.subplots_adjust(bottom=.04, top=0.90, hspace=.05, wspace=.05, left=.03, right=.99) gs = gridspec.GridSpec(3, len(concentrations_prior)) for k, concentration in enumerate(concentrations_prior): estimator.weight_concentration_prior = concentration estimator.fit(X) plot_results(plt.subplot(gs[0:2, k]), plt.subplot(gs[2, k]), estimator, X, y, r"%s$%.1e$" % (title, concentration), plot_title=k == 0) plt.show()
bsd-3-clause
jeffery-do/Vizdoombot
doom/lib/python3.5/site-packages/skimage/viewer/canvastools/linetool.py
43
6911
import numpy as np from matplotlib import lines from ...viewer.canvastools.base import CanvasToolBase, ToolHandles __all__ = ['LineTool', 'ThickLineTool'] class LineTool(CanvasToolBase): """Widget for line selection in a plot. Parameters ---------- manager : Viewer or PlotPlugin. Skimage viewer or plot plugin object. on_move : function Function called whenever a control handle is moved. This function must accept the end points of line as the only argument. on_release : function Function called whenever the control handle is released. on_enter : function Function called whenever the "enter" key is pressed. maxdist : float Maximum pixel distance allowed when selecting control handle. line_props : dict Properties for :class:`matplotlib.lines.Line2D`. handle_props : dict Marker properties for the handles (also see :class:`matplotlib.lines.Line2D`). Attributes ---------- end_points : 2D array End points of line ((x1, y1), (x2, y2)). """ def __init__(self, manager, on_move=None, on_release=None, on_enter=None, maxdist=10, line_props=None, handle_props=None, **kwargs): super(LineTool, self).__init__(manager, on_move=on_move, on_enter=on_enter, on_release=on_release, **kwargs) props = dict(color='r', linewidth=1, alpha=0.4, solid_capstyle='butt') props.update(line_props if line_props is not None else {}) self.linewidth = props['linewidth'] self.maxdist = maxdist self._active_pt = None x = (0, 0) y = (0, 0) self._end_pts = np.transpose([x, y]) self._line = lines.Line2D(x, y, visible=False, animated=True, **props) self.ax.add_line(self._line) self._handles = ToolHandles(self.ax, x, y, marker_props=handle_props) self._handles.set_visible(False) self.artists = [self._line, self._handles.artist] if on_enter is None: def on_enter(pts): x, y = np.transpose(pts) print("length = %0.2f" % np.sqrt(np.diff(x)**2 + np.diff(y)**2)) self.callback_on_enter = on_enter self.manager.add_tool(self) @property def end_points(self): return self._end_pts.astype(int) @end_points.setter def end_points(self, pts): self._end_pts = np.asarray(pts) self._line.set_data(np.transpose(pts)) self._handles.set_data(np.transpose(pts)) self._line.set_linewidth(self.linewidth) self.set_visible(True) self.redraw() def hit_test(self, event): if event.button != 1 or not self.ax.in_axes(event): return False idx, px_dist = self._handles.closest(event.x, event.y) if px_dist < self.maxdist: self._active_pt = idx return True else: self._active_pt = None return False def on_mouse_press(self, event): self.set_visible(True) if self._active_pt is None: self._active_pt = 0 x, y = event.xdata, event.ydata self._end_pts = np.array([[x, y], [x, y]]) def on_mouse_release(self, event): if event.button != 1: return self._active_pt = None self.callback_on_release(self.geometry) self.redraw() def on_move(self, event): if event.button != 1 or self._active_pt is None: return if not self.ax.in_axes(event): return self.update(event.xdata, event.ydata) self.callback_on_move(self.geometry) def update(self, x=None, y=None): if x is not None: self._end_pts[self._active_pt, :] = x, y self.end_points = self._end_pts @property def geometry(self): return self.end_points class ThickLineTool(LineTool): """Widget for line selection in a plot. The thickness of the line can be varied using the mouse scroll wheel, or with the '+' and '-' keys. Parameters ---------- manager : Viewer or PlotPlugin. Skimage viewer or plot plugin object. on_move : function Function called whenever a control handle is moved. This function must accept the end points of line as the only argument. 
on_release : function Function called whenever the control handle is released. on_enter : function Function called whenever the "enter" key is pressed. on_change : function Function called whenever the line thickness is changed. maxdist : float Maximum pixel distance allowed when selecting control handle. line_props : dict Properties for :class:`matplotlib.lines.Line2D`. handle_props : dict Marker properties for the handles (also see :class:`matplotlib.lines.Line2D`). Attributes ---------- end_points : 2D array End points of line ((x1, y1), (x2, y2)). """ def __init__(self, manager, on_move=None, on_enter=None, on_release=None, on_change=None, maxdist=10, line_props=None, handle_props=None): super(ThickLineTool, self).__init__(manager, on_move=on_move, on_enter=on_enter, on_release=on_release, maxdist=maxdist, line_props=line_props, handle_props=handle_props) if on_change is None: def on_change(*args): pass self.callback_on_change = on_change def on_scroll(self, event): if not event.inaxes: return if event.button == 'up': self._thicken_scan_line() elif event.button == 'down': self._shrink_scan_line() def on_key_press(self, event): if event.key == '+': self._thicken_scan_line() elif event.key == '-': self._shrink_scan_line() def _thicken_scan_line(self): self.linewidth += 1 self.update() self.callback_on_change(self.geometry) def _shrink_scan_line(self): if self.linewidth > 1: self.linewidth -= 1 self.update() self.callback_on_change(self.geometry) if __name__ == '__main__': # pragma: no cover from ... import data from ...viewer import ImageViewer image = data.camera() viewer = ImageViewer(image) h, w = image.shape line_tool = ThickLineTool(viewer) line_tool.end_points = ([w/3, h/2], [2*w/3, h/2]) viewer.show()
mit
CopyChat/Plotting
Python/PythonNetCDF.py
1
10821
''' NAME NetCDF with Python PURPOSE To demonstrate how to read and write data with NetCDF files using a NetCDF file from the NCEP/NCAR Reanalysis. Plotting using Matplotlib and Basemap is also shown. PROGRAMMER(S) Chris Slocum REVISION HISTORY 20140320 -- Initial version created and posted online 20140722 -- Added basic error handling to ncdump Thanks to K.-Michael Aye for highlighting the issue REFERENCES netcdf4-python -- http://code.google.com/p/netcdf4-python/ NCEP/NCAR Reanalysis -- Kalnay et al. 1996 http://dx.doi.org/10.1175/1520-0477(1996)077<0437:TNYRP>2.0.CO;2 ''' import datetime as dt # Python standard library datetime module import numpy as np from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/ import matplotlib.pyplot as plt from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid def ncdump(nc_fid, verb=True): ''' ncdump outputs dimensions, variables and their attribute information. The information is similar to that of NCAR's ncdump utility. ncdump requires a valid instance of Dataset. Parameters ---------- nc_fid : netCDF4.Dataset A netCDF4 dateset object verb : Boolean whether or not nc_attrs, nc_dims, and nc_vars are printed Returns ------- nc_attrs : list A Python list of the NetCDF file global attributes nc_dims : list A Python list of the NetCDF file dimensions nc_vars : list A Python list of the NetCDF file variables ''' def print_ncattr(key): """ Prints the NetCDF file attributes for a given key Parameters ---------- key : unicode a valid netCDF4.Dataset.variables key """ try: print "\t\ttype:", repr(nc_fid.variables[key].dtype) for ncattr in nc_fid.variables[key].ncattrs(): print '\t\t%s:' % ncattr,\ repr(nc_fid.variables[key].getncattr(ncattr)) except KeyError: print "\t\tWARNING: %s does not contain variable attributes" % key # NetCDF global attributes nc_attrs = nc_fid.ncattrs() if verb: print "NetCDF Global Attributes:" for nc_attr in nc_attrs: print '\t%s:' % nc_attr, repr(nc_fid.getncattr(nc_attr)) nc_dims = [dim for dim in nc_fid.dimensions] # list of nc dimensions # Dimension shape information. if verb: print "NetCDF dimension information:" for dim in nc_dims: print "\tName:", dim print "\t\tsize:", len(nc_fid.dimensions[dim]) print_ncattr(dim) # Variable information. nc_vars = [var for var in nc_fid.variables] # list of nc variables if verb: print "NetCDF variable information:" for var in nc_vars: if var not in nc_dims: print '\tName:', var print "\t\tdimensions:", nc_fid.variables[var].dimensions print "\t\tsize:", nc_fid.variables[var].size print_ncattr(var) return nc_attrs, nc_dims, nc_vars nc_f = './CLM45_Micro_UW_SRF.2005120100.for.test.nc' # Your filename nc_fid = Dataset(nc_f, 'r') # Dataset is the class behavior to open the file # and create an instance of the ncCDF4 class nc_attrs, nc_dims, nc_vars = ncdump(nc_fid) # Extract data from NetCDF file lats = nc_fid.variables['xlat'][:] # extract/copy the data lons = nc_fid.variables['xlon'][:] time = nc_fid.variables['time'][:] rsds = nc_fid.variables['rsds'][:] # shape is time, lat, lon as shown above time_idx = 237 # some random day in 2012 # Python and the renalaysis are slightly off in time so this fixes that problem offset = dt.timedelta(hours=48) # List of all times in the file as datetime objects dt_time = [dt.date(1, 1, 1) + dt.timedelta(hours=t/20) - offset\ for t in time] cur_time = dt_time[time_idx] # Plot of global temperature on our random day fig = plt.figure() fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9) # Setup the map. 
See http://matplotlib.org/basemap/users/mapsetup.html # for other projections. m = Basemap(projection='moll', llcrnrlat=-90, urcrnrlat=90,\ llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0) m.drawcoastlines() m.drawmapboundary() # Make the plot continuous test=rsds[0,:,:] print test.shape print rsds.shape print lons.shape rsds_cyclic, lons_cyclic = addcyclic(rsds[time_idx,:,:], lons) # Shift the grid so lons go from -180 to 180 instead of 0 to 360. rsds_cyclic, lons_cyclic = shiftgrid(180., rsds_cyclic, lons_cyclic, start=False) # Create 2D lat/lon arrays for Basemap lon2d, lat2d = np.meshgrid(lons_cyclic, lats) # Transforms lat/lon into plotting coordinates for projection x, y = m(lon2d, lat2d) # Plot of rsds temperature with 11 contour intervals cs = m.contourf(x, y, rsds_cyclic, 11, cmap=plt.cm.Spectral_r) cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5) cbar.set_label("%s (%s)" % (nc_fid.variables['rsds'].var_desc,\ nc_fid.variables['rsds'].units)) plt.title("%s on %s" % (nc_fid.variables['rsds'].var_desc, cur_time)) # Writing NetCDF files # For this example, we will create two NetCDF4 files. One with the global rsds # temperature departure from its value at Darwin, Australia. The other with # the temperature profile for the entire year at Darwin. darwin = {'name': 'Darwin, Australia', 'lat': -12.45, 'lon': 130.83} # Find the nearest latitude and longitude for Darwin lat_idx = np.abs(lats - darwin['lat']).argmin() lon_idx = np.abs(lons - darwin['lon']).argmin() # Simple example: temperature profile for the entire year at Darwin. # Open a new NetCDF file to write the data to. For format, you can choose from # 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4' w_nc_fid = Dataset('darwin_2012.nc', 'w', format='NETCDF4') w_nc_fid.description = "NCEP/NCAR Reanalysis %s from its value at %s. %s" %\ (nc_fid.variables['rsds'].var_desc.lower(),\ darwin['name'], nc_fid.description) # Using our previous dimension info, we can create the new time dimension # Even though we know the size, we are going to set the size to unknown w_nc_fid.createDimension('time', None) w_nc_dim = w_nc_fid.createVariable('time', nc_fid.variables['time'].dtype,\ ('time',)) # You can do this step yourself but someone else did the work for us. for ncattr in nc_fid.variables['time'].ncattrs(): w_nc_dim.setncattr(ncattr, nc_fid.variables['time'].getncattr(ncattr)) # Assign the dimension data to the new NetCDF file. 
w_nc_fid.variables['time'][:] = time w_nc_var = w_nc_fid.createVariable('rsds', 'f8', ('time')) w_nc_var.setncatts({'long_name': u"mean Daily Air temperature",\ 'units': u"degK", 'level_desc': u'Surface',\ 'var_desc': u"Air temperature",\ 'statistic': u'Mean\nM'}) w_nc_fid.variables['rsds'][:] = rsds[time_idx, lat_idx, lon_idx] w_nc_fid.close() # close the new file # A plot of the temperature profile for Darwin in 2012 fig = plt.figure() plt.plot(dt_time, rsds[:, lat_idx, lon_idx], c='r') plt.plot(dt_time[time_idx], rsds[time_idx, lat_idx, lon_idx], c='b', marker='o') plt.text(dt_time[time_idx], rsds[time_idx, lat_idx, lon_idx], cur_time,\ ha='right') fig.autofmt_xdate() plt.ylabel("%s (%s)" % (nc_fid.variables['rsds'].var_desc,\ nc_fid.variables['rsds'].units)) plt.xlabel("Time") plt.title("%s from\n%s for %s" % (nc_fid.variables['rsds'].var_desc,\ darwin['name'], cur_time.year)) # Complex example: global temperature departure from its value at Darwin departure = rsds[:, :, :] - rsds[:, lat_idx, lon_idx].reshape((time.shape[0],\ 1, 1)) # Open a new NetCDF file to write the data to. For format, you can choose from # 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4' w_nc_fid = Dataset('rsds.departure.sig995.2012.nc', 'w', format='NETCDF4') w_nc_fid.description = "The departure of the NCEP/NCAR Reanalysis " +\ "%s from its value at %s. %s" %\ (nc_fid.variables['rsds'].var_desc.lower(),\ darwin['name'], nc_fid.description) # Using our previous dimension information, we can create the new dimensions data = {} for dim in nc_dims: w_nc_fid.createDimension(dim, nc_fid.variables[dim].size) data[dim] = w_nc_fid.createVariable(dim, nc_fid.variables[dim].dtype,\ (dim,)) # You can do this step yourself but someone else did the work for us. for ncattr in nc_fid.variables[dim].ncattrs(): data[dim].setncattr(ncattr, nc_fid.variables[dim].getncattr(ncattr)) # Assign the dimension data to the new NetCDF file. 
w_nc_fid.variables['time'][:] = time w_nc_fid.variables['lat'][:] = lats w_nc_fid.variables['lon'][:] = lons # Ok, time to create our departure variable w_nc_var = w_nc_fid.createVariable('rsds_dep', 'f8', ('time', 'lat', 'lon')) w_nc_var.setncatts({'long_name': u"mean Daily Air temperature departure",\ 'units': u"degK", 'level_desc': u'Surface',\ 'var_desc': u"Air temperature departure",\ 'statistic': u'Mean\nM'}) w_nc_fid.variables['rsds_dep'][:] = departure w_nc_fid.close() # close the new file # Rounded maximum absolute value of the departure used for contouring max_dep = np.round(np.abs(departure[time_idx, :, :]).max()+5., decimals=-1) # Generate a figure of the departure for a single day fig = plt.figure() fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9) m = Basemap(projection='moll', llcrnrlat=-90, urcrnrlat=90,\ llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0) m.drawcoastlines() m.drawmapboundary() dep_cyclic, lons_cyclic = addcyclic(departure[time_idx, :, :], lons) dep_cyclic, lons_cyclic = shiftgrid(180., dep_cyclic, lons_cyclic, start=False) lon2d, lat2d = np.meshgrid(lons_cyclic, lats) x, y = m(lon2d, lat2d) levels = np.linspace(-max_dep, max_dep, 11) cs = m.contourf(x, y, dep_cyclic, levels=levels, cmap=plt.cm.bwr) x, y = m(darwin['lon'], darwin['lat']) plt.plot(x, y, c='c', marker='o') plt.text(x, y, 'Darwin,\nAustralia', color='r', weight='semibold') cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5) cbar.set_label("%s departure (%s)" % (nc_fid.variables['rsds'].var_desc,\ nc_fid.variables['rsds'].units)) plt.title("Departure of Global %s from\n%s for %s" %\ (nc_fid.variables['rsds'].var_desc, darwin['name'], cur_time)) plt.show() # Close original NetCDF file. nc_fid.close()
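For quick reference, the read/write pattern demonstrated above boils down to a few netCDF4 calls. The sketch below is self-contained and uses placeholder names ('example.nc', 'subset.nc', 'air') rather than the reanalysis file and variables used in the script.

from netCDF4 import Dataset
import numpy as np

# read: open an existing file and pull one variable plus an attribute
nc = Dataset('example.nc', 'r')            # placeholder filename
air = nc.variables['air'][:]               # placeholder variable name
units = nc.variables['air'].getncattr('units')
nc.close()

# write: create a new file with one unlimited dimension and copy a small slice
out = Dataset('subset.nc', 'w', format='NETCDF4')
out.createDimension('time', None)          # None makes the dimension unlimited
var = out.createVariable('air', 'f8', ('time',))
var.units = units                          # attributes can be assigned directly
var[0:10] = np.asarray(air).ravel()[:10]   # writing to a slice grows the unlimited dimension
out.close()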
gpl-3.0
mirestrepo/voxels-at-lems
registration_eval/results/compute_trans_geo_accuracy.py
1
13935
#!/usr/bin/env python # encoding: utf-8 """ compute_transformation_error.py Created by Maria Isabel Restrepo on 2012-09-24. Copyright (c) 2012 . All rights reserved. This script computes the distances betweeen an estimated similarity transformation and its ground truth The transformation is used to transform a "source" coordinate system into a "target coordinate system" To compute the error between the translations, the L2 norm diference translation vectors in the "source coordinate system" is computed. Since distances are preserved under R and T, only scale is applied. The rotation error is computed as the half angle between the normalized queternions i.e acos(|<q1,q2>|) in [0, pi/2] """ import os import sys import logging import argparse import vpcl_adaptor as vpcl import numpy as np from numpy import linalg as LA import transformations as tf import math import matplotlib.pyplot as plt sys.path.append(os.pardir) import reg3d_transformations as reg3d_T LOG = None """Compute the accuracy between the LIDAR fiducial points and corresponding geo-register correspondances""" def compute_ref_accuracy(fid_path, original_corrs_path, geo_tform): #Load fiducial .ply fid = open(fid_path, 'r') fid_points = np.genfromtxt(fid, dtype=float, delimiter=' ', skip_header=9) fid.close() #Load original corrs .ply fid = open(original_corrs_path, 'r') original_corrs = np.genfromtxt(fid, dtype=float, delimiter=' ', skip_header=9) fid.close() #Load transformation #************GEO**************" Tfis = open(geo_tform, 'r') lines = [] lines = Tfis.readlines() scale_geo = float(lines[0]) Ss_geo = tf.scale_matrix(scale_geo) quat_line = lines[1].split(" ") quat_geo = np.array([float(quat_line[3]), float(quat_line[0]), float(quat_line[1]), float(quat_line[2])]) Rs_geo = tf.quaternion_matrix(quat_geo) trans_line = lines[2].split(" ") trans_geo = np.array([float(trans_line[0]), float(trans_line[1]), float(trans_line[2])]) Tfis.close() Hs_geo = Rs_geo.copy() Hs_geo[:3, 3] = trans_geo[:3] Hs_geo = Ss_geo.dot(Hs_geo) LOG.debug("\n******Geo***** \n Scale: \n%s \nR:\n%s \nT:\n%s \nH:\n%s", Ss_geo, Rs_geo, trans_geo, Hs_geo) #Compute the "reference error" #i.e. fiducial points - geo registered correspondances npoints, c = fid_points.shape if npoints != 30: LOG.warn("Number of fiducial point is NOT 30") if c != 3: LOG.error("Fiducial points has the wrong number of dimensions") # import code; code.interact(local=locals()) fid_points_hom = np.hstack((fid_points, np.ones([npoints, 1]))).T original_corrs_hom = np.hstack((original_corrs, np.ones([npoints, 1]))).T geo_corrs_hom = Hs_geo.dot(original_corrs_hom) geo_ref_diff = geo_corrs_hom - fid_points_hom # import pdb; pdb.set_trace() delta_z = np.sqrt(geo_ref_diff[2, :] * geo_ref_diff[2, :]) delta_r = np.sqrt(geo_ref_diff[0, :] * geo_ref_diff[0, :] + geo_ref_diff[1, :] * geo_ref_diff[1, :]) return delta_z, delta_r def compute_geo_accuracy(fid_path, original_corrs_path, geo_tform, trials_root, desc_name, niter, ntrials, percentile=99): #Load fiducial .ply fid = open(fid_path, 'r') fid_points = np.genfromtxt(fid, delimiter=' ', skip_header=9) fid.close() #Load original corrs .ply fid = open(original_corrs_path, 'r') original_corrs = np.genfromtxt(fid, delimiter=' ', skip_header=9) fid.close() #load the geo tranformation GEO = reg3d_T.geo_transformation(geo_tform); #Compute the "reference error" #i.e. 
fiducial points - geo registered correspondances npoints, c = fid_points.shape if npoints != 30: LOG.warn("Number of fiducial point is NOT 30") if c != 3: LOG.error("Fiducial points has the wrong number of dimensions") # import code; code.interact(local=locals()) fid_points_hom = np.hstack((fid_points, np.ones([npoints, 1]))).T original_corrs_hom = np.hstack((original_corrs, np.ones([npoints, 1]))).T geo_corrs_hom = GEO.transform_points(original_corrs_hom) geo_ref_diff = geo_corrs_hom - fid_points_hom # import pdb; pdb.set_trace() delta_z = (geo_ref_diff[2, :] **2) ** (1./2.) delta_r = (geo_ref_diff[0, :] **2 + geo_ref_diff[1, :] **2 )** (1./2.) delta_z_ia = np.zeros([ntrials, npoints]) delta_r_ia = np.zeros([ntrials, npoints]) delta_z_icp = np.zeros([ntrials, npoints]) delta_r_icp = np.zeros([ntrials, npoints]) for trial in range(0, ntrials): print "********Trial", trial, "**********" #Load the transformations for this trial #************Hs**************# #read source to target "Ground Truth" Transformation Tfile = trials_root + "/trial_" + str(trial) + "/Hs_inv.txt" GT_Tform = reg3d_T.gt_transformation(Tfile) src_features_dir = (trials_root + "/trial_" + str(trial) + "/" + desc_name) Tfile_ia = (src_features_dir + "/ia_transformation_" + str(percentile) + "_" + str(niter) + ".txt") Tfile_icp = (src_features_dir + "/icp_transformation_" + str(percentile) + "_" + str(niter) + ".txt") REG_Tform = reg3d_T.pcl_transformation(Tfile_ia, Tfile_icp) Hs_ia_error = REG_Tform.Hs_ia.dot(GT_Tform.Hs) Hs_icp_error = REG_Tform.Hs_icp.dot(GT_Tform.Hs) # transform the points with the residual transformations ia_corrs_hom = Hs_ia_error.dot(original_corrs_hom) icp_corrs_hom = Hs_icp_error.dot(original_corrs_hom) # geo-register geo_ia_corrs_hom = GEO.transform_points(ia_corrs_hom) geo_icp_corrs_hom = GEO.transform_points(icp_corrs_hom) # distances geo_ia_ref_diff = geo_ia_corrs_hom - fid_points_hom geo_icp_ref_diff = geo_icp_corrs_hom - fid_points_hom delta_z_ia[trial, :] = np.sqrt(geo_ia_ref_diff[2, :] ** 2) delta_r_ia[trial, :] = np.sqrt(geo_ia_ref_diff[0, :] ** 2 + geo_ia_ref_diff[1, :] ** 2 ) delta_z_icp[trial, :] = np.sqrt(geo_icp_ref_diff[2, :] ** 2) delta_r_icp[trial, :] = np.sqrt(geo_icp_ref_diff[0, :] ** 2 + geo_icp_ref_diff[1, :] ** 2) # import pdb; pdb.set_trace() return delta_z, delta_r,\ delta_z_ia, delta_r_ia, \ delta_z_icp, delta_r_icp def main(logfile=None): global LOG LOG = setlogging(logfile) descriptors = ["FPFH_30", "SHOT_30"] niter = 500; ntrials = 10; plot_errors = True; if (plot_errors): colors = ['magenta','green']; markers = ['o', 's', '*', '+', '^', 'v'] fid_path = "/data/lidar_providence/downtown_offset-1-financial-dan-pts1.ply" original_corrs_path = "/data/lidar_providence/downtown_offset-1-financial-dan-pts0.ply" trials_root = "/Users/isa/Experiments/reg3d_eval/downtown_dan"; geo_tform = "/data/lidar_providence/downtown_offset-1-financial-dan-Hs.txt" for d_idx in range(0, len(descriptors)): desc_name = descriptors[d_idx] delta_z, delta_r, \ delta_z_ia, delta_r_ia, \ delta_z_icp, delta_r_icp = compute_geo_accuracy(fid_path, original_corrs_path, geo_tform, trials_root, desc_name, niter, ntrials) #sort errors for all trials to get the 70 80 90 % errors delta_z_ia.sort(axis=0) delta_r_ia.sort(axis=0) delta_z_icp.sort(axis=0) delta_r_icp.sort(axis=0) CE_70_ia = delta_r_ia[int(0.7 * ntrials) - 1, :] CE_80_ia = delta_r_ia[int(0.8 * ntrials) - 1, :] CE_90_ia = delta_r_ia[int(0.9 * ntrials) - 1, :] LE_70_ia = delta_z_ia[int(0.7 * ntrials) - 1, :] LE_80_ia = delta_z_ia[int(0.8 * ntrials) - 
1, :] LE_90_ia = delta_z_ia[int(0.9 * ntrials) - 1, :] CE_70_icp = delta_r_icp[int(0.7 * ntrials) - 1, :] CE_80_icp = delta_r_icp[int(0.8 * ntrials) - 1, :] CE_90_icp = delta_r_icp[int(0.9 * ntrials) - 1, :] LE_70_icp = delta_z_icp[int(0.7 * ntrials) - 1, :] LE_80_icp = delta_z_icp[int(0.8 * ntrials) - 1, :] LE_90_icp = delta_z_icp[int(0.9 * ntrials) - 1, :] if (plot_errors): #Plot CE and LE fig_ia_CE = plt.figure() ax_ia_CE = fig_ia_CE.add_subplot(111); plt.hold(True); plt.axis(tight=True); ax_ia_CE.plot(CE_70_ia, "--s", color="green", label= "CE_70"); ax_ia_CE.plot(CE_80_ia, "--^", color="magenta", label= "CE_80"); ax_ia_CE.plot(CE_90_ia, "--*", color="blue", label= "CE_90"); ax_ia_CE.plot( delta_r, "--o", color="cyan", label= "GT"); ax_ia_CE.set_xlabel('Fiducial Marker (index)',fontsize= 20); ax_ia_CE.set_ylabel('Error (meters)',fontsize= 20); ax_ia_CE.legend(loc='best', frameon=False); # ax_ia_CE.set_title('IA CE') fname = trials_root + "/GEO_results/IA_CE_" + desc_name + ".pdf" fig_ia_CE.savefig(fname, transparent=True, pad_inches=5) fig_ia_LE = plt.figure() ax_ia_LE = fig_ia_LE.add_subplot(111); plt.hold(True); plt.axis(tight=True); ax_ia_LE.plot(LE_70_ia, "--s", color="green", label= "LE_70"); ax_ia_LE.plot(LE_80_ia, "--^", color="magenta", label= "LE_80"); ax_ia_LE.plot(LE_90_ia, "--*", color="blue", label= "LE_90"); ax_ia_LE.plot( delta_z, "--o", color="cyan", label= "GT"); ax_ia_LE.set_xlabel('Fiducial Marker (index)',fontsize= 20); ax_ia_LE.set_ylabel('Error (meters)',fontsize= 20); ax_ia_LE.legend(loc='best', frameon=False); # ax_ia_LE.set_title('IA LE') fname = trials_root + "/GEO_results/IA_LE_" + desc_name + ".pdf" fig_ia_LE.savefig(fname, transparent=True, pad_inches=5) fig_icp_CE = plt.figure() ax_icp_CE = fig_icp_CE.add_subplot(111); plt.hold(True); plt.axis(tight=True); ax_icp_CE.plot(CE_70_icp, "--s", color="green", label= "CE_70"); ax_icp_CE.plot(CE_80_icp, "--^", color="magenta", label= "CE_80"); ax_icp_CE.plot(CE_90_icp, "--*", color="blue", label= "CE_90"); ax_icp_CE.plot( delta_r, "--o", color="cyan", label= "GT"); ax_icp_CE.set_xlabel('Fiducial Marker (index)',fontsize= 20); ax_icp_CE.set_ylabel('Error (meters)',fontsize= 20); ax_icp_CE.legend(loc='best', frameon=False); # ax_icp_CE.set_title('ICP CE') fname = trials_root + "/GEO_results/ICP_CE_" + desc_name + ".pdf" fig_icp_CE.savefig(fname, transparent=True, pad_inches=5) fig_icp_LE = plt.figure() ax_icp_LE = fig_icp_LE.add_subplot(111); plt.hold(True); plt.axis(tight=True); ax_icp_LE.plot(LE_70_icp, "--s", color="green", label= "LE_70"); ax_icp_LE.plot(LE_80_icp, "--^", color="magenta", label= "LE_80"); ax_icp_LE.plot(LE_90_icp, "--*", color="blue", label= "LE_90"); ax_icp_LE.plot( delta_z, "--o", color="cyan", label= "GT"); ax_icp_LE.set_xlabel('Fiducial Marker (index)',fontsize= 20); ax_icp_LE.set_ylabel('Error (meters)',fontsize= 20); ax_icp_LE.legend(loc='best', frameon=False); # ax_icp_LE.set_title('ICP LE') fname = trials_root + "/GEO_results/ICP_LE_" + desc_name + ".pdf" fig_icp_LE.savefig(fname, transparent=True, pad_inches=5) # axT.set_xlim((0,505) ); # axT.set_yticks(np.arange(0.0,250.0,20)); # # axT.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, # # ncol=4, mode="expand", borderaxespad=0.) 
# # figT.savefig("/Users/isa/Experiments/reg3d_eval/downtown_dan/T_error.pdf", transparent=True, pad_inches=5) # plt.show(); # import pdb; pdb.set_trace() def setlogging(logfile=None): level = logging.DEBUG logger = logging.getLogger(__name__) logger.setLevel(level) # create formatter and add it to the handlers formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") # create console handler with a higher log level ch = logging.StreamHandler() ch.setLevel(level) ch.setFormatter(formatter) # add the handlers to logger logger.addHandler(ch) # create file handler which logs error messages if logfile: print "Logging to file" fh = logging.FileHandler(logfile) fh.setLevel(level) fh.setFormatter(formatter) logger.addHandler(fh) #test logging logger.debug("debug message") logger.info("info message") logger.warn("warn message") logger.error("error message") logger.critical("critical message") return logger if __name__ == '__main__': # initialize the parser object: parser = argparse.ArgumentParser(description="Compute geo-registration accuracy of estimated transformations") # define options here: parser.add_argument("-v", "--verbose", action='store', type = bool, dest="verbose", default=True, help="Write debug log to log_file") parser.add_argument("-L", "--log", dest="logfile", help="write debug log to log_file") args = parser.parse_args() # set up logging if args.verbose: status = main(args.logfile) else: status = main() sys.exit(status)
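As a compact restatement of the error measures described in the docstring above (half angle between normalized quaternions, and an L2 translation difference with only scale applied), the sketch below uses plain numpy; the quaternion and translation values are placeholders, not results from these experiments.

import numpy as np

def rotation_error(q1, q2):
    # half angle between normalized quaternions: acos(|<q1, q2>|), in [0, pi/2]
    q1 = q1 / np.linalg.norm(q1)
    q2 = q2 / np.linalg.norm(q2)
    return np.arccos(np.clip(abs(np.dot(q1, q2)), 0.0, 1.0))

def translation_error(t_est, t_gt, scale):
    # distances are preserved under R and T, so only scale is applied
    return np.linalg.norm(scale * (t_est - t_gt))

q_est = np.array([0.99, 0.0, 0.0, 0.14])   # placeholder quaternion (w, x, y, z)
q_gt = np.array([1.0, 0.0, 0.0, 0.0])
print(rotation_error(q_est, q_gt))
print(translation_error(np.array([1.0, 2.0, 3.0]), np.array([1.1, 2.0, 2.9]), 1.0))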
bsd-2-clause
MechCoder/scikit-learn
examples/bicluster/plot_spectral_biclustering.py
403
2011
""" ============================================= A demo of the Spectral Biclustering algorithm ============================================= This example demonstrates how to generate a checkerboard dataset and bicluster it using the Spectral Biclustering algorithm. The data is generated with the ``make_checkerboard`` function, then shuffled and passed to the Spectral Biclustering algorithm. The rows and columns of the shuffled matrix are rearranged to show the biclusters found by the algorithm. The outer product of the row and column label vectors shows a representation of the checkerboard structure. """ print(__doc__) # Author: Kemal Eren <kemal@kemaleren.com> # License: BSD 3 clause import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import make_checkerboard from sklearn.datasets import samples_generator as sg from sklearn.cluster.bicluster import SpectralBiclustering from sklearn.metrics import consensus_score n_clusters = (4, 3) data, rows, columns = make_checkerboard( shape=(300, 300), n_clusters=n_clusters, noise=10, shuffle=False, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Original dataset") data, row_idx, col_idx = sg._shuffle(data, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Shuffled dataset") model = SpectralBiclustering(n_clusters=n_clusters, method='log', random_state=0) model.fit(data) score = consensus_score(model.biclusters_, (rows[:, row_idx], columns[:, col_idx])) print("consensus score: {:.1f}".format(score)) fit_data = data[np.argsort(model.row_labels_)] fit_data = fit_data[:, np.argsort(model.column_labels_)] plt.matshow(fit_data, cmap=plt.cm.Blues) plt.title("After biclustering; rearranged to show biclusters") plt.matshow(np.outer(np.sort(model.row_labels_) + 1, np.sort(model.column_labels_) + 1), cmap=plt.cm.Blues) plt.title("Checkerboard structure of rearranged data") plt.show()
bsd-3-clause
KDD-OpenSource/geox-young-academy
day-3/Kalman-filter_Mark.py
1
1494
# -*- coding: utf-8 -*- """ Created on Wed Oct 11 10:10:24 2017 @author: Mark """ import numpy as np import matplotlib.pyplot as plt #Define functions def model(state_0,A,B): state_1 = A*state_0 + np.random.normal(0,B) return state_1 state_null=np.random.normal(0,0.4) def observation_function(state,R): obs=state+np.random.normal(0,R) return obs def forecast(state_0,cov_0,A,B): state_1=A*state_0 cov_1=A*cov_0*A+B return state_1,cov_1 def analysis_formulas(state_1_hat,cov_1_hat,K,H,obs_0): state_1 = state_1_hat - K*(H*state_1_hat - obs_0) cov_1 = cov_1_hat - K*H*cov_1_hat return state_1, cov_1 def kalman_gain(cov_1_hat,H,R): K = cov_1_hat*H*(R+H*cov_1_hat*H)**(-1) return K #Initialize model parameters A = 0.5 H = 1 B = 0.5 R = 0.1 lev = 100 #Synthetic Model STATE_real = np.zeros(lev) OBS_real = np.zeros(lev) STATE_real[0] = np.random.normal(5,0.1) OBS_real[0] = observation_function(STATE_real[0],R) for i in range(1, lev): STATE_real[i] = model(STATE_real[i-1],0.4,0.01) OBS_real[i] = observation_function(STATE_real[i],R) #Kalman-filter STATE = np.zeros(lev) COV = np.zeros(lev) STATE[0] = state_null COV[0] = B for i in range(1, lev): (state_hat,cov_hat) = forecast(STATE[i-1],COV[i-1],A,B) K = kalman_gain(cov_hat,H,R) (STATE[i],COV[i]) = analysis_formulas(state_hat,cov_hat,K,H,OBS_real[i]) plt.plot(STATE) plt.plot(STATE_real)
mit
mskwark/PconsC3
extra/arne/MSA/find-intradom.py
1
1381
#!/usr/bin/env python
# Find all contacts between domains.
import sys, os, re, string
import argparse
from os.path import expanduser

home = expanduser("~")
sys.path.append(home + '/bioinfo-toolbox/parsing')
sys.path.append(home + '/git/bioinfo-toolbox/parsing')

import parse_contacts
import numpy as np
import matplotlib
matplotlib.use('Agg')


def find_interdomain_contacts(c_filename, start, end, sep, domain):
    contacts = parse_contacts.parse(open(c_filename, 'r'), sep)
    contacts_np = parse_contacts.get_numpy_cmap(contacts)
    if end < 0:
        # --end defaults to -1, meaning "take the full sequence range into account"
        end = int(max(c[2] for c in contacts))
    contacts_np = contacts_np[start:end, start:end]
    for i in range(len(contacts)):
        score = contacts[i][0]
        c_x = contacts[i][1] - 1
        c_y = contacts[i][2] - 1
        # only look at contacts within the given range
        if c_x < start or c_x >= end:
            continue
        if c_y < start or c_y >= end:
            continue
        pos_diff = abs(c_x - c_y)
        too_close = pos_diff < sep
        if too_close:
            continue
        # a contact is taken to span two domains when its residues fall on opposite
        # sides of the --domain boundary index (assumed reading of the unfinished
        # "if c_x < domain" test in the original draft)
        if domain > 0 and (c_x < domain) != (c_y < domain):
            print score, c_x + 1, c_y + 1


if __name__ == "__main__":
    p = argparse.ArgumentParser(description='Find residue contacts between protein domains.')
    p.add_argument('contact_file', help='contact map file (assumed positional argument)')
    p.add_argument('-t', '--threshold', default=-1, type=float)
    p.add_argument('--start', default=0, type=int)
    p.add_argument('--end', default=-1, type=int)
    p.add_argument('--sep', default=5, type=int)
    p.add_argument('--domain', default=-1, type=int)
    args = p.parse_args()
    find_interdomain_contacts(args.contact_file, args.start, args.end, args.sep, args.domain)
gpl-2.0
goulu/Goulib
Goulib/plot.py
1
4898
""" plotable rich object display on IPython/Jupyter notebooks """ __author__ = "Philippe Guglielmetti" __copyright__ = "Copyright 2015, Philippe Guglielmetti" __credits__ = [] __license__ = "LGPL" # import matplotlib and set backend once for all from . import itertools2 import os import io import sys import logging import base64 import matplotlib if os.getenv('TRAVIS'): # are we running https://travis-ci.org/ automated tests ? matplotlib.use('Agg') # Force matplotlib not to use any Xwindows backend elif sys.gettrace(): # http://stackoverflow.com/questions/333995/how-to-detect-that-python-code-is-being-executed-through-the-debugger matplotlib.use('Agg') # because 'QtAgg' crashes python while debugging else: pass # matplotlib.use('pdf') #for high quality pdf, but doesn't work for png, svg ... logging.info('matplotlib backend is %s' % matplotlib.get_backend()) class Plot(object): """base class for plotable rich object display on IPython notebooks inspired from http://nbviewer.ipython.org/github/ipython/ipython/blob/3607712653c66d63e0d7f13f073bde8c0f209ba8/docs/examples/notebooks/display_protocol.ipynb """ def _plot(self, ax, **kwargs): """abstract method, must be overriden :param ax: `matplotlib.axis` :return ax: `matplotlib.axis` after plot """ raise NotImplementedError( 'objects derived from plot.PLot must define a _plot method') return ax def render(self, fmt='svg', **kwargs): return render([self], fmt, **kwargs) # call global function def save(self, filename, **kwargs): return save([self], filename, **kwargs) # call global function # for IPython notebooks def _repr_html_(self): """default rich format is svg plot""" try: return self._repr_svg_() except NotImplementedError: pass # this returns the same as _repr_png_, but is Table compatible buffer = self.render('png') s = base64.b64encode(buffer).decode('utf-8') return '<img src="data:image/png;base64,%s">' % s def html(self, **kwargs): from IPython.display import HTML return HTML(self._repr_html_(**kwargs)) def svg(self, **kwargs): from IPython.display import SVG return SVG(self._repr_svg_(**kwargs)) def _repr_svg_(self, **kwargs): return self.render(fmt='svg', **kwargs).decode('utf-8') def png(self, **kwargs): from IPython.display import Image return Image(self._repr_png_(**kwargs), embed=True) def _repr_png_(self, **kwargs): return self.render(fmt='png', **kwargs) def plot(self, **kwargs): """ renders on IPython Notebook (alias to make usage more straightforward) """ return self.svg(**kwargs) def render(plotables, fmt='svg', **kwargs): """renders several Plot objects""" import matplotlib.pyplot as plt # extract optional arguments used for rasterization printargs, kwargs = itertools2.dictsplit( kwargs, ['dpi', 'transparent', 'facecolor', 'background', 'figsize'] ) ylim = kwargs.pop('ylim', None) xlim = kwargs.pop('xlim', None) title = kwargs.pop('title', None) fig, ax = plt.subplots() labels = kwargs.pop('labels', [None] * len(plotables)) # slightly shift the points to make superimposed curves more visible offset = kwargs.pop('offset', 0) for i, obj in enumerate(plotables): if labels[i] is None: labels[i] = str(obj) if not title: try: title = obj._repr_latex_() # check that title can be used in matplotlib from matplotlib.mathtext import MathTextParser parser = MathTextParser('path').parse(title) except Exception as e: title = labels[i] ax = obj._plot(ax, label=labels[i], offset=i * offset, **kwargs) if ylim: plt.ylim(ylim) if xlim: plt.xlim(xlim) ax.set_title(title) if len(labels) > 1: ax.legend() output = io.BytesIO() fig.savefig(output, 
format=fmt, **printargs) data = output.getvalue() plt.close(fig) return data def png(plotables, **kwargs): from IPython.display import Image return Image(render(plotables, 'png', **kwargs), embed=True) def svg(plotables, **kwargs): from IPython.display import SVG return SVG(render(plotables, 'svg', **kwargs)) plot = svg def save(plotables, filename, **kwargs): ext = filename.split('.')[-1].lower() kwargs.setdefault('dpi', 600) # force good quality return open(filename, 'wb').write(render(plotables, ext, **kwargs))
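For illustration, a minimal Plot subclass of the kind this module expects; the class name Line and its data are invented, the Goulib package is assumed to be importable, and only the _plot override is strictly required.

import matplotlib
matplotlib.use('Agg')                 # headless backend, as the module itself does on CI
from Goulib.plot import Plot

class Line(Plot):
    # hypothetical example subclass: draws a simple polyline
    def __init__(self, xs, ys):
        self.xs, self.ys = xs, ys

    def _plot(self, ax, label=None, offset=0, **kwargs):
        ax.plot(self.xs, [y + offset for y in self.ys], label=label, **kwargs)
        return ax

line = Line([0, 1, 2, 3], [0, 1, 4, 9])
png_bytes = line.render(fmt='png')    # raw PNG bytes, same path as _repr_png_
line.save('line.png')                 # save() picks the format from the extension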
lgpl-3.0
yavalvas/yav_com
build/matplotlib/lib/mpl_toolkits/axes_grid1/mpl_axes.py
8
4971
from __future__ import (absolute_import, division, print_function, unicode_literals) import six import warnings import matplotlib.axes as maxes from matplotlib.artist import Artist from matplotlib.axis import XAxis, YAxis class SimpleChainedObjects(object): def __init__(self, objects): self._objects = objects def __getattr__(self, k): _a = SimpleChainedObjects([getattr(a, k) for a in self._objects]) return _a def __call__(self, *kl, **kwargs): for m in self._objects: m(*kl, **kwargs) class Axes(maxes.Axes): def toggle_axisline(self, b): warnings.warn("toggle_axisline is not necessary and deprecated in axes_grid1") class AxisDict(dict): def __init__(self, axes): self.axes = axes super(Axes.AxisDict, self).__init__() def __getitem__(self, k): if isinstance(k, tuple): r = SimpleChainedObjects([dict.__getitem__(self, k1) for k1 in k]) return r elif isinstance(k, slice): if k.start == None and k.stop == None and k.step == None: r = SimpleChainedObjects(list(six.itervalues(self))) return r else: raise ValueError("Unsupported slice") else: return dict.__getitem__(self, k) def __call__(self, *v, **kwargs): return maxes.Axes.axis(self.axes, *v, **kwargs) def __init__(self, *kl, **kw): super(Axes, self).__init__(*kl, **kw) def _init_axis_artists(self, axes=None): if axes is None: axes = self self._axislines = self.AxisDict(self) self._axislines["bottom"] = SimpleAxisArtist(self.xaxis, 1, self.spines["bottom"]) self._axislines["top"] = SimpleAxisArtist(self.xaxis, 2, self.spines["top"]) self._axislines["left"] = SimpleAxisArtist(self.yaxis, 1, self.spines["left"]) self._axislines["right"] = SimpleAxisArtist(self.yaxis, 2, self.spines["right"]) def _get_axislines(self): return self._axislines axis = property(_get_axislines) def cla(self): super(Axes, self).cla() self._init_axis_artists() class SimpleAxisArtist(Artist): def __init__(self, axis, axisnum, spine): self._axis = axis self._axisnum = axisnum self.line = spine if isinstance(axis, XAxis): self._axis_direction = ["bottom", "top"][axisnum-1] elif isinstance(axis, YAxis): self._axis_direction = ["left", "right"][axisnum-1] else: raise ValueError("axis must be instance of XAxis or YAxis : %s is provided" % (axis,)) Artist.__init__(self) def _get_major_ticks(self): tickline = "tick%dline" % self._axisnum return SimpleChainedObjects([getattr(tick, tickline) for tick \ in self._axis.get_major_ticks()]) def _get_major_ticklabels(self): label = "label%d" % self._axisnum return SimpleChainedObjects([getattr(tick, label) for tick \ in self._axis.get_major_ticks()]) def _get_label(self): return self._axis.label major_ticks = property(_get_major_ticks) major_ticklabels = property(_get_major_ticklabels) label = property(_get_label) def set_visible(self, b): self.toggle(all=b) self.line.set_visible(b) self._axis.set_visible(True) Artist.set_visible(self, b) def set_label(self, txt): self._axis.set_label_text(txt) def toggle(self, all=None, ticks=None, ticklabels=None, label=None): if all: _ticks, _ticklabels, _label = True, True, True elif all is not None: _ticks, _ticklabels, _label = False, False, False else: _ticks, _ticklabels, _label = None, None, None if ticks is not None: _ticks = ticks if ticklabels is not None: _ticklabels = ticklabels if label is not None: _label = label tickOn = "tick%dOn" % self._axisnum labelOn = "label%dOn" % self._axisnum if _ticks is not None: tickparam = {tickOn: _ticks} self._axis.set_tick_params(**tickparam) if _ticklabels is not None: tickparam = {labelOn: _ticklabels} self._axis.set_tick_params(**tickparam) if _label is 
not None: pos = self._axis.get_label_position() if (pos == self._axis_direction) and not _label: self._axis.label.set_visible(False) elif _label: self._axis.label.set_visible(True) self._axis.set_label_position(self._axis_direction) if __name__ == '__main__': import matplotlib.pyplot as plt fig = plt.figure() ax = Axes(fig, [0.1, 0.1, 0.8, 0.8]) fig.add_axes(ax) ax.cla()
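A brief usage sketch for the Axes subclass above; it assumes the same vintage of matplotlib this file ships with, and the output filename is arbitrary.

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.mpl_axes import Axes

fig = plt.figure()
ax = Axes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
ax.axis["top"].set_visible(False)                  # hide a single spine via the axis dict
ax.axis["bottom", "left"].toggle(ticklabels=True)  # chained access to several artists at once
fig.savefig('mpl_axes_demo.png')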
mit
rsignell-usgs/PySeidon
pyseidon/tidegaugeClass/plotsTidegauge.py
2
1096
#!/usr/bin/python2.7 # encoding: utf-8 from __future__ import division import numpy as np import matplotlib.pyplot as plt import matplotlib.tri as Tri import matplotlib.ticker as ticker import seaborn class PlotsTidegauge: """'Plots' subset of Tidegauge class gathers plotting functions""" def __init__(self, variable, debug=False): self._var = variable def plot_xy(self, x, y, title=' ', xLabel=' ', yLabel=' '): """ Simple X vs Y plot Inputs: ------ - x = 1D array - y = 1D array """ fig = plt.figure(figsize=(18,10)) plt.rc('font',size='22') self._fig = plt.plot(x, y, label=title) scale = 1 ticks = ticker.FuncFormatter(lambda lon, pos: '{0:g}'.format(lon/scale)) plt.ylabel(yLabel) plt.xlabel(xLabel) #plt.legend() plt.show() #TR_comments: templates # def whatever(self, debug=False): # if debug or self._debug: # print 'Start whatever...' # # if debug or self._debug: # print '...Passed'
agpl-3.0
chrismattmann/tika-similarity
sk_kmeans.py
2
4409
#!/usr/bin/env python2.7 # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # from tika import parser import pandas as pd from vector import Vector from sklearn.cluster import KMeans import argparse, os, json def filterFiles(inputDir, acceptTypes): filename_list = [] for root, dirnames, files in os.walk(inputDir): dirnames[:] = [d for d in dirnames if not d.startswith('.')] for filename in files: if not filename.startswith('.'): filename_list.append(os.path.join(root, filename)) filename_list = (filename for filename in filename_list if "metadata" in parser.from_file(filename)) if acceptTypes: filename_list = (filename for filename in filename_list if str(parser.from_file(filename)['metadata']['Content-Type'].encode('utf-8').decode('utf-8')).split('/')[-1] in acceptTypes) else: print("Accepting all MIME Types.....") return filename_list if __name__ == "__main__": argParser = argparse.ArgumentParser('k-means Clustering of documents based on metadata values') argParser.add_argument('--inputDir', required=True, help='path to directory containing files') argParser.add_argument('--outJSON', required=True, help='/path/to/clusters.json containing k-means cluster assignments') argParser.add_argument('--Kvalue', help='number of clusters to find') #argParser.add_argument('--findK', action='store_true', help='find the optimal value of K') argParser.add_argument('--accept', nargs='+', type=str, help='Optional: compute similarity only on specified IANA MIME Type(s)') args = argParser.parse_args() # cluster for a particular value of K # if args.inputDir and args.outJSON and args.findK: if args.inputDir and args.outJSON and args.Kvalue: list_of_points = [] for eachFile in filterFiles(args.inputDir, args.accept): list_of_points.append(Vector(eachFile, parser.from_file(eachFile)["metadata"])) list_of_Dicts = (point.features for point in list_of_points) df = pd.DataFrame(list_of_Dicts) df = df.fillna(0) print(df.shape) kmeans = KMeans(n_clusters=int(args.Kvalue), init='k-means++', max_iter=300, # k-means convergence n_init=10, # find global minima n_jobs=-2, # parallelize ) labels = kmeans.fit_predict(df) # unsupervised (X, y=None) print(labels) # kmeans.labels_ clusters = {} for i in range(0, len(labels)): node = { "metadata": json.dumps(list_of_points[i].features), "name": list_of_points[i].filename.split('/')[-1], "path": list_of_points[i].filename } try: clusters[str(labels[i])].append(node) except KeyError: clusters[str(labels[i])] = [] clusters[str(labels[i])].append(node) # generate clusters.JSON with open(args.outJSON, "w") as jsonF: json_data = {"name": "clusters"} children = [] for key in clusters: cluster_children = {"name": "cluster"+key, "children": clusters[key]} children.append(cluster_children) json_data["children"] = children json.dump(json_data, jsonF) # print matplotlib # 
user chooses k => generates k # find elbow #kmeans.transform() # String Length Of Course # df.to_csv("bashhshs.csv", sep=',')
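The core pattern above (metadata dictionaries -> pandas DataFrame -> KMeans) works the same without Tika; a toy sketch with made-up feature dictionaries:

import pandas as pd
from sklearn.cluster import KMeans

metadata = [{'Content-Length': 120, 'width': 640},   # hypothetical metadata rows
            {'Content-Length': 150, 'width': 800},
            {'Content-Length': 9000, 'pages': 12}]
df = pd.DataFrame(metadata).fillna(0)                # missing keys become 0, as above
labels = KMeans(n_clusters=2, random_state=0).fit_predict(df)
print(labels)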
apache-2.0
dtusar/coco
code-postprocessing/bbob_pproc/compall/pprldmany.py
3
29654
#! /usr/bin/env python # -*- coding: utf-8 -*- """Generates figure of the bootstrap distribution of ERT. The main method in this module generates figures of Empirical Cumulative Distribution Functions of the bootstrap distribution of the Expected Running Time (ERT) divided by the dimension for many algorithms. The outputs show the ECDFs of the running times of the simulated runs divided by dimension for 50 different targets logarithmically uniformly distributed in [1e−8, 1e2]. The crosses (×) give the median number of function evaluations of unsuccessful runs divided by dimension. **Example** .. plot:: :width: 50% import urllib import tarfile import glob from pylab import * import bbob_pproc as bb # Collect and unarchive data (3.4MB) dataurl = 'http://coco.lri.fr/BBOB2009/pythondata/BIPOP-CMA-ES.tar.gz' filename, headers = urllib.urlretrieve(dataurl) archivefile = tarfile.open(filename) archivefile.extractall() # Empirical cumulative distribution function of bootstrapped ERT figure ds = bb.load(glob.glob('BBOB2009pythondata/BIPOP-CMA-ES/ppdata_f0*_20.pickle')) figure() bb.compall.pprldmany.plot(ds) # must rather call main instead of plot? bb.compall.pprldmany.beautify() """ from __future__ import absolute_import import os import warnings from pdb import set_trace import numpy as np import matplotlib.pyplot as plt from .. import toolsstats, bestalg, genericsettings from .. import pproc as pp # import dictAlgByDim, dictAlgByFun from .. import toolsdivers # strip_pathname, str_to_latex from .. import pprldistr # plotECDF, beautifyECDF from .. import ppfig # consecutiveNumbers, saveFigure, plotUnifLogXMarkers, logxticks from .. import pptex # numtotex displaybest2009 = True target_values = pp.TargetValues(10**np.arange(2, -8, -0.2)) # possibly changed in config x_limit = None # not sure whether this is necessary/useful x_limit_default = 1e7 # better: 10 * genericsettings.evaluation_setting[1], noisy: 1e8, otherwise: 1e7. 
maximal run length shown divide_by_dimension = True annotation_line_end_relative = 1.11 # lines between graph and annotation annotation_space_end_relative = 1.24 # figure space end relative to x_limit save_zoom = False # save zoom into left and right part of the figures perfprofsamplesize = genericsettings.simulated_runlength_bootstrap_sample_size_rld # number of bootstrap samples drawn for each fct+target in the performance profile dpi_global_var = 100 # 100 ==> 800x600 (~160KB), 120 ==> 960x720 (~200KB), 150 ==> 1200x900 (~300KB) looks ugly in latex nbperdecade = 1 median_max_evals_marker_format = ['x', 24, 3] label_fontsize = 18 styles = [d.copy() for d in genericsettings.line_styles] # deep copy refcolor = 'wheat' """color of reference (best) algorithm""" save_figure = True close_figure = True # TODO: update the list below which are not relevant anymore best = ('AMaLGaM IDEA', 'iAMaLGaM IDEA', 'VNS (Garcia)', 'MA-LS-Chain', 'BIPOP-CMA-ES', 'IPOP-SEP-CMA-ES', 'BFGS', 'NELDER (Han)', 'NELDER (Doe)', 'NEWUOA', 'full NEWUOA', 'GLOBAL', 'MCS (Neum)', 'DIRECT', 'DASA', 'POEMS', 'Cauchy EDA', 'Monte Carlo') best2 = ('AMaLGaM IDEA', 'iAMaLGaM IDEA', 'VNS (Garcia)', 'MA-LS-Chain', 'BIPOP-CMA-ES', 'IPOP-SEP-CMA-ES', 'BFGS', 'NEWUOA', 'GLOBAL') eseda = ('AMaLGaM IDEA', 'iAMaLGaM IDEA', 'VNS (Garcia)', 'MA-LS-Chain', 'BIPOP-CMA-ES', 'IPOP-SEP-CMA-ES', '(1+1)-CMA-ES', '(1+1)-ES') ESs = ('BIPOP-CMA-ES', 'IPOP-SEP-CMA-ES', '(1+1)-CMA-ES', '(1+1)-ES', 'BIPOP-ES') bestnoisy = () bestbest = ('BIPOP-CMA-ES', 'NEWUOA', 'GLOBAL', 'NELDER (Doe)') nikos = ('AMaLGaM IDEA', 'VNS (Garcia)', 'MA-LS-Chain', 'BIPOP-CMA-ES', '(1+1)-CMA-ES', 'G3-PCX', 'NEWUOA', 'Monte Carlo', 'NELDER (Han)', 'NELDER (Doe)', 'GLOBAL', 'MCS (Neum)') nikos = ('AMaLGaM IDEA', 'VNS (Garcia)', 'MA-LS-Chain', 'BIPOP-CMA-ES', '(1+1)-CMA-ES', '(1+1)-ES', 'IPOP-SEP-CMA-ES', 'BIPOP-ES', 'NEWUOA', 'NELDER (Doe)', 'BFGS', 'Monte Carlo') nikos40D = ('AMaLGaM IDEA', 'iAMaLGaM IDEA', 'BIPOP-CMA-ES', '(1+1)-CMA-ES', '(1+1)-ES', 'IPOP-SEP-CMA-ES', 'NEWUOA', 'NELDER (Han)', 'BFGS', 'Monte Carlo') # three groups which include all algorithms: GA = ('DE-PSO', '(1+1)-ES', 'PSO_Bounds', 'DASA', 'G3-PCX', 'simple GA', 'POEMS', 'Monte Carlo') # 7+1 classics = ('BFGS', 'NELDER (Han)', 'NELDER (Doe)', 'NEWUOA', 'full NEWUOA', 'DIRECT', 'LSfminbnd', 'LSstep', 'Rosenbrock', 'GLOBAL', 'SNOBFIT', 'MCS (Neum)', 'adaptive SPSA', 'Monte Carlo') # 13+1 EDA = ('BIPOP-CMA-ES', '(1+1)-CMA-ES', 'VNS (Garcia)', 'EDA-PSO', 'IPOP-SEP-CMA-ES', 'AMaLGaM IDEA', 'iAMaLGaM IDEA', 'Cauchy EDA', 'BayEDAcG', 'MA-LS-Chain', 'Monte Carlo') # 10+1 # groups according to the talks petr = ('DIRECT', 'LSfminbnd', 'LSstep', 'Rosenbrock', 'G3-PCX', 'Cauchy EDA', 'Monte Carlo') TAO = ('BFGS', 'NELDER (Han)', 'NEWUOA', 'full NEWUOA', 'BIPOP-CMA-ES', 'IPOP-SEP-CMA-ES', '(1+1)-CMA-ES', '(1+1)-ES', 'simple GA', 'Monte Carlo') TAOp = TAO + ('NELDER (Doe)',) MC = ('Monte Carlo',) third = ('POEMS', 'VNS (Garcia)', 'DE-PSO', 'EDA-PSO', 'PSO_Bounds', 'PSO', 'AMaLGaM IDEA', 'iAMaLGaM IDEA', 'MA-LS-Chain', 'DASA', 'BayEDAcG') funi = [1,2] + range(5, 15) # 2 is paired Ellipsoid funilipschitz = [1] + [5,6] + range(8,13) + [14] # + [13] #13=sharp ridge, 7=step-ellipsoid fmulti = [3, 4] + range(15,25) # 3 = paired Rastrigin funisep = [1,2,5] # input parameter settings show_algorithms = eseda + ('BFGS',) # ()==all #show_algorithms = ('IPOP-SEP-CMA-ES', 'IPOP-CMA-ES', 'BIPOP-CMA-ES',) #show_algorithms = ('IPOP-SEP-CMA-ES', 'IPOP-CMA-ES', 'BIPOP-CMA-ES', #'avg NEWUOA', 'NEWUOA', 'full NEWUOA', 'BFGS', 'MCS 
(Neum)', 'GLOBAL', 'NELDER (Han)', #'NELDER (Doe)', 'Monte Carlo') # ()==all show_algorithms = () # could be one of the list above function_IDs = () function_IDs = range(1,200) # sep ros high mul mulw == 1, 6, 10, 15, 20, 101, 107, 122, #function_IDs = range(101,199) # sep ros high mul mulw == 1, 6, 10, 15, 20, 101, 107, 122, #function_IDs = fmulti # funi fmulti # range(103, 131, 3) # displayed functions #function_IDs = [1,2,3,4,5] # separable functions #function_IDs = [6,7,8,9] # moderate functions #function_IDs = [10,11,12,13,14] # ill-conditioned functions #function_IDs = [15,16,17,18,19] # multi-modal functions #function_IDs = [20,21,22,23,24] # weak structure functions #function_IDs = range(101,131) # noisy testbed #function_IDs = range(101,106+1) # moderate noise #function_IDs = range(107,130+1) # severe noise #function_IDs = range(101,130+1, 3) # gauss noise #function_IDs = range(102,130+1, 3) # unif noise #function_IDs = range(103,130+1, 3) # cauchy noise # function_IDs = range(15,25) # multimodal nonseparable #'-' solid line style #'--' dashed line style #'-.' dash-dot line style #':' dotted line style #'.' point marker #',' pixel marker #'o' circle marker #'v' triangle_down marker #'^' triangle_up marker #'<' triangle_left marker #'>' triangle_right marker #'1' tri_down marker #'2' tri_up marker #'3' tri_left marker #'4' tri_right marker #'s' square marker #'p' pentagon marker #'*' star marker #'h' hexagon1 marker #'H' hexagon2 marker #'+' plus marker #'x' x marker #'D' diamond marker #'d' thin_diamond marker #'|' vline marker #'_' hline marker def plt_plot(*args, **kwargs): return plt.plot(*args, clip_on=False, **kwargs) def beautify(): """Customize figure presentation.""" #plt.xscale('log') # Does not work with matplotlib 0.91.2 a = plt.gca() a.set_xscale('log') #Tick label handling plt.xlim(xmin=1e-0) global divide_by_dimension if divide_by_dimension: plt.xlabel('log10 of (# f-evals / dimension)', fontsize=label_fontsize) else: plt.xlabel('log10 of # f-evals', fontsize=label_fontsize) plt.ylabel('Proportion of function+target pairs', fontsize=label_fontsize) ppfig.logxticks() pprldistr.beautifyECDF() def plotdata(data, maxval=None, maxevals=None, CrE=0., **kwargs): """Draw a normalized ECDF. What means normalized? :param seq data: data set, a 1-D ndarray of runlengths :param float maxval: right-most value to be displayed, will use the largest non-inf, non-nan value in data if not provided :param seq maxevals: if provided, will plot the median of this sequence as a single cross marker :param float CrE: Crafting effort the data will be multiplied by the exponential of this value. :param kwargs: optional arguments provided to plot function. """ #Expect data to be a ndarray. 
x = data[np.isnan(data)==False] # Take away the nans nn = len(x) x = x[np.isinf(x)==False] # Take away the infs n = len(x) x = np.exp(CrE) * x # correction by crafting effort CrE if n == 0: #res = plt.plot((1., ), (0., ), **kwargs) res = pprldistr.plotECDF(np.array((1., )), n=np.inf, **kwargs) else: dictx = {} # number of appearances of each value in x for i in x: dictx[i] = dictx.get(i, 0) + 1 x = np.array(sorted(dictx)) # x is not a multiset anymore y = np.cumsum(list(dictx[i] for i in x)) # cumsum of size of y-steps (nb of appearences) idx = sum(x <= x_limit**annotation_space_end_relative) - 1 y_last, x_last = y[idx] / float(nn), x[idx] if maxval is None: maxval = max(x) end = np.sum(x <= maxval) x = x[:end] y = y[:end] try: # plot the very last point outside of the "normal" plotting area c = kwargs['color'] plt_plot([x_last] * 2, [y_last] * 2, '.', color=c, markeredgecolor=c) except: pass x2 = np.hstack([np.repeat(x, 2), maxval]) # repeat x-values for each step in the cdf y2 = np.hstack([0.0, np.repeat(y / float(nn), 2)]) res = ppfig.plotUnifLogXMarkers(x2, y2, nbperdecade * 3 / np.log10(maxval), logscale=False, clip_on=False, **kwargs) # res = plotUnifLogXMarkers(x2, y2, nbperdecade, logscale=False, **kwargs) if maxevals: # Should cover the case where maxevals is None or empty x3 = np.median(maxevals) if (x3 <= maxval and # np.any(x2 <= x3) and # maxval < median(maxevals) not plt.getp(res[-1], 'label').startswith('best') ): # TODO: HACK for not considering the best 2009 line try: y3 = y2[x2<=x3][-1] # find right y-value for x3==median(maxevals) except IndexError: # median(maxevals) is smaller than any data, can only happen because of CrE? y3 = y2[0] h = plt.plot((x3,), (y3,), marker=median_max_evals_marker_format[0], markersize=median_max_evals_marker_format[1], markeredgewidth=median_max_evals_marker_format[2], # marker='x', markersize=24, markeredgewidth=3, markeredgecolor=plt.getp(res[0], 'color'), ls=plt.getp(res[0], 'ls'), color=plt.getp(res[0], 'color')) h.extend(res) res = h # so the last element in res still has the label. # Only take sequences for x and y! return res def plotLegend(handles, maxval): """Display right-side legend. :param float maxval: rightmost x boundary :returns: list of (ordered) labels and handles. The figure is stopped at maxval (upper x-bound), and the graphs in the figure are prolonged with straight lines to the right to connect with labels of the graphs (uniformly spread out vertically). The order of the graphs at the upper x-bound line give the order of the labels, in case of ties, the best is the graph for which the x-value of the first step (from the right) is smallest. The annotation string is stripped from preceeding pathnames. """ reslabels = [] reshandles = [] ys = {} lh = 0 for h in handles: x2 = [] y2 = [] for i in h: x2.append(plt.getp(i, "xdata")) y2.append(plt.getp(i, "ydata")) x2 = np.array(np.hstack(x2)) y2 = np.array(np.hstack(y2)) tmp = np.argsort(x2) x2 = x2[tmp] y2 = y2[tmp] h = h[-1] # we expect the label to be in the last element of h tmp = (x2 <= maxval) try: x2bis = x2[y2 < y2[tmp][-1]][-1] except IndexError: # there is no data with a y smaller than max(y) x2bis = 0. 
ys.setdefault(y2[tmp][-1], {}).setdefault(x2bis, []).append(h) lh += 1 if len(show_algorithms) > 0: lh = min(lh, len(show_algorithms)) if lh <= 1: lh = 2 fontsize = genericsettings.minmax_algorithm_fontsize[0] + np.min((1, np.exp(9-lh))) * ( genericsettings.minmax_algorithm_fontsize[-1] - genericsettings.minmax_algorithm_fontsize[0]) i = 0 # loop over the elements of ys for j in sorted(ys.keys()): for k in reversed(sorted(ys[j].keys())): #enforce best ever comes last in case of equality tmp = [] for h in ys[j][k]: if plt.getp(h, 'label') == 'best 2009': tmp.insert(0, h) else: tmp.append(h) tmp.reverse() ys[j][k] = tmp for h in ys[j][k]: if (not plt.getp(h, 'label').startswith('_line') and (len(show_algorithms) == 0 or plt.getp(h, 'label') in show_algorithms)): y = 0.02 + i * 0.96/(lh-1) tmp = {} for attr in ('lw', 'ls', 'marker', 'markeredgewidth', 'markerfacecolor', 'markeredgecolor', 'markersize', 'zorder'): tmp[attr] = plt.getp(h, attr) legx = maxval**annotation_line_end_relative if 'marker' in attr: legx = maxval**annotation_line_end_relative # reshandles.extend(plt_plot((maxval, legx), (j, y), reshandles.extend(plt_plot((maxval, legx), (j, y), color=plt.getp(h, 'markeredgecolor'), **tmp)) reshandles.append( plt.text(maxval**(0.02 + annotation_line_end_relative), y, toolsdivers.str_to_latex(toolsdivers.strip_pathname1(plt.getp(h, 'label'))), horizontalalignment="left", verticalalignment="center", size=fontsize)) reslabels.append(plt.getp(h, 'label')) #set_trace() i += 1 #plt.axvline(x=maxval, color='k') # Not as efficient? reshandles.append(plt_plot((maxval, maxval), (0., 1.), color='k')) reslabels.reverse() plt.xlim(xmax=maxval**annotation_space_end_relative) return reslabels, reshandles def plot(dsList, targets=None, craftingeffort=0., **kwargs): """This function is obsolete? Generates a graph of the run length distribution of an algorithm. We display the empirical cumulative distribution function ECDF of the bootstrapped distribution of the runlength for an algorithm (in number of function evaluations) to reach the target functions value :py:data:`targets`. :param DataSetList dsList: data set for one algorithm :param seq targets: target function values :param float crafting effort: the data will be multiplied by the exponential of this value :param dict kwargs: additional parameters provided to plot function. :returns: handles """ if targets is None: targets = target_values # set above or in config.py try: if np.min(targets) >= 1: ValueError('smallest target f-value is not smaller than one, use ``pproc.TargetValues(targets)`` to prevent this error') targets = pp.TargetValues(targets) except TypeError: pass res = [] assert len(pp.DataSetList(dsList).dictByDim()) == 1 # We never integrate over dimensions... 
data = [] maxevals = [] for entry in dsList: for t in targets((entry.funcId, entry.dim)): divisor = entry.dim if divide_by_dimension else 1 x = [np.inf] * perfprofsamplesize runlengthunsucc = [] evals = entry.detEvals([t])[0] runlengthsucc = evals[np.isnan(evals) == False] / divisor runlengthunsucc = entry.maxevals[np.isnan(evals)] / divisor if len(runlengthsucc) > 0: x = toolsstats.drawSP(runlengthsucc, runlengthunsucc, percentiles=[50], samplesize=perfprofsamplesize)[1] data.extend(x) maxevals.extend(runlengthunsucc) # Display data data = np.array(data) data = data[np.isnan(data)==False] # Take away the nans n = len(data) data = data[np.isinf(data)==False] # Take away the infs # data = data[data <= maxval] # Take away rightmost data data = np.exp(craftingeffort) * data # correction by crafting effort CrE if len(data) == 0: # data is empty. res = pprldistr.plotECDF(np.array((1., )), n=np.inf, **kwargs) else: res = pprldistr.plotECDF(np.array(data), n=n, **kwargs) #plotdata(np.array(data), x_limit, maxevals, # CrE=0., **kwargs) if maxevals: # Should cover the case where maxevals is None or empty x3 = np.median(maxevals) if np.any(data > x3): y3 = float(np.sum(data <= x3)) / n h = plt_plot((x3,), (y3,), marker='x', markersize=24, markeredgewidth=3, markeredgecolor=plt.getp(res[0], 'color'), ls='', color=plt.getp(res[0], 'color')) h.extend(res) res = h # so the last element in res still has the label. return res def all_single_functions(dictAlg, sortedAlgs=None, outputdir='.', verbose=0): dictFG = pp.dictAlgByFun(dictAlg) for fg, tmpdictAlg in dictFG.iteritems(): dictDim = pp.dictAlgByDim(tmpdictAlg) for d, entries in dictDim.iteritems(): single_fct_output_dir = (outputdir.rstrip(os.sep) + os.sep + 'pprldmany-single-functions' # + os.sep + ('f%03d' % fg) ) if not os.path.exists(single_fct_output_dir): os.makedirs(single_fct_output_dir) main(entries, order=sortedAlgs, outputdir=single_fct_output_dir, info=('f%03d_%02dD' % (fg, d)), verbose=verbose) def main(dictAlg, isBiobjective, order=None, outputdir='.', info='default', dimension=None, verbose=True): """Generates a figure showing the performance of algorithms. From a dictionary of :py:class:`DataSetList` sorted by algorithms, generates the cumulative distribution function of the bootstrap distribution of ERT for algorithms on multiple functions for multiple targets altogether. :param dict dictAlg: dictionary of :py:class:`DataSetList` instances one instance is equivalent to one algorithm, :param list targets: target function values :param list order: sorted list of keys to dictAlg for plotting order :param str outputdir: output directory :param str info: output file name suffix :param bool verbose: controls verbosity """ global x_limit # late assignment of default, because it can be set to None in config global divide_by_dimension # not fully implemented/tested yet if 'x_limit' not in globals() or x_limit is None: x_limit = x_limit_default tmp = pp.dictAlgByDim(dictAlg) # tmp = pp.DictAlg(dictAlg).by_dim() if len(tmp) != 1 and dimension is None: raise ValueError('We never integrate over dimension.') if dimension is not None: if dimension not in tmp.keys(): raise ValueError('dimension %d not in dictAlg dimensions %s' % (dimension, str(tmp.keys()))) tmp = {dimension: tmp[dimension]} dim = tmp.keys()[0] divisor = dim if divide_by_dimension else 1 algorithms_with_data = [a for a in dictAlg.keys() if dictAlg[a] != []] dictFunc = pp.dictAlgByFun(dictAlg) # Collect data # Crafting effort correction: should we consider any? 
CrEperAlg = {} for alg in algorithms_with_data: CrE = 0. if 1 < 3 and dictAlg[alg][0].algId == 'GLOBAL': tmp = dictAlg[alg].dictByNoise() assert len(tmp.keys()) == 1 if tmp.keys()[0] == 'noiselessall': CrE = 0.5117 elif tmp.keys()[0] == 'nzall': CrE = 0.6572 CrEperAlg[alg] = CrE if CrE != 0.0: print 'Crafting effort for', alg, 'is', CrE dictData = {} # list of (ert per function) per algorithm dictMaxEvals = {} # list of (maxevals per function) per algorithm bestERT = [] # best ert per function # funcsolved = [set()] * len(targets) # number of functions solved per target xbest2009 = [] maxevalsbest2009 = [] for f, dictAlgperFunc in dictFunc.iteritems(): if function_IDs and f not in function_IDs: continue # print target_values((f, dim)) for j, t in enumerate(target_values((f, dim))): # for j, t in enumerate(genericsettings.current_testbed.ecdf_target_values(1e2, f)): # funcsolved[j].add(f) for alg in algorithms_with_data: x = [np.inf] * perfprofsamplesize runlengthunsucc = [] try: entry = dictAlgperFunc[alg][0] # one element per fun and per dim. evals = entry.detEvals([t])[0] assert entry.dim == dim runlengthsucc = evals[np.isnan(evals) == False] / divisor runlengthunsucc = entry.maxevals[np.isnan(evals)] / divisor if len(runlengthsucc) > 0: x = toolsstats.drawSP(runlengthsucc, runlengthunsucc, percentiles=[50], samplesize=perfprofsamplesize)[1] except (KeyError, IndexError): #set_trace() warntxt = ('Data for algorithm %s on function %d in %d-D ' % (alg, f, dim) + 'are missing.\n') warnings.warn(warntxt) dictData.setdefault(alg, []).extend(x) dictMaxEvals.setdefault(alg, []).extend(runlengthunsucc) displaybest2009 = not isBiobjective #disabled until we find the bug if displaybest2009: #set_trace() bestalgentries = bestalg.loadBestAlgorithm(isBiobjective) bestalgentry = bestalgentries[(dim, f)] bestalgevals = bestalgentry.detEvals(target_values((f, dim))) # print bestalgevals for j in range(len(bestalgevals[0])): if bestalgevals[1][j]: evals = bestalgevals[0][j] #set_trace() assert dim == bestalgentry.dim runlengthsucc = evals[np.isnan(evals) == False] / divisor runlengthunsucc = bestalgentry.maxevals[bestalgevals[1][j]][np.isnan(evals)] / divisor x = toolsstats.drawSP(runlengthsucc, runlengthunsucc, percentiles=[50], samplesize=perfprofsamplesize)[1] else: x = perfprofsamplesize * [np.inf] runlengthunsucc = [] xbest2009.extend(x) maxevalsbest2009.extend(runlengthunsucc) if order is None: order = dictData.keys() # Display data lines = [] if displaybest2009: args = {'ls': '-', 'linewidth': 6, 'marker': 'D', 'markersize': 11., 'markeredgewidth': 1.5, 'markerfacecolor': refcolor, 'markeredgecolor': refcolor, 'color': refcolor, 'label': 'best 2009', 'zorder': -1} lines.append(plotdata(np.array(xbest2009), x_limit, maxevalsbest2009, CrE = 0., **args)) def algname_to_label(algname, dirname=None): """to be extended to become generally useful""" if isinstance(algname, (tuple, list)): # not sure this is needed return ' '.join([str(name) for name in algname]) return str(algname) for i, alg in enumerate(order): try: data = dictData[alg] maxevals = dictMaxEvals[alg] except KeyError: continue args = styles[(i) % len(styles)] args['linewidth'] = 1.5 args['markersize'] = 12. 
args['markeredgewidth'] = 1.5 args['markerfacecolor'] = 'None' args['markeredgecolor'] = args['color'] args['label'] = algname_to_label(alg) #args['markevery'] = perfprofsamplesize # option available in latest version of matplotlib #elif len(show_algorithms) > 0: #args['color'] = 'wheat' #args['ls'] = '-' #args['zorder'] = -1 # plotdata calls pprldistr.plotECDF which calls ppfig.plotUnifLog... which does the work lines.append(plotdata(np.array(data), x_limit, maxevals, CrE=CrEperAlg[alg], **args)) labels, handles = plotLegend(lines, x_limit) if True: # isLateXLeg: fileName = os.path.join(outputdir,'pprldmany_%s.tex' % (info)) with open(fileName, 'w') as f: f.write(r'\providecommand{\nperfprof}{7}') algtocommand = {} # latex commands for i, alg in enumerate(order): tmp = r'\alg%sperfprof' % pptex.numtotext(i) f.write(r'\providecommand{%s}{\StrLeft{%s}{\nperfprof}}' % (tmp, toolsdivers.str_to_latex( toolsdivers.strip_pathname2(algname_to_label(alg))))) algtocommand[algname_to_label(alg)] = tmp if displaybest2009: tmp = r'\algzeroperfprof' f.write(r'\providecommand{%s}{best 2009}' % (tmp)) algtocommand['best 2009'] = tmp commandnames = [] for label in labels: commandnames.append(algtocommand[label]) # f.write(headleg) if len(order) > 28: # latex sidepanel won't work well for more than 25 algorithms, but original labels are also clipped f.write(r'\providecommand{\perfprofsidepanel}{\mbox{%s}\vfill\mbox{%s}}' % (commandnames[0], commandnames[-1])) else: fontsize_command = r'\tiny{}' if len(order) > 19 else '' f.write(r'\providecommand{\perfprofsidepanel}{{%s\mbox{%s}' % (fontsize_command, commandnames[0])) # TODO: check len(labels) > 0 for i in range(1, len(labels)): f.write('\n' + r'\vfill \mbox{%s}' % commandnames[i]) f.write('}}\n') # f.write(footleg) if verbose: print 'Wrote right-hand legend in %s' % fileName figureName = os.path.join(outputdir,'pprldmany_%s' % (info)) #beautify(figureName, funcsolved, x_limit*x_annote_factor, False, fileFormat=figformat) beautify() text = ppfig.consecutiveNumbers(sorted(dictFunc.keys()), 'f') text += ',%d-D' % dim # TODO: this is strange when different dimensions are plotted plt.text(0.01, 0.98, text, horizontalalignment="left", verticalalignment="top", transform=plt.gca().transAxes) if len(dictFunc) == 1: plt.title(' '.join((str(dictFunc.keys()[0]), genericsettings.current_testbed.short_names[dictFunc.keys()[0]]))) a = plt.gca() plt.xlim(xmin=1e-0, xmax=x_limit**annotation_space_end_relative) xticks, labels = plt.xticks() tmp = [] for i in xticks: tmp.append('%d' % round(np.log10(i))) a.set_xticklabels(tmp) if save_figure: ppfig.saveFigure(figureName, verbose=verbose) if len(dictFunc) == 1: ppfig.save_single_functions_html( os.path.join(outputdir, 'pprldmany'), '', # algorithms names are clearly visible in the figure add_to_names='_%02dD' %(dim), algorithmCount=ppfig.AlgorithmCount.NON_SPECIFIED ) if close_figure: plt.close() # TODO: should return status or sthg if __name__ == "__main__": # should become a test case import sys import bbob_pproc sys.path.append('.')
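For orientation, a generic sketch of the kind of empirical cumulative distribution curve the functions above assemble, using synthetic run lengths (inf marks an unsuccessful run); none of the values come from BBOB data.

import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

runlengths = np.array([120., 340., 900., 2500., np.inf, np.inf])  # inf = unsuccessful run
finite = np.sort(runlengths[np.isfinite(runlengths)])
proportion = np.arange(1, len(finite) + 1) / float(len(runlengths))  # fraction solved
plt.step(np.log10(finite), proportion, where='post')
plt.xlabel('log10 of # f-evals')
plt.ylabel('Proportion of function+target pairs')
plt.savefig('ecdf_sketch.png')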
bsd-3-clause
hainm/scikit-learn
benchmarks/bench_sample_without_replacement.py
397
8008
""" Benchmarks for sampling without replacement of integer. """ from __future__ import division from __future__ import print_function import gc import sys import optparse from datetime import datetime import operator import matplotlib.pyplot as plt import numpy as np import random from sklearn.externals.six.moves import xrange from sklearn.utils.random import sample_without_replacement def compute_time(t_start, delta): mu_second = 0.0 + 10 ** 6 # number of microseconds in a second return delta.seconds + delta.microseconds / mu_second def bench_sample(sampling, n_population, n_samples): gc.collect() # start time t_start = datetime.now() sampling(n_population, n_samples) delta = (datetime.now() - t_start) # stop time time = compute_time(t_start, delta) return time if __name__ == "__main__": ########################################################################### # Option parser ########################################################################### op = optparse.OptionParser() op.add_option("--n-times", dest="n_times", default=5, type=int, help="Benchmark results are average over n_times experiments") op.add_option("--n-population", dest="n_population", default=100000, type=int, help="Size of the population to sample from.") op.add_option("--n-step", dest="n_steps", default=5, type=int, help="Number of step interval between 0 and n_population.") default_algorithms = "custom-tracking-selection,custom-auto," \ "custom-reservoir-sampling,custom-pool,"\ "python-core-sample,numpy-permutation" op.add_option("--algorithm", dest="selected_algorithm", default=default_algorithms, type=str, help="Comma-separated list of transformer to benchmark. " "Default: %default. \nAvailable: %default") # op.add_option("--random-seed", # dest="random_seed", default=13, type=int, # help="Seed used by the random number generators.") (opts, args) = op.parse_args() if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) selected_algorithm = opts.selected_algorithm.split(',') for key in selected_algorithm: if key not in default_algorithms.split(','): raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)." 
% (key, default_algorithms)) ########################################################################### # List sampling algorithm ########################################################################### # We assume that sampling algorithm has the following signature: # sample(n_population, n_sample) # sampling_algorithm = {} ########################################################################### # Set Python core input sampling_algorithm["python-core-sample"] = \ lambda n_population, n_sample: \ random.sample(xrange(n_population), n_sample) ########################################################################### # Set custom automatic method selection sampling_algorithm["custom-auto"] = \ lambda n_population, n_samples, random_state=None: \ sample_without_replacement(n_population, n_samples, method="auto", random_state=random_state) ########################################################################### # Set custom tracking based method sampling_algorithm["custom-tracking-selection"] = \ lambda n_population, n_samples, random_state=None: \ sample_without_replacement(n_population, n_samples, method="tracking_selection", random_state=random_state) ########################################################################### # Set custom reservoir based method sampling_algorithm["custom-reservoir-sampling"] = \ lambda n_population, n_samples, random_state=None: \ sample_without_replacement(n_population, n_samples, method="reservoir_sampling", random_state=random_state) ########################################################################### # Set custom reservoir based method sampling_algorithm["custom-pool"] = \ lambda n_population, n_samples, random_state=None: \ sample_without_replacement(n_population, n_samples, method="pool", random_state=random_state) ########################################################################### # Numpy permutation based sampling_algorithm["numpy-permutation"] = \ lambda n_population, n_sample: \ np.random.permutation(n_population)[:n_sample] ########################################################################### # Remove unspecified algorithm sampling_algorithm = dict((key, value) for key, value in sampling_algorithm.items() if key in selected_algorithm) ########################################################################### # Perform benchmark ########################################################################### time = {} n_samples = np.linspace(start=0, stop=opts.n_population, num=opts.n_steps).astype(np.int) ratio = n_samples / opts.n_population print('Benchmarks') print("===========================") for name in sorted(sampling_algorithm): print("Perform benchmarks for %s..." 
% name, end="") time[name] = np.zeros(shape=(opts.n_steps, opts.n_times)) for step in xrange(opts.n_steps): for it in xrange(opts.n_times): time[name][step, it] = bench_sample(sampling_algorithm[name], opts.n_population, n_samples[step]) print("done") print("Averaging results...", end="") for name in sampling_algorithm: time[name] = np.mean(time[name], axis=1) print("done\n") # Print results ########################################################################### print("Script arguments") print("===========================") arguments = vars(opts) print("%s \t | %s " % ("Arguments".ljust(16), "Value".center(12),)) print(25 * "-" + ("|" + "-" * 14) * 1) for key, value in arguments.items(): print("%s \t | %s " % (str(key).ljust(16), str(value).strip().center(12))) print("") print("Sampling algorithm performance:") print("===============================") print("Results are averaged over %s repetition(s)." % opts.n_times) print("") fig = plt.figure('scikit-learn sample w/o replacement benchmark results') plt.title("n_population = %s, n_times = %s" % (opts.n_population, opts.n_times)) ax = fig.add_subplot(111) for name in sampling_algorithm: ax.plot(ratio, time[name], label=name) ax.set_xlabel('ratio of n_sample / n_population') ax.set_ylabel('Time (s)') ax.legend() # Sort legend labels handles, labels = ax.get_legend_handles_labels() hl = sorted(zip(handles, labels), key=operator.itemgetter(1)) handles2, labels2 = zip(*hl) ax.legend(handles2, labels2, loc=0) plt.show()
bsd-3-clause
MadsJensen/agency_connectivity
tf_functions.py
1
5293
""" Functions for TF analysis. @author: mje @email: mads [] cnru.dk """ import mne from mne.time_frequency import (psd_multitaper, tfr_multitaper, tfr_morlet, cwt_morlet) from mne.viz import iter_topography import matplotlib.pyplot as plt import numpy as np def calc_psd_epochs(epochs, plot=False): """Calculate PSD for epoch. Parameters ---------- epochs : list of epochs plot : bool To show plot of the psds. It will be average for each condition that is shown. Returns ------- psds_vol : numpy array The psds for the voluntary condition. psds_invol : numpy array The psds for the involuntary condition. """ tmin, tmax = -0.5, 0.5 fmin, fmax = 2, 90 # n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2 psds_vol, freqs = psd_multitaper(epochs["voluntary"], tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax) psds_inv, freqs = psd_multitaper(epochs["involuntary"], tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax) psds_vol = 20 * np.log10(psds_vol) # scale to dB psds_inv = 20 * np.log10(psds_inv) # scale to dB if plot: def my_callback(ax, ch_idx): """Executed once you click on one of the channels in the plot.""" ax.plot(freqs, psds_vol_plot[ch_idx], color='red', label="voluntary") ax.plot(freqs, psds_inv_plot[ch_idx], color='blue', label="involuntary") ax.set_xlabel = 'Frequency (Hz)' ax.set_ylabel = 'Power (dB)' ax.legend() psds_vol_plot = psds_vol.copy().mean(axis=0) psds_inv_plot = psds_inv.copy().mean(axis=0) for ax, idx in iter_topography(epochs.info, fig_facecolor='k', axis_facecolor='k', axis_spinecolor='k', on_pick=my_callback): ax.plot(psds_vol_plot[idx], color='red', label="voluntary") ax.plot(psds_inv_plot[idx], color='blue', label="involuntary") plt.legend() plt.gcf().suptitle('Power spectral densities') plt.show() return psds_vol, psds_inv, freqs def multitaper_analysis(epochs): """ Parameters ---------- epochs : list of epochs Returns ------- result : numpy array The result of the multitaper analysis. """ frequencies = np.arange(6., 90., 2.) n_cycles = frequencies / 2. time_bandwidth = 4 # Same time-smoothing as (1), 7 tapers. power, plv = tfr_multitaper(epochs, freqs=frequencies, n_cycles=n_cycles, time_bandwidth=time_bandwidth, return_itc=True) return power, plv def morlet_analysis(epochs, n_cycles=4): """ Parameters ---------- epochs : list of epochs Returns ------- result : numpy array The result of the multitaper analysis. """ frequencies = np.arange(6., 30., 2.) # n_cycles = frequencies / 2. power, plv = tfr_morlet(epochs, freqs=frequencies, n_cycles=n_cycles, return_itc=True, verbose=True) return power, plv def single_trial_tf(epochs, frequencies, n_cycles=4.): """ Parameters ---------- epochs : Epochs object The epochs to calculate TF analysis on. frequencies : numpy array n_cycles : int The number of cycles for the Morlet wavelets. Returns ------- results : numpy array """ results = [] for j in range(len(epochs)): tfr = cwt_morlet(epochs.get_data()[j], sfreq=epochs.info["sfreq"], freqs=frequencies, use_fft=True, n_cycles=n_cycles, # decim=2, zero_mean=False) results.append(tfr) return results def calc_spatial_resolution(freqs, n_cycles): """Calculate the spatial resolution for a Morlet wavelet. The formula is: (freqs * cycles)*2. Parameters ---------- freqs : numpy array The frequencies to be calculated. n_cycles : int or numpy array The number of cycles used. Can be integer for the same cycle for all frequencies, or a numpy array for individual cycles per frequency. 
Returns ------- result : numpy array The results """ return (freqs / float(n_cycles)) * 2 def calc_wavelet_duration(freqs, n_cycles): """Calculate the wavelet duration for a Morlet wavelet in ms. The formula is: (cycle / frequencies / pi)*1000 Parameters ---------- freqs : numpy array The frequencies to be calculated. n_cycles : int or numpy array The number of cycles used. Can be integer for the same cycle for all frequencies, or a numpy array for individual cycles per frequency. Returns ------- result : numpy array The results """ return (float(n_cycles) / freqs / np.pi) * 1000
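# --- Editor's addition (illustrative sketch, not part of the original module).
# Quick check of the two helper formulas above for a typical Morlet setup; only
# numpy and the functions defined in this file are used. Note that the docstring
# of calc_spatial_resolution quotes "(freqs * cycles)*2" while the code computes
# (freqs / n_cycles) * 2; the example simply exercises the code as written.
if __name__ == "__main__":
    example_freqs = np.arange(6., 30., 2.)
    print(calc_spatial_resolution(example_freqs, n_cycles=4))  # bandwidth in Hz
    print(calc_wavelet_duration(example_freqs, n_cycles=4))    # duration in ms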
bsd-3-clause
yangw1234/BigDL
pyspark/bigdl/optim/optimizer.py
2
40389
# # Copyright 2016 The BigDL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import multiprocessing import os import sys from distutils.dir_util import mkpath from py4j.java_gateway import JavaObject from pyspark.rdd import RDD from bigdl.util.common import DOUBLEMAX from bigdl.util.common import JTensor from bigdl.util.common import JavaValue from bigdl.util.common import callBigDlFunc from bigdl.util.common import callJavaFunc from bigdl.util.common import get_node_and_core_number from bigdl.util.common import init_engine from bigdl.util.common import to_list from bigdl.dataset.dataset import * if sys.version >= '3': long = int unicode = str class Top1Accuracy(JavaValue): """ Caculate the percentage that output's max probability index equals target. >>> top1 = Top1Accuracy() creating: createTop1Accuracy """ def __init__(self, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type) class TreeNNAccuracy(JavaValue): """ Caculate the percentage that output's max probability index equals target. >>> top1 = TreeNNAccuracy() creating: createTreeNNAccuracy """ def __init__(self, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type) class Top5Accuracy(JavaValue): """ Caculate the percentage that output's max probability index equals target. >>> top5 = Top5Accuracy() creating: createTop5Accuracy """ def __init__(self, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type) class Loss(JavaValue): """ This evaluation method is calculate loss of output with respect to target >>> from bigdl.nn.criterion import ClassNLLCriterion >>> loss = Loss() creating: createClassNLLCriterion creating: createLoss >>> loss = Loss(ClassNLLCriterion()) creating: createClassNLLCriterion creating: createLoss """ def __init__(self, cri=None, bigdl_type="float"): from bigdl.nn.criterion import ClassNLLCriterion if cri is None: cri = ClassNLLCriterion() JavaValue.__init__(self, None, bigdl_type, cri) class HitRatio(JavaValue): """ Hit Ratio(HR) used in recommandation application. HR intuitively measures whether the test item is present on the top-k list. >>> hr10 = HitRatio(k = 10) creating: createHitRatio """ def __init__(self, k = 10, neg_num = 100, bigdl_type="float"): """ Create hit ratio validation method. :param k: top k :param neg_num: number of negative items. """ JavaValue.__init__(self, None, bigdl_type, k, neg_num) class NDCG(JavaValue): """ Normalized Discounted Cumulative Gain(NDCG). NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks. >>> ndcg = NDCG(k = 10) creating: createNDCG """ def __init__(self, k = 10, neg_num = 100, bigdl_type="float"): """ Create NDCG validation method. :param k: top k :param neg_num: number of negative items. """ JavaValue.__init__(self, None, bigdl_type, k, neg_num) class MAE(JavaValue): """ This evaluation method calculates the mean absolute error of output with respect to target. 
>>> mae = MAE() creating: createMAE """ def __init__(self, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type) class MaxIteration(JavaValue): """ A trigger specifies a timespot or several timespots during training, and a corresponding action will be taken when the timespot(s) is reached. MaxIteration is a trigger that triggers an action when training reaches the number of iterations specified by "max". Usually used as end_trigger when creating an Optimizer. >>> maxIteration = MaxIteration(20) creating: createMaxIteration """ def __init__(self, max, bigdl_type="float"): """ Create a MaxIteration trigger. :param max: max """ JavaValue.__init__(self, None, bigdl_type, max) class MaxEpoch(JavaValue): """ A trigger specifies a timespot or several timespots during training, and a corresponding action will be taken when the timespot(s) is reached. MaxEpoch is a trigger that triggers an action when training reaches the number of epochs specified by "max_epoch". Usually used as end_trigger when creating an Optimizer. >>> maxEpoch = MaxEpoch(2) creating: createMaxEpoch """ def __init__(self, max_epoch, bigdl_type="float"): """ Create a MaxEpoch trigger. :param max_epoch: max_epoch """ JavaValue.__init__(self, None, bigdl_type, max_epoch) class EveryEpoch(JavaValue): """ A trigger specifies a timespot or several timespots during training, and a corresponding action will be taken when the timespot(s) is reached. EveryEpoch is a trigger that triggers an action when each epoch finishs. Could be used as trigger in setvalidation and setcheckpoint in Optimizer, and also in TrainSummary.set_summary_trigger. >>> everyEpoch = EveryEpoch() creating: createEveryEpoch """ def __init__(self, bigdl_type="float"): """ Create a EveryEpoch trigger. """ JavaValue.__init__(self, None, bigdl_type) class SeveralIteration(JavaValue): """ A trigger specifies a timespot or several timespots during training, and a corresponding action will be taken when the timespot(s) is reached. SeveralIteration is a trigger that triggers an action every "n" iterations. Could be used as trigger in setvalidation and setcheckpoint in Optimizer, and also in TrainSummary.set_summary_trigger. >>> serveralIteration = SeveralIteration(2) creating: createSeveralIteration """ def __init__(self, interval, bigdl_type="float"): """ Create a SeveralIteration trigger. :param interval: interval is the "n" where an action is triggeredevery "n" iterations """ JavaValue.__init__(self, None, bigdl_type, interval) class MaxScore(JavaValue): """ A trigger that triggers an action when validation score larger than "max" score >>> maxScore = MaxScore(0.4) creating: createMaxScore """ def __init__(self, max, bigdl_type="float"): """ Create a MaxScore trigger. :param max: max score """ JavaValue.__init__(self, None, bigdl_type, max) class MinLoss(JavaValue): """ A trigger that triggers an action when training loss less than "min" loss >>> minLoss = MinLoss(0.1) creating: createMinLoss """ def __init__(self, min, bigdl_type="float"): """ Create a MinLoss trigger. :param min: min loss """ JavaValue.__init__(self, None, bigdl_type, min) class Poly(JavaValue): """ A learning rate decay policy, where the effective learning rate follows a polynomial decay, to be zero by the max_iteration. 
Calculation: base_lr (1 - iter/max_iteration) ^ (power) :param power: coeffient of decay, refer to calculation formula :param max_iteration: max iteration when lr becomes zero >>> poly = Poly(0.5, 2) creating: createPoly """ def __init__(self, power, max_iteration, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type, power, max_iteration) class Exponential(JavaValue): """ [[Exponential]] is a learning rate schedule, which rescale the learning rate by lr_{n + 1} = lr * decayRate `^` (iter / decayStep) :param decay_step the inteval for lr decay :param decay_rate decay rate :param stair_case if true, iter / decayStep is an integer division and the decayed learning rate follows a staircase function. >>> exponential = Exponential(100, 0.1) creating: createExponential """ def __init__(self, decay_step, decay_rate, stair_case=False, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type, decay_step, decay_rate, stair_case) class Step(JavaValue): """ A learning rate decay policy, where the effective learning rate is calculated as base_lr * gamma ^ (floor(iter / step_size)) :param step_size: :param gamma: >>> step = Step(2, 0.3) creating: createStep """ def __init__(self, step_size, gamma, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type, step_size, gamma) class Default(JavaValue): """ A learning rate decay policy, where the effective learning rate is calculated as base_lr * gamma ^ (floor(iter / step_size)) :param step_size :param gamma >>> step = Default() creating: createDefault """ def __init__(self, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type) class Plateau(JavaValue): """ Plateau is the learning rate schedule when a metric has stopped improving. Models often benefit from reducing the learning rate by a factor of 2-10 once learning stagnates. It monitors a quantity and if no improvement is seen for a 'patience' number of epochs, the learning rate is reduced. :param monitor quantity to be monitored, can be Loss or score :param factor factor by which the learning rate will be reduced. new_lr = lr * factor :param patience number of epochs with no improvement after which learning rate will be reduced. :param mode one of {min, max}. In min mode, lr will be reduced when the quantity monitored has stopped decreasing; in max mode it will be reduced when the quantity monitored has stopped increasing :param epsilon threshold for measuring the new optimum, to only focus on significant changes. :param cooldown number of epochs to wait before resuming normal operation after lr has been reduced. :param min_lr lower bound on the learning rate. >>> plateau = Plateau("score") creating: createPlateau """ def __init__(self, monitor, factor=0.1, patience=10, mode="min", epsilon=1e-4, cooldown=0, min_lr=0.0, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type, monitor, factor, patience, mode, epsilon, cooldown, min_lr) class Warmup(JavaValue): """ A learning rate gradual increase policy, where the effective learning rate increase delta after each iteration. Calculation: base_lr + delta * iteration :param delta: increase amount after each iteration >>> warmup = Warmup(0.05) creating: createWarmup """ def __init__(self, delta, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type, delta) class SequentialSchedule(JavaValue): """ Stack several learning rate schedulers. 
:param iterationPerEpoch: iteration numbers per epoch >>> sequentialSchedule = SequentialSchedule(5) creating: createSequentialSchedule >>> poly = Poly(0.5, 2) creating: createPoly >>> test = sequentialSchedule.add(poly, 5) """ def __init__(self, iteration_per_epoch, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type, iteration_per_epoch) def add(self, scheduler, max_iteration, bigdl_type="float"): """ Add a learning rate scheduler to the contained `schedules` :param scheduler: learning rate scheduler to be add :param max_iteration: iteration numbers this scheduler will run """ return callBigDlFunc(bigdl_type, "addScheduler", self.value, scheduler, max_iteration) class OptimMethod(JavaValue): def __init__(self, jvalue, bigdl_type, *args): if (jvalue): assert(type(jvalue) == JavaObject) self.value = jvalue else: self.value = callBigDlFunc( bigdl_type, JavaValue.jvm_class_constructor(self), *args) self.bigdl_type = bigdl_type @staticmethod def load(path, bigdl_type="float"): """ load optim method :param path: file path """ return callBigDlFunc(bigdl_type, "loadOptimMethod", path) def save(self, path, overWrite): """ save OptimMethod :param path path :param overWrite whether to overwrite """ method=self.value return callBigDlFunc(self.bigdl_type, "saveOptimMethod", method, path, overWrite) class SGD(OptimMethod): """ A plain implementation of SGD :param learningrate learning rate :param learningrate_decay learning rate decay :param weightdecay weight decay :param momentum momentum :param dampening dampening for momentum :param nesterov enables Nesterov momentum :param learningrates 1D tensor of individual learning rates :param weightdecays 1D tensor of individual weight decays >>> sgd = SGD() creating: createDefault creating: createSGD """ def __init__(self, learningrate=1e-3, learningrate_decay=0.0, weightdecay=0.0, momentum=0.0, dampening=DOUBLEMAX, nesterov=False, leaningrate_schedule=None, learningrates=None, weightdecays=None, bigdl_type="float"): super(SGD, self).__init__(None, bigdl_type, learningrate, learningrate_decay, weightdecay, momentum, dampening, nesterov, leaningrate_schedule if (leaningrate_schedule) else Default(), JTensor.from_ndarray(learningrates), JTensor.from_ndarray(weightdecays)) class Adagrad(OptimMethod): """ An implementation of Adagrad. See the original paper: http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf :param learningrate learning rate :param learningrate_decay learning rate decay :param weightdecay weight decay >>> adagrad = Adagrad() creating: createAdagrad """ def __init__(self, learningrate=1e-3, learningrate_decay=0.0, weightdecay=0.0, bigdl_type="float"): super(Adagrad, self).__init__(None, bigdl_type, learningrate, learningrate_decay, weightdecay) class LBFGS(OptimMethod): """ This implementation of L-BFGS relies on a user-provided line search function (state.lineSearch). If this function is not provided, then a simple learningRate is used to produce fixed size steps. Fixed size steps are much less costly than line searches, and can be useful for stochastic problems. The learning rate is used even when a line search is provided. This is also useful for large-scale stochastic problems, where opfunc is a noisy approximation of f(x). In that case, the learning rate allows a reduction of confidence in the step size. 
:param max_iter Maximum number of iterations allowed :param max_eval Maximum number of function evaluations :param tolfun Termination tolerance on the first-order optimality :param tolx Termination tol on progress in terms of func/param changes :param ncorrection :param learningrate :param verbose :param linesearch A line search function :param linesearch_options If no line search provided, then a fixed step size is used >>> lbfgs = LBFGS() creating: createLBFGS """ def __init__(self, max_iter=20, max_eval=DOUBLEMAX, tolfun=1e-5, tolx=1e-9, ncorrection=100, learningrate=1.0, verbose=False, linesearch=None, linesearch_options=None, bigdl_type="float"): if linesearch or linesearch_options: raise ValueError('linesearch and linesearch_options must be None in LBFGS') super(LBFGS, self).__init__(None, bigdl_type, max_iter, max_eval, tolfun, tolx, ncorrection, learningrate, verbose, linesearch, linesearch_options) class Adadelta(OptimMethod): """ Adadelta implementation for SGD: http://arxiv.org/abs/1212.5701 :param decayrate interpolation parameter rho :param epsilon for numerical stability >>> adagrad = Adadelta() creating: createAdadelta """ def __init__(self, decayrate = 0.9, epsilon = 1e-10, bigdl_type="float"): super(Adadelta, self).__init__(None, bigdl_type, decayrate, epsilon) class Adam(OptimMethod): """ An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf :param learningrate learning rate :param learningrate_decay learning rate decay :param beta1 first moment coefficient :param beta2 second moment coefficient :param epsilon for numerical stability >>> adam = Adam() creating: createAdam """ def __init__(self, learningrate = 1e-3, learningrate_decay = 0.0, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, bigdl_type="float"): super(Adam, self).__init__(None, bigdl_type, learningrate, learningrate_decay, beta1, beta2, epsilon) class ParallelAdam(OptimMethod): """ An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf :param learningrate learning rate :param learningrate_decay learning rate decay :param beta1 first moment coefficient :param beta2 second moment coefficient :param epsilon for numerical stability >>> init_engine() >>> pAdam = ParallelAdam() creating: createParallelAdam """ def __init__(self, learningrate = 1e-3, learningrate_decay = 0.0, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, parallel_num = -1, bigdl_type="float"): if parallel_num == -1: parallel_num = get_node_and_core_number()[1] super(ParallelAdam, self).__init__(None, bigdl_type, learningrate, learningrate_decay, beta1, beta2, epsilon, parallel_num) class Ftrl(OptimMethod): """ An implementation of Ftrl https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf. Support L1 penalty, L2 penalty and shrinkage-type L2 penalty. :param learningrate learning rate :param learningrate_power double, must be less or equal to zero. Default is -0.5. :param initial_accumulator_value double, the starting value for accumulators, require zero or positive values. :param l1_regularization_strength double, must be greater or equal to zero. Default is zero. :param l2_regularization_strength double, must be greater or equal to zero. Default is zero. :param l2_shrinkage_regularization_strength double, must be greater or equal to zero. Default is zero. This differs from l2RegularizationStrength above. L2 above is a stabilization penalty, whereas this one is a magnitude penalty. 
>>> ftrl = Ftrl() creating: createFtrl >>> ftrl2 = Ftrl(1e-2, -0.1, 0.2, 0.3, 0.4, 0.5) creating: createFtrl """ def __init__(self, learningrate = 1e-3, learningrate_power = -0.5, initial_accumulator_value = 0.1, l1_regularization_strength = 0.0, l2_regularization_strength = 0.0, l2_shrinkage_regularization_strength = 0.0, bigdl_type="float"): super(Ftrl, self).__init__(None, bigdl_type, learningrate, learningrate_power, initial_accumulator_value, l1_regularization_strength, l2_regularization_strength, l2_shrinkage_regularization_strength) class Adamax(OptimMethod): """ An implementation of Adamax http://arxiv.org/pdf/1412.6980.pdf :param learningrate learning rate :param beta1 first moment coefficient :param beta2 second moment coefficient :param epsilon for numerical stability >>> adagrad = Adamax() creating: createAdamax """ def __init__(self, learningrate = 0.002, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-38, bigdl_type="float"): super(Adamax, self).__init__(None, bigdl_type, learningrate, beta1, beta2, epsilon) class RMSprop(OptimMethod): """ An implementation of RMSprop :param learningrate learning rate :param learningrate_decay learning rate decay :param decayrate decay rate, also called rho :param epsilon for numerical stability >>> adagrad = RMSprop() creating: createRMSprop """ def __init__(self, learningrate = 1e-2, learningrate_decay = 0.0, decayrate = 0.99, epsilon = 1e-8, bigdl_type="float"): super(RMSprop, self).__init__(None, bigdl_type, learningrate, learningrate_decay, decayrate, epsilon) class MultiStep(JavaValue): """ similar to step but it allows non uniform steps defined by stepSizes :param step_size: the series of step sizes used for lr decay :param gamma: coefficient of decay >>> step = MultiStep([2, 5], 0.3) creating: createMultiStep """ def __init__(self, step_sizes, gamma, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type, step_sizes, gamma) class BaseOptimizer(JavaValue): def set_model(self, model): """ Set model. :param model: new model """ self.value.setModel(model.value) def set_criterion(self, criterion): """ set new criterion, for optimizer reuse :param criterion: new criterion :return: """ callBigDlFunc(self.bigdl_type, "setCriterion", self.value, criterion) def set_checkpoint(self, checkpoint_trigger, checkpoint_path, isOverWrite=True): """ Configure checkpoint settings. :param checkpoint_trigger: the interval to write snapshots :param checkpoint_path: the path to write snapshots into :param isOverWrite: whether to overwrite existing snapshots in path.default is True """ if not os.path.exists(checkpoint_path): mkpath(checkpoint_path) callBigDlFunc(self.bigdl_type, "setCheckPoint", self.value, checkpoint_trigger, checkpoint_path, isOverWrite) def set_gradclip_const(self, min_value, max_value): """ Configure constant clipping settings. :param min_value: the minimum value to clip by :param max_value: the maxmimum value to clip by """ callBigDlFunc(self.bigdl_type, "setConstantClip", self.value, min_value, max_value) def set_gradclip_l2norm(self, clip_norm): """ Configure L2 norm clipping settings. :param clip_norm: gradient L2-Norm threshold """ callBigDlFunc(self.bigdl_type, "setL2NormClip", self.value, clip_norm) def disable_gradclip(self): """ disable clipping. """ callBigDlFunc(self.bigdl_type, "disableClip", self.value) # return a module def optimize(self): """ Do an optimization. 
""" jmodel = callJavaFunc(self.value.optimize) from bigdl.nn.layer import Layer return Layer.of(jmodel) def set_train_summary(self, summary): """ Set train summary. A TrainSummary object contains information necessary for the optimizer to know how often the logs are recorded, where to store the logs and how to retrieve them, etc. For details, refer to the docs of TrainSummary. :param summary: a TrainSummary object """ callBigDlFunc(self.bigdl_type, "setTrainSummary", self.value, summary) return self def set_val_summary(self, summary): """ Set validation summary. A ValidationSummary object contains information necessary for the optimizer to know how often the logs are recorded, where to store the logs and how to retrieve them, etc. For details, refer to the docs of ValidationSummary. :param summary: a ValidationSummary object """ callBigDlFunc(self.bigdl_type, "setValSummary", self.value, summary) return self def prepare_input(self): """ Load input. Notebook user can call this method to seprate load data and create optimizer time """ print("Loading input ...") self.value.prepareInput() def set_end_when(self, end_when): """ When to stop, passed in a [[Trigger]] """ self.value.setEndWhen(end_when.value) return self class Optimizer(BaseOptimizer): # NOTE: This is a deprecated method, you should use `create` method instead. def __init__(self, model, training_rdd, criterion, end_trigger, batch_size, optim_method=None, bigdl_type="float"): """ Create a distributed optimizer. :param model: the neural net model :param training_rdd: the training dataset :param criterion: the loss function :param optim_method: the algorithm to use for optimization, e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD. :param end_trigger: when to end the optimization :param batch_size: training batch size """ self.pvalue = DistriOptimizer(model, training_rdd, criterion, end_trigger, batch_size, optim_method, bigdl_type) self.value = self.pvalue.value self.bigdl_type = self.pvalue.bigdl_type @staticmethod def create(model, training_set, criterion, end_trigger=None, batch_size=32, optim_method=None, cores=None, bigdl_type="float"): """ Create an optimizer. Depend on the input type, the returning optimizer can be a local optimizer \ or a distributed optimizer. :param model: the neural net model :param training_set: (features, label) for local mode. RDD[Sample] for distributed mode. :param criterion: the loss function :param optim_method: the algorithm to use for optimization, e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD. :param end_trigger: when to end the optimization. 
default value is MapEpoch(1) :param batch_size: training batch size :param cores: This is for local optimizer only and use total physical cores as the default value """ if not end_trigger: end_trigger = MaxEpoch(1) if not optim_method: optim_method = SGD() if isinstance(training_set, RDD) or isinstance(training_set, DataSet): return DistriOptimizer(model=model, training_rdd=training_set, criterion=criterion, end_trigger=end_trigger, batch_size=batch_size, optim_method=optim_method, bigdl_type=bigdl_type) elif isinstance(training_set, tuple) and len(training_set) == 2: x, y = training_set return LocalOptimizer(X=x, Y=y, model=model, criterion=criterion, end_trigger=end_trigger, batch_size=batch_size, optim_method=optim_method, cores=cores, bigdl_type="float") else: raise Exception("Not supported training set: %s" % type(training_set)) def set_validation(self, batch_size, val_rdd, trigger, val_method=None): """ Configure validation settings. :param batch_size: validation batch size :param val_rdd: validation dataset :param trigger: validation interval :param val_method: the ValidationMethod to use,e.g. "Top1Accuracy", "Top5Accuracy", "Loss" """ if val_method is None: val_method = [Top1Accuracy()] func_name = "setValidation" if isinstance(val_rdd, DataSet): func_name = "setValidationFromDataSet" callBigDlFunc(self.bigdl_type, func_name, self.value, batch_size, trigger, val_rdd, to_list(val_method)) def set_traindata(self, training_rdd, batch_size): """ Set new training dataset, for optimizer reuse :param training_rdd: the training dataset :param batch_size: training batch size :return: """ callBigDlFunc(self.bigdl_type, "setTrainData", self.value, training_rdd, batch_size) class DistriOptimizer(Optimizer): def __init__(self, model, training_rdd, criterion, end_trigger, batch_size, optim_method=None, bigdl_type="float"): """ Create an optimizer. :param model: the neural net model :param training_data: the training dataset :param criterion: the loss function :param optim_method: the algorithm to use for optimization, e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD. :param end_trigger: when to end the optimization :param batch_size: training batch size """ if not optim_method: optim_methods = {model.name(): SGD()} elif isinstance(optim_method, OptimMethod): optim_methods = {model.name(): optim_method} elif isinstance(optim_method, JavaObject): optim_methods = {model.name(): OptimMethod(optim_method, bigdl_type)} else: optim_methods = optim_method if isinstance(training_rdd, RDD): JavaValue.__init__(self, None, bigdl_type, model.value, training_rdd, criterion, optim_methods, end_trigger, batch_size) elif isinstance(training_rdd, DataSet): self.bigdl_type = bigdl_type self.value = callBigDlFunc(self.bigdl_type, "createDistriOptimizerFromDataSet", model.value, training_rdd, criterion, optim_methods, end_trigger, batch_size) class LocalOptimizer(BaseOptimizer): """ Create an optimizer. :param model: the neural net model :param X: the training features which is an ndarray or list of ndarray :param Y: the training label which is an ndarray :param criterion: the loss function :param optim_method: the algorithm to use for optimization, e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD. :param end_trigger: when to end the optimization :param batch_size: training batch size :param cores: by default is the total physical cores. 
""" def __init__(self, X, Y, model, criterion, end_trigger, batch_size, optim_method=None, cores=None, bigdl_type="float"): if not optim_method: optim_methods = {model.name(): SGD()} elif isinstance(optim_method, OptimMethod): optim_methods = {model.name(): optim_method} elif isinstance(optim_method, JavaObject): optim_methods = {model.name(): OptimMethod(optim_method, bigdl_type)} else: optim_methods = optim_method if cores is None: cores = multiprocessing.cpu_count() JavaValue.__init__(self, None, bigdl_type, [JTensor.from_ndarray(X) for X in to_list(X)], JTensor.from_ndarray(Y), model.value, criterion, optim_methods, end_trigger, batch_size, cores) def set_validation(self, batch_size, X_val, Y_val, trigger, val_method=None): """ Configure validation settings. :param batch_size: validation batch size :param X_val: features of validation dataset :param Y_val: label of validation dataset :param trigger: validation interval :param val_method: the ValidationMethod to use,e.g. "Top1Accuracy", "Top5Accuracy", "Loss" """ if val_method is None: val_method = [Top1Accuracy()] callBigDlFunc(self.bigdl_type, "setValidation", self.value, batch_size, trigger, [JTensor.from_ndarray(X) for X in to_list(X_val)], JTensor.from_ndarray(Y_val), to_list(val_method)) class TrainSummary(JavaValue, ): """ A logging facility which allows user to trace how indicators (e.g. learning rate, training loss, throughput, etc.) change with iterations/time in an optimization process. TrainSummary is for training indicators only (check ValidationSummary for validation indicators). It contains necessary information for the optimizer to know where to store the logs, how to retrieve the logs, and so on. - The logs are written in tensorflow-compatible format so that they can be visualized directly using tensorboard. Also the logs can be retrieved as ndarrays and visualized using python libraries such as matplotlib (in notebook, etc.). Use optimizer.setTrainSummary to enable train logger. """ def __init__(self, log_dir, app_name, bigdl_type="float"): """ Create a TrainSummary. Logs will be saved to log_dir/app_name/train. :param log_dir: the root dir to store the logs :param app_name: the application name """ JavaValue.__init__(self, None, bigdl_type, log_dir, app_name) def read_scalar(self, tag): """ Retrieve train logs by type. Return an array of records in the format (step,value,wallClockTime). - "Step" is the iteration count by default. :param tag: the type of the logs, Supported tags are: "LearningRate","Loss", "Throughput" """ return callBigDlFunc(self.bigdl_type, "summaryReadScalar", self.value, tag) def set_summary_trigger(self, name, trigger): """ Set the interval of recording for each indicator. :param tag: tag name. Supported tag names are "LearningRate", "Loss","Throughput", "Parameters". "Parameters" is an umbrella tag thatincludes weight, bias, gradWeight, gradBias, and some running status(eg. runningMean and runningVar in BatchNormalization). If youdidn't set any triggers, we will by default record Loss and Throughputin each iteration, while *NOT* recording LearningRate and Parameters,as recording parameters may introduce substantial overhead when themodel is very big, LearningRate is not a public attribute for allOptimMethod. :param trigger: trigger """ return callBigDlFunc(self.bigdl_type, "summarySetTrigger", self.value, name, trigger) class ValidationSummary(JavaValue): """ A logging facility which allows user to trace how indicators (e.g. validation loss, top1 accuray, top5 accuracy etc.) 
change with iterations/time in an optimization process. ValidationSummary is for validation indicators only (check TrainSummary for train indicators). It contains necessary information for the optimizer to know where to store the logs, how to retrieve the logs, and so on. - The logs are written in tensorflow-compatible format so that they can be visualized directly using tensorboard. Also the logs can be retrieved as ndarrays and visualized using python libraries such as matplotlib (in notebook, etc.). Use optimizer.setValidationSummary to enable validation logger. """ def __init__(self, log_dir, app_name, bigdl_type="float"): """ Create a ValidationSummary. Logs will be saved to log_dir/app_name/train. By default, all ValidationMethod set into optimizer will be recorded and the recording interval is the same as trigger of ValidationMethod in the optimizer. :param log_dir: the root dir to store the logs :param app_name: the application name """ JavaValue.__init__(self, None, bigdl_type, log_dir, app_name) def read_scalar(self, tag): """ Retrieve validation logs by type. Return an array of records in the format (step,value,wallClockTime). - "Step" is the iteration count by default. :param tag: the type of the logs. The tag should match the name ofthe ValidationMethod set into the optimizer. e.g."Top1AccuracyLoss","Top1Accuracy" or "Top5Accuracy". """ return callBigDlFunc(self.bigdl_type, "summaryReadScalar", self.value, tag) class L1L2Regularizer(JavaValue): """ Apply both L1 and L2 regularization :param l1 l1 regularization rate :param l2 l2 regularization rate """ def __init__(self, l1, l2, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type, l1, l2) class ActivityRegularization(JavaValue): """ Apply both L1 and L2 regularization :param l1 l1 regularization rate :param l2 l2 regularization rate """ def __init__(self, l1, l2, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type, l1, l2) class L1Regularizer(JavaValue): """ Apply L1 regularization :param l1 l1 regularization rate """ def __init__(self, l1, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type, l1) class L2Regularizer(JavaValue): """ Apply L2 regularization :param l2 l2 regularization rate """ def __init__(self, l2, bigdl_type="float"): JavaValue.__init__(self, None, bigdl_type, l2) def _test(): import doctest from pyspark import SparkContext from bigdl.optim import optimizer from bigdl.util.common import init_engine from bigdl.util.common import create_spark_conf globs = optimizer.__dict__.copy() sc = SparkContext(master="local[4]", appName="test optimizer", conf=create_spark_conf()) init_engine() globs['sc'] = sc (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS) if failure_count: exit(-1) if __name__ == "__main__": _test()
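# --- Editor's addition (illustrative sketch, not part of the BigDL source).
# It shows how the classes defined above are typically combined; `model`,
# `criterion`, `train_rdd` and `val_rdd` are placeholders supplied by the user.
#
#   schedule = SequentialSchedule(iteration_per_epoch=100)
#   schedule.add(Warmup(delta=1e-4), 500)                     # warm up for 500 iterations
#   schedule.add(Poly(power=0.5, max_iteration=10000), 10000) # then polynomial decay
#   optim = SGD(learningrate=0.1, leaningrate_schedule=schedule)
#   optimizer = Optimizer.create(model=model, training_set=train_rdd,
#                                criterion=criterion, end_trigger=MaxEpoch(5),
#                                batch_size=128, optim_method=optim)
#   optimizer.set_validation(batch_size=128, val_rdd=val_rdd,
#                            trigger=EveryEpoch(), val_method=[Top1Accuracy()])
#   trained_model = optimizer.optimize()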
apache-2.0
kikocorreoso/mplutils
mplutils/axes.py
1
8516
# -*- coding: utf-8 -*- """ Created on Sun Feb 21 23:43:37 2016 @author: kiko """ from __future__ import division, absolute_import from .settings import RICH_DISPLAY import numpy as np if RICH_DISPLAY: from IPython.display import display def axes_set_better_defaults(ax, axes_color = '#777777', grid = False, show = False): """ Enter an Axes instance and it will change the defaults to an opinionated version of how a simple plot should be. Parameters: ----------- ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance axes_color : str A string indicating a valid matplotlib color. grid : bool If `True` the grid of the axes will be shown, if `False` (default) the grid, if active, will be supressed. show : bool if `True` the figure will be shown. If you are working in a rich display environment like the IPython qtconsole or the Jupyter notebook it will use `IPython.display.display` to show the figure. If you are working otherwise it will call the `show` of the `Figure` instance. """ ax.set_axis_bgcolor((1, 1, 1)) ax.grid(grid) for key in ax.spines.keys(): if ax.spines[key].get_visible(): ax.spines[key].set_color(axes_color) ax.tick_params(axis = 'x', colors = axes_color) ax.tick_params(axis = 'y', colors = axes_color) ax.figure.set_facecolor('white') ax.figure.canvas.draw() if show: if RICH_DISPLAY: display(ax.figure) else: ax.figure.show() # http://matplotlib.org/examples/pylab_examples/spine_placement_demo.html def axes_set_axis_position(ax, spines = ['bottom', 'left'], pan = 0, show = False): """ Enter an Axes instance and depending the options it will display the axis where you selected. Parameters: ----------- ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance spines : str or iterable A string or an iterable of strings with the following valid options: 'bottom' : To active the bottom x-axis. 'top' : To active the top x-axis. 'left' : To active the left y-axis. 'right' : To active the right y-axis. pan : int or iterable A integer value or an iterable of integer values indicating the value to pan the axis. It has to have the same lenght and the same order than the spines input. show : bool if `True` the figure will be shown. If you are working in a rich display environment like the IPython qtconsole or the Jupyter notebook it will use `IPython.display.display` to show the figure. If you are working otherwise it will call the `show` of the `Figure` instance. """ if np.isscalar(spines): spines = (spines,) len_spines = 1 else: len_spines = len(spines) if np.isscalar(pan): pan = np.repeat(pan, len_spines) len_pan = 1 else: len_pan = len(pan) if len_pan > 1 and len_pan != len_spines: raise ValueError(('Length of `spines` and `pan` mismatch. 
`pan` ') ('should be a scalar or should have the same length than `spines`.')) i = 0 for loc, spine in ax.spines.items(): if loc in spines: spine.set_position(('outward', pan[i])) # outward by `pan` points spine.set_smart_bounds(True) i += 1 else: #spine.set_color('none') # don't draw spine spine.set_visible(False) # turn off ticks where there is no spine if 'left' in spines: ax.yaxis.set_ticks_position('left') ax.tick_params(labelleft = True) if 'right' in spines: ax.yaxis.set_ticks_position('right') ax.tick_params(labelright = True) if 'left' in spines and 'right' in spines: ax.yaxis.set_ticks_position('both') ax.tick_params(labelleft = True, labelright = True) if 'left' not in spines and 'right' not in spines: ax.yaxis.set_ticks([]) if 'bottom' in spines: ax.xaxis.set_ticks_position('bottom') ax.tick_params(labelbottom = True) if 'top' in spines: ax.xaxis.set_ticks_position('top') ax.tick_params(labeltop = True) if 'bottom' in spines and 'top' in spines: ax.xaxis.set_ticks_position('both') ax.tick_params(labelbottom = True, labeltop = True) if 'bottom' not in spines and 'top' not in spines: ax.xaxis.set_ticks([]) ax.figure.canvas.draw() if show: if RICH_DISPLAY: display(ax.figure) else: ax.figure.show() def axes_set_origin(ax, x = 0, y = 0, xticks_position = 'bottom', yticks_position = 'left', xticks_visible = True, yticks_visible = True, show = False): """ function to locate x-axis and y-axis on the position you want. Parameters: ----------- ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance x : int or float Value indicating the position on the y-axis where you want the x-axis to be located. y : int or float Value indicating the position on the x-axis where you want the y-axis to be located. xticks_position : str Default value is 'bottom' if you want the ticks to be located below the x-axis. 'top' if you want the ticks to be located above the x-axis. yticks_position : str Default value is 'left' if you want the ticks to be located on the left side of the y-axis. 'right' if you want the ticks to be located on the right side of the y-axis. xticks_visible : bool Default value is True if you want ticks visible on the x-axis. False if you don't want to see the ticks on the x-axis. yticks_visible : bool Default value is True if you want ticks visible on the y-axis. False if you don't want to see the ticks on the y-axis. show : bool if `True` the figure will be shown. If you are working in a rich display environment like the IPython qtconsole or the Jupyter notebook it will use `IPython.display.display` to show the figure. If you are working otherwise it will call the `show` of the `Figure` instance. 
""" ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.xaxis.set_ticks_position(xticks_position) ax.spines['bottom'].set_position(('data', x)) ax.yaxis.set_ticks_position(yticks_position) ax.spines['left'].set_position(('data', y)) if not xticks_visible: ax.set_xticks([]) if not yticks_visible: ax.set_yticks([]) ax.figure.canvas.draw() if show: if RICH_DISPLAY: display(ax.figure) else: ax.figure.show() def axes_set_aspect_ratio(ax, ratio = 'equal', show = True): """ function that accepts an Axes instance and update the information setting the aspect ratio of the axis to the defined quantity Parameters: ----------- ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance ratio : str or int/float The value can be a string with the following values: 'equal' : (default) same scaling from data to plot units for x and y 'auto' : automatic; fill position rectangle with data Or a: number (int or float) : a circle will be stretched such that the height is num times the width. aspec t =1 is the same as aspect='equal'. show : bool if `True` the figure will be shown. If you are working in a rich display environment like the IPython qtconsole or the Jupyter notebook it will use `IPython.display.display` to show the figure. If you are working otherwise it will call the `show` of the `Figure` instance. """ ax.set_aspect(ratio, adjustable = None) if show: if RICH_DISPLAY: display(ax.figure) else: ax.figure.show()
mit
DiamondLightSource/auto_tomo_calibration-experimental
measure_resolution/lmfit-py/examples/confidence_interval.py
4
2924
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 15 19:47:45 2012

@author: Tillsten
"""
import numpy as np
from lmfit import Parameters, Minimizer, conf_interval, report_fit, report_ci
from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
from scipy.optimize import leastsq

try:
    import matplotlib.pyplot as plt
    import pylab
    HASPYLAB = True
except ImportError:
    HASPYLAB = False

p_true = Parameters()
p_true.add('amp', value=14.0)
p_true.add('period', value=5.33)
p_true.add('shift', value=0.123)
p_true.add('decay', value=0.010)


def residual(pars, x, data=None):
    amp = pars['amp'].value
    per = pars['period'].value
    shift = pars['shift'].value
    decay = pars['decay'].value
    if abs(shift) > pi/2:
        shift = shift - sign(shift)*pi
    model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
    if data is None:
        return model
    return (model - data)


n = 2500
xmin = 0.
xmax = 250.0
noise = random.normal(scale=0.7215, size=n)
x = linspace(xmin, xmax, n)
data = residual(p_true, x) + noise

fit_params = Parameters()
fit_params.add('amp', value=13.0)
fit_params.add('period', value=2)
fit_params.add('shift', value=0.0)
fit_params.add('decay', value=0.02)

mini = Minimizer(residual, fit_params, fcn_args=(x,), fcn_kws={'data': data})
out = mini.leastsq()

fit = residual(out.params, x)

print(' N fev = ', out.nfev)
print(out.chisqr, out.redchi, out.nfree)

report_fit(out.params)

#ci=calc_ci(out)
ci, tr = conf_interval(mini, out, trace=True)
report_ci(ci)

if HASPYLAB:
    names = out.params.keys()
    i = 0
    gs = pylab.GridSpec(4, 4)
    sx = {}
    sy = {}
    for fixed in names:
        j = 0
        for free in names:
            if j in sx and i in sy:
                ax = pylab.subplot(gs[i, j], sharex=sx[j], sharey=sy[i])
            elif i in sy:
                ax = pylab.subplot(gs[i, j], sharey=sy[i])
                sx[j] = ax
            elif j in sx:
                ax = pylab.subplot(gs[i, j], sharex=sx[j])
                sy[i] = ax
            else:
                ax = pylab.subplot(gs[i, j])
                sy[i] = ax
                sx[j] = ax
            if i < 3:
                pylab.setp(ax.get_xticklabels(), visible=False)
            else:
                ax.set_xlabel(free)
            if j > 0:
                pylab.setp(ax.get_yticklabels(), visible=False)
            else:
                ax.set_ylabel(fixed)
            res = tr[fixed]
            prob = res['prob']
            f = prob < 0.96
            x, y = res[free], res[fixed]
            ax.scatter(x[f], y[f], c=1-prob[f], s=200*(1-prob[f]+0.5))
            ax.autoscale(1, 1)
            j = j + 1
        i = i + 1
    pylab.show()
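# --- Editor's addition (illustrative, not in the original example). lmfit can
# also map out a two-parameter confidence region; the exact signature of
# conf_interval2d has changed between lmfit releases, so this is indicative only.
# from lmfit import conf_interval2d
# cx, cy, grid = conf_interval2d(mini, out, 'amp', 'period', 30, 30)
# plt.contourf(cx, cy, grid, np.linspace(0, 1, 11))
# plt.xlabel('amp'); plt.ylabel('period'); plt.colorbar()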
apache-2.0
lizardsystem/threedilib
threedilib/modeling/convert.py
1
8275
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst. # -*- coding: utf-8 -*- """ Convert shapefiles with z coordinates. Choose from the following formats: 'inp' to create an inp file, 'img' to create an image with a plot of the feature, or 'shp' to output a shapefile with the average height of a feature stored in an extra attribute. """ from __future__ import print_function from __future__ import unicode_literals from __future__ import absolute_import from __future__ import division import argparse import math import os import shutil import tempfile from matplotlib.backends import backend_agg from matplotlib import figure from osgeo import gdal from osgeo import ogr from PIL import Image ogr.UseExceptions() def get_parser(): """ Return argument parser. """ parser = argparse.ArgumentParser( description=__doc__, ) parser.add_argument('source_path', metavar='SOURCE', help=('Path to source shapefile.')) parser.add_argument('target_path', metavar='TARGET', help=('Path to target file.')) parser.add_argument('-of', '--output-format', metavar='FORMAT', choices=['inp', 'img', 'shp'], default='shp', help=("Path to output.")) return parser class InputFileWriter(object): """ Writer for input files. """ def __init__(self, path): """ Init the counters and tmpdirs """ self.path = path self.node_count = 0 self.link_count = 0 def __enter__(self): """ Setup tempfiles. """ self.temp_directory = tempfile.mkdtemp() self.node_file = open( os.path.join(self.temp_directory, 'nodes'), 'a+', ) self.link_file = open( os.path.join(self.temp_directory, 'links'), 'a+', ) return self def __exit__(self, type, value, traceback): """ Write 'inputfile' at path. """ with open(self.path, 'w') as input_file: self.node_file.seek(0) input_file.write(self.node_file.read()) input_file.write('-1\n') self.link_file.seek(0) input_file.write(self.link_file.read()) self.node_file.close() self.link_file.close() shutil.rmtree(self.temp_directory) def _write_node(self, node): """ Write a node. """ self.node_count += 1 self.node_file.write('{} {} {} {}\n'.format( self.node_count, node[0], node[1], -node[2] # Depth, not height! )) def _write_link(self): """ Write a link between previous node and next node.""" self.link_count += 1 self.link_file.write('{} {} {}\n'.format( self.link_count, self.node_count, self.node_count + 1, )) def _add_wkb_line_string(self, wkb_line_string): """ Add linestring as nodes and links. """ nodes = [wkb_line_string.GetPoint(i) for i in range(wkb_line_string.GetPointCount())] # Add nodes and links up to the last node for i in range(len(nodes) - 1): self._write_node(nodes[i]) self._write_link() # Add last node, link already covered. self._write_node(nodes[-1]) def add_feature(self, feature): """ Add feature as nodes and links. """ geometry = feature.geometry() geometry_type = geometry.GetGeometryType() if geometry_type == ogr.wkbLineString25D: self._add_wkb_line_string(geometry) elif geometry_type == ogr.wkbMultiLineString25D: for wkb_line_string in geometry: self._add_wkb_line_string(wkb_line_string) class ImageWriter(object): """ Writer for images. """ def __init__(self, path): self.count = 0 self.path = path def __enter__(self): return self def _add_wkb_line_string(self, wkb_line_string, label): """ Plot linestring as separate image. 
""" # Get data x, y, z = zip(*[wkb_line_string.GetPoint(i) for i in range(wkb_line_string.GetPointCount())]) # Determine distance along line l = [0] for i in range(len(z) - 1): l.append(l[-1] + math.sqrt( (x[i + 1] - x[i]) ** 2 + (y[i + 1] - y[i]) ** 2, )) # Plot in matplotlib fig = figure.Figure() axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) axes.plot(l, z, label=label) axes.legend(loc='best', frameon=False) # Write to image backend_agg.FigureCanvasAgg(fig) buf, size = fig.canvas.print_to_buffer() image = Image.fromstring('RGBA', size, buf) root, ext = os.path.splitext(self.path) image.save(root + '{:00.0f}'.format(self.count) + ext) self.count += 1 def add_feature(self, feature): """ Currently saves every feature in a separate image. """ # Plotlabel label = '\n'.join([': '.join(str(v) for v in item) for item in feature.items().items()]) # Plot according to geometry type geometry = feature.geometry() geometry_type = geometry.GetGeometryType() if geometry_type == ogr.wkbLineString25D: self._add_wkb_line_string(geometry, label=label) elif geometry_type == ogr.wkbMultiLineString25D: for wkb_line_string in geometry: self._add_wkb_line_string(wkb_line_string, label=label) def __exit__(self, type, value, traceback): pass class ShapefileWriter(object): """ Writer for shapefiles. """ ATTRIBUTE = b'kruinhoogt' def __init__(self, path): self.count = 0 self.path = path self.datasource = None self.layer = None def __enter__(self): return self def create_datasource(self, feature): """ Create a datasource based on feature. """ root, ext = os.path.splitext(os.path.basename(self.path)) driver = ogr.GetDriverByName(b'ESRI Shapefile') datasource = driver.CreateDataSource(self.path) layer = datasource.CreateLayer(root) for i in range(feature.GetFieldCount()): layer.CreateField(feature.GetFieldDefnRef(i)) field_defn = ogr.FieldDefn(self.ATTRIBUTE, ogr.OFTReal) layer.CreateField(field_defn) self.datasource = datasource self.layer = layer def add_feature(self, feature): """ Currently saves every feature in a separate image. """ if self.layer is None: self.create_datasource(feature) layer_defn = self.layer.GetLayerDefn() # elevation geometry = feature.geometry().Clone() geometry_type = geometry.GetGeometryType() if geometry_type == ogr.wkbLineString25D: elevation = min([p[2] for p in geometry.GetPoints()]) else: # multilinestring elevation = min([p[2] for g in geometry for p in g.GetPoints()]) geometry.FlattenTo2D() new_feature = ogr.Feature(layer_defn) new_feature.SetGeometry(geometry) for k, v in feature.items().items(): new_feature[k] = v new_feature[self.ATTRIBUTE] = elevation self.layer.CreateFeature(new_feature) def __exit__(self, type, value, traceback): pass def convert(source_path, target_path, output_format): """ Convert shapefile to inp file.""" source_dataset = ogr.Open(str(source_path)) writers = dict( inp=InputFileWriter, img=ImageWriter, shp=ShapefileWriter, ) with writers[output_format](target_path) as writer: for source_layer in source_dataset: total = source_layer.GetFeatureCount() for count, source_feature in enumerate(source_layer, 1): writer.add_feature(source_feature) gdal.TermProgress_nocb(count / total) def main(): """ Call convert() with commandline args. """ convert(**vars(get_parser().parse_args())) if __name__ == '__main__': exit(main())
gpl-3.0
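For reference, the 'shp' output path above reduces each 2.5D line to the lowest z value along it (stored in the 'kruinhoogt' attribute) and then flattens the geometry to 2D. A minimal sketch of that step, assuming the GDAL/OGR Python bindings are installed; the coordinates are invented for illustration.

from osgeo import ogr

line = ogr.Geometry(ogr.wkbLineString25D)
line.AddPoint(0.0, 0.0, 3.2)
line.AddPoint(10.0, 0.0, 2.8)
line.AddPoint(20.0, 5.0, 3.5)

elevation = min(p[2] for p in line.GetPoints())   # lowest z along the line
line.FlattenTo2D()                                # drop z for the output layer

print(elevation)                                       # 2.8
print(line.GetGeometryType() == ogr.wkbLineString)     # True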
JackKelly/neuralnilm_prototype
scripts/e115.py
2
3654
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__

NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100

"""
e103
Discovered that bottom layer is hardly changing.  So will try
just a single lstm layer

e104
standard init
lower learning rate

e106
lower learning rate to 0.001

e108
is e107 but with batch size of 5

e109
Normal(1) for LSTM

e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!

e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well.  Haven't seen a NaN,
even with training rate of 0.1

e112
* n_seq_per_batch = 50

e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer

e115
* Learning rate = 1

"""


def exp_a(name):
    source = RealApplianceSource(
        filename='/data/dk3810/ukdale.h5',
        appliances=[
            ['fridge freezer', 'fridge', 'freezer'],
            'hair straighteners',
            'television'
            # 'dish washer',
            # ['washer dryer', 'washing machine']
        ],
        max_appliance_powers=[300, 500, 200], #, 2500, 2400],
        on_power_thresholds=[20, 20, 20], #, 20, 20],
        max_input_power=1000,
        min_on_durations=[60, 60, 60], #, 1800, 1800],
        window=("2013-06-01", "2014-07-01"),
        seq_length=1000,
        output_one_appliance=False,
        boolean_targets=False,
        min_off_duration=60,
        train_buildings=[1],
        validation_buildings=[1],
        skip_probability=0,
        n_seq_per_batch=50
    )

    net = Net(
        experiment_name=name,
        source=source,
        save_plot_interval=SAVE_PLOT_INTERVAL,
        loss_function=crossentropy,
        updates=partial(nesterov_momentum, learning_rate=1.0),
        layers_config=[
            {
                'type': LSTMLayer,
                'num_units': 50,
                'W_in_to_cell': Uniform(5),
                'gradient_steps': GRADIENT_STEPS,
                'peepholes': False
            },
            {
                'type': DenseLayer,
                'num_units': source.n_outputs,
                'nonlinearity': sigmoid
            }
        ]
    )
    return net


def init_experiment(experiment):
    full_exp_name = NAME + experiment
    func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
    print("***********************************")
    print("Preparing", full_exp_name, "...")
    net = eval(func_call)
    return net


def main():
    for experiment in list('a'):
        full_exp_name = NAME + experiment
        path = os.path.join(PATH, full_exp_name)
        try:
            net = init_experiment(experiment)
            run_experiment(net, path, epochs=5000)
        except KeyboardInterrupt:
            break
        except TrainingError as e:
            print("EXCEPTION:", e)


if __name__ == "__main__":
    main()
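The experiment scripts above derive their base name from the script file and append one letter per experiment before dispatching to exp_<letter>(). A tiny standard-library sketch of that naming convention, with the script path hard-coded as a stand-in for __main__.__file__:

import os

script_path = '/homes/dk3810/workspace/python/neuralnilm/scripts/e115.py'  # stand-in
NAME = os.path.splitext(os.path.split(script_path)[1])[0]   # 'e115'

for experiment in list('a'):
    full_exp_name = NAME + experiment   # 'e115a'
    print(full_exp_name)                # name passed to exp_a() and used for the figures directory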
mit
sinhrks/scikit-learn
examples/decomposition/plot_pca_vs_lda.py
176
2027
"""
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================

The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.

Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.

Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)

import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

iris = datasets.load_iris()

X = iris.data
y = iris.target
target_names = iris.target_names

pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)

lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)

# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
      % str(pca.explained_variance_ratio_))

plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2

for color, i, target_name in zip(colors, [0, 1, 2], target_names):
    plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw,
                label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of IRIS dataset')

plt.figure()
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
    plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color,
                label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('LDA of IRIS dataset')

plt.show()
bsd-3-clause
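For reference, the explained_variance_ratio_ printed in the example above is simply the eigenvalues of the sample covariance matrix normalised to sum to one. A minimal sketch that recomputes it by hand; the first two entries should match the PCA output:

import numpy as np
from sklearn import datasets

X = datasets.load_iris().data
eigvals = np.linalg.eigvalsh(np.cov(X, rowvar=False))[::-1]   # descending order
print(eigvals / eigvals.sum())   # first two entries ~ pca.explained_variance_ratio_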
Pymatteo/QtNMR
build/exe.win32-3.4/scipy/cluster/tests/test_hierarchy.py
7
34863
#! /usr/bin/env python # # Author: Damian Eads # Date: April 17, 2008 # # Copyright (C) 2008 Damian Eads # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import (TestCase, run_module_suite, dec, assert_raises, assert_allclose, assert_equal, assert_) from scipy.lib.six import xrange, u import scipy.cluster.hierarchy from scipy.cluster.hierarchy import ( linkage, from_mlab_linkage, to_mlab_linkage, num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster, is_isomorphic, single, leaders, correspond, is_monotonic, maxdists, maxinconsts, maxRstat, is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram) from scipy.spatial.distance import pdist import hierarchy_test_data # Matplotlib is not a scipy dependency but is optionally used in dendrogram, so # check if it's available try: # import matplotlib import matplotlib # and set the backend to be Agg (no gui) matplotlib.use('Agg') # before importing pyplot import matplotlib.pyplot as plt have_matplotlib = True except: have_matplotlib = False class TestLinkage(object): def test_linkage_empty_distance_matrix(self): # Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected. y = np.zeros((0,)) assert_raises(ValueError, linkage, y) ################### linkage def test_linkage_tdist(self): for method in ['single', 'complete', 'average', 'weighted', u('single')]: yield self.check_linkage_tdist, method def check_linkage_tdist(self, method): # Tests linkage(Y, method) on the tdist data set. Z = linkage(hierarchy_test_data.ytdist, method) expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method) assert_allclose(Z, expectedZ, atol=1e-10) ################### linkage on Q def test_linkage_X(self): for method in ['centroid', 'median', 'ward']: yield self.check_linkage_q, method def check_linkage_q(self, method): # Tests linkage(Y, method) on the Q data set. 
Z = linkage(hierarchy_test_data.X, method) expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method) assert_allclose(Z, expectedZ, atol=1e-06) class TestInconsistent(object): def test_inconsistent_tdist(self): for depth in hierarchy_test_data.inconsistent_ytdist: yield self.check_inconsistent_tdist, depth def check_inconsistent_tdist(self, depth): Z = hierarchy_test_data.linkage_ytdist_single assert_allclose(inconsistent(Z, depth), hierarchy_test_data.inconsistent_ytdist[depth]) class TestCopheneticDistance(object): def test_linkage_cophenet_tdist_Z(self): # Tests cophenet(Z) on tdist data set. expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, 295, 138, 219, 295, 295]) Z = hierarchy_test_data.linkage_ytdist_single M = cophenet(Z) assert_allclose(M, expectedM, atol=1e-10) def test_linkage_cophenet_tdist_Z_Y(self): # Tests cophenet(Z, Y) on tdist data set. Z = hierarchy_test_data.linkage_ytdist_single (c, M) = cophenet(Z, hierarchy_test_data.ytdist) expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, 295, 138, 219, 295, 295]) expectedc = 0.639931296433393415057366837573 assert_allclose(c, expectedc, atol=1e-10) assert_allclose(M, expectedM, atol=1e-10) class TestMLabLinkageConversion(object): def test_mlab_linkage_conversion_empty(self): # Tests from/to_mlab_linkage on empty linkage array. X = np.asarray([]) assert_equal(from_mlab_linkage([]), X) assert_equal(to_mlab_linkage([]), X) def test_mlab_linkage_conversion_single_row(self): # Tests from/to_mlab_linkage on linkage array with single row. Z = np.asarray([[0., 1., 3., 2.]]) Zm = [[1, 2, 3]] assert_equal(from_mlab_linkage(Zm), Z) assert_equal(to_mlab_linkage(Z), Zm) def test_mlab_linkage_conversion_multiple_rows(self): # Tests from/to_mlab_linkage on linkage array with multiple rows. Zm = np.asarray([[3, 6, 138], [4, 5, 219], [1, 8, 255], [2, 9, 268], [7, 10, 295]]) Z = np.array([[2., 5., 138., 2.], [3., 4., 219., 2.], [0., 7., 255., 3.], [1., 8., 268., 4.], [6., 9., 295., 6.]], dtype=np.double) assert_equal(from_mlab_linkage(Zm), Z) assert_equal(to_mlab_linkage(Z), Zm) class TestFcluster(object): def test_fclusterdata(self): for t in hierarchy_test_data.fcluster_inconsistent: yield self.check_fclusterdata, t, 'inconsistent' for t in hierarchy_test_data.fcluster_distance: yield self.check_fclusterdata, t, 'distance' for t in hierarchy_test_data.fcluster_maxclust: yield self.check_fclusterdata, t, 'maxclust' def check_fclusterdata(self, t, criterion): # Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set. expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t] X = hierarchy_test_data.Q_X T = fclusterdata(X, criterion=criterion, t=t) assert_(is_isomorphic(T, expectedT)) def test_fcluster(self): for t in hierarchy_test_data.fcluster_inconsistent: yield self.check_fcluster, t, 'inconsistent' for t in hierarchy_test_data.fcluster_distance: yield self.check_fcluster, t, 'distance' for t in hierarchy_test_data.fcluster_maxclust: yield self.check_fcluster, t, 'maxclust' def check_fcluster(self, t, criterion): # Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set. 
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t] Z = single(hierarchy_test_data.Q_X) T = fcluster(Z, criterion=criterion, t=t) assert_(is_isomorphic(T, expectedT)) def test_fcluster_monocrit(self): for t in hierarchy_test_data.fcluster_distance: yield self.check_fcluster_monocrit, t for t in hierarchy_test_data.fcluster_maxclust: yield self.check_fcluster_maxclust_monocrit, t def check_fcluster_monocrit(self, t): expectedT = hierarchy_test_data.fcluster_distance[t] Z = single(hierarchy_test_data.Q_X) T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z)) assert_(is_isomorphic(T, expectedT)) def check_fcluster_maxclust_monocrit(self, t): expectedT = hierarchy_test_data.fcluster_maxclust[t] Z = single(hierarchy_test_data.Q_X) T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z)) assert_(is_isomorphic(T, expectedT)) class TestLeaders(object): def test_leaders_single(self): # Tests leaders using a flat clustering generated by single linkage. X = hierarchy_test_data.Q_X Y = pdist(X) Z = linkage(Y) T = fcluster(Z, criterion='maxclust', t=3) Lright = (np.array([53, 55, 56]), np.array([2, 3, 1])) L = leaders(Z, T) assert_equal(L, Lright) class TestIsIsomorphic(object): def test_is_isomorphic_1(self): # Tests is_isomorphic on test case #1 (one flat cluster, different labellings) a = [1, 1, 1] b = [2, 2, 2] assert_(is_isomorphic(a, b)) assert_(is_isomorphic(b, a)) def test_is_isomorphic_2(self): # Tests is_isomorphic on test case #2 (two flat clusters, different labelings) a = [1, 7, 1] b = [2, 3, 2] assert_(is_isomorphic(a, b)) assert_(is_isomorphic(b, a)) def test_is_isomorphic_3(self): # Tests is_isomorphic on test case #3 (no flat clusters) a = [] b = [] assert_(is_isomorphic(a, b)) def test_is_isomorphic_4A(self): # Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic) a = [1, 2, 3] b = [1, 3, 2] assert_(is_isomorphic(a, b)) assert_(is_isomorphic(b, a)) def test_is_isomorphic_4B(self): # Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic) a = [1, 2, 3, 3] b = [1, 3, 2, 3] assert_(is_isomorphic(a, b) == False) assert_(is_isomorphic(b, a) == False) def test_is_isomorphic_4C(self): # Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic) a = [7, 2, 3] b = [6, 3, 2] assert_(is_isomorphic(a, b)) assert_(is_isomorphic(b, a)) def test_is_isomorphic_5(self): # Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random # clusters, random permutation of the labeling). for nc in [2, 3, 5]: yield self.help_is_isomorphic_randperm, 1000, nc def test_is_isomorphic_6(self): # Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random # clusters, random permutation of the labeling, slightly # nonisomorphic.) 
for nc in [2, 3, 5]: yield self.help_is_isomorphic_randperm, 1000, nc, True, 5 def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0): for k in range(3): a = np.int_(np.random.rand(nobs) * nclusters) b = np.zeros(a.size, dtype=np.int_) P = np.random.permutation(nclusters) for i in xrange(0, a.shape[0]): b[i] = P[a[i]] if noniso: Q = np.random.permutation(nobs) b[Q[0:nerrors]] += 1 b[Q[0:nerrors]] %= nclusters assert_(is_isomorphic(a, b) == (not noniso)) assert_(is_isomorphic(b, a) == (not noniso)) class TestIsValidLinkage(object): def test_is_valid_linkage_various_size(self): for nrow, ncol, valid in [(2, 5, False), (2, 3, False), (1, 4, True), (2, 4, True)]: yield self.check_is_valid_linkage_various_size, nrow, ncol, valid def check_is_valid_linkage_various_size(self, nrow, ncol, valid): # Tests is_valid_linkage(Z) with linkage matrics of various sizes Z = np.asarray([[0, 1, 3.0, 2, 5], [3, 2, 4.0, 3, 3]], dtype=np.double) Z = Z[:nrow, :ncol] assert_(is_valid_linkage(Z) == valid) if not valid: assert_raises(ValueError, is_valid_linkage, Z, throw=True) def test_is_valid_linkage_int_type(self): # Tests is_valid_linkage(Z) with integer type. Z = np.asarray([[0, 1, 3.0, 2], [3, 2, 4.0, 3]], dtype=np.int) assert_(is_valid_linkage(Z) == False) assert_raises(TypeError, is_valid_linkage, Z, throw=True) def test_is_valid_linkage_empty(self): # Tests is_valid_linkage(Z) with empty linkage. Z = np.zeros((0, 4), dtype=np.double) assert_(is_valid_linkage(Z) == False) assert_raises(ValueError, is_valid_linkage, Z, throw=True) def test_is_valid_linkage_4_and_up(self): # Tests is_valid_linkage(Z) on linkage on observation sets between # sizes 4 and 15 (step size 3). for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) assert_(is_valid_linkage(Z) == True) def test_is_valid_linkage_4_and_up_neg_index_left(self): # Tests is_valid_linkage(Z) on linkage on observation sets between # sizes 4 and 15 (step size 3) with negative indices (left). for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) Z[i//2,0] = -2 assert_(is_valid_linkage(Z) == False) assert_raises(ValueError, is_valid_linkage, Z, throw=True) def test_is_valid_linkage_4_and_up_neg_index_right(self): # Tests is_valid_linkage(Z) on linkage on observation sets between # sizes 4 and 15 (step size 3) with negative indices (right). for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) Z[i//2,1] = -2 assert_(is_valid_linkage(Z) == False) assert_raises(ValueError, is_valid_linkage, Z, throw=True) def test_is_valid_linkage_4_and_up_neg_dist(self): # Tests is_valid_linkage(Z) on linkage on observation sets between # sizes 4 and 15 (step size 3) with negative distances. for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) Z[i//2,2] = -0.5 assert_(is_valid_linkage(Z) == False) assert_raises(ValueError, is_valid_linkage, Z, throw=True) def test_is_valid_linkage_4_and_up_neg_counts(self): # Tests is_valid_linkage(Z) on linkage on observation sets between # sizes 4 and 15 (step size 3) with negative counts. for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) Z[i//2,3] = -2 assert_(is_valid_linkage(Z) == False) assert_raises(ValueError, is_valid_linkage, Z, throw=True) class TestIsValidInconsistent(object): def test_is_valid_im_int_type(self): # Tests is_valid_im(R) with integer type. 
R = np.asarray([[0, 1, 3.0, 2], [3, 2, 4.0, 3]], dtype=np.int) assert_(is_valid_im(R) == False) assert_raises(TypeError, is_valid_im, R, throw=True) def test_is_valid_im_various_size(self): for nrow, ncol, valid in [(2, 5, False), (2, 3, False), (1, 4, True), (2, 4, True)]: yield self.check_is_valid_im_various_size, nrow, ncol, valid def check_is_valid_im_various_size(self, nrow, ncol, valid): # Tests is_valid_im(R) with linkage matrics of various sizes R = np.asarray([[0, 1, 3.0, 2, 5], [3, 2, 4.0, 3, 3]], dtype=np.double) R = R[:nrow, :ncol] assert_(is_valid_im(R) == valid) if not valid: assert_raises(ValueError, is_valid_im, R, throw=True) def test_is_valid_im_empty(self): # Tests is_valid_im(R) with empty inconsistency matrix. R = np.zeros((0, 4), dtype=np.double) assert_(is_valid_im(R) == False) assert_raises(ValueError, is_valid_im, R, throw=True) def test_is_valid_im_4_and_up(self): # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 # (step size 3). for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) R = inconsistent(Z) assert_(is_valid_im(R) == True) def test_is_valid_im_4_and_up_neg_index_left(self): # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 # (step size 3) with negative link height means. for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) R = inconsistent(Z) R[i//2,0] = -2.0 assert_(is_valid_im(R) == False) assert_raises(ValueError, is_valid_im, R, throw=True) def test_is_valid_im_4_and_up_neg_index_right(self): # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 # (step size 3) with negative link height standard deviations. for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) R = inconsistent(Z) R[i//2,1] = -2.0 assert_(is_valid_im(R) == False) assert_raises(ValueError, is_valid_im, R, throw=True) def test_is_valid_im_4_and_up_neg_dist(self): # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 # (step size 3) with negative link counts. for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) R = inconsistent(Z) R[i//2,2] = -0.5 assert_(is_valid_im(R) == False) assert_raises(ValueError, is_valid_im, R, throw=True) class TestNumObsLinkage(TestCase): def test_num_obs_linkage_empty(self): # Tests num_obs_linkage(Z) with empty linkage. Z = np.zeros((0, 4), dtype=np.double) self.assertRaises(ValueError, num_obs_linkage, Z) def test_num_obs_linkage_1x4(self): # Tests num_obs_linkage(Z) on linkage over 2 observations. Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double) self.assertTrue(num_obs_linkage(Z) == 2) def test_num_obs_linkage_2x4(self): # Tests num_obs_linkage(Z) on linkage over 3 observations. Z = np.asarray([[0, 1, 3.0, 2], [3, 2, 4.0, 3]], dtype=np.double) self.assertTrue(num_obs_linkage(Z) == 3) def test_num_obs_linkage_4_and_up(self): # Tests num_obs_linkage(Z) on linkage on observation sets between sizes # 4 and 15 (step size 3). for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) self.assertTrue(num_obs_linkage(Z) == i) class TestLeavesList(object): def test_leaves_list_1x4(self): # Tests leaves_list(Z) on a 1x4 linkage. Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double) to_tree(Z) assert_equal(leaves_list(Z), [0, 1]) def test_leaves_list_2x4(self): # Tests leaves_list(Z) on a 2x4 linkage. 
Z = np.asarray([[0, 1, 3.0, 2], [3, 2, 4.0, 3]], dtype=np.double) to_tree(Z) assert_equal(leaves_list(Z), [0, 1, 2]) def test_leaves_list_Q(self): for method in ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']: yield self.check_leaves_list_Q, method def check_leaves_list_Q(self, method): # Tests leaves_list(Z) on the Q data set X = hierarchy_test_data.Q_X Z = linkage(X, method) node = to_tree(Z) assert_equal(node.pre_order(), leaves_list(Z)) def test_Q_subtree_pre_order(self): # Tests that pre_order() works when called on sub-trees. X = hierarchy_test_data.Q_X Z = linkage(X, 'single') node = to_tree(Z) assert_equal(node.pre_order(), (node.get_left().pre_order() + node.get_right().pre_order())) class TestCorrespond(TestCase): def test_correspond_empty(self): # Tests correspond(Z, y) with empty linkage and condensed distance matrix. y = np.zeros((0,)) Z = np.zeros((0,4)) self.assertRaises(ValueError, correspond, Z, y) def test_correspond_2_and_up(self): # Tests correspond(Z, y) on linkage and CDMs over observation sets of # different sizes. for i in xrange(2, 4): y = np.random.rand(i*(i-1)//2) Z = linkage(y) self.assertTrue(correspond(Z, y)) for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) self.assertTrue(correspond(Z, y)) def test_correspond_4_and_up(self): # Tests correspond(Z, y) on linkage and CDMs over observation sets of # different sizes. Correspondance should be false. for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) + list(zip(list(range(3, 5)), list(range(2, 4))))): y = np.random.rand(i*(i-1)//2) y2 = np.random.rand(j*(j-1)//2) Z = linkage(y) Z2 = linkage(y2) self.assertTrue(correspond(Z, y2) == False) self.assertTrue(correspond(Z2, y) == False) def test_correspond_4_and_up_2(self): # Tests correspond(Z, y) on linkage and CDMs over observation sets of # different sizes. Correspondance should be false. for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) + list(zip(list(range(2, 7)), list(range(16, 21))))): y = np.random.rand(i*(i-1)//2) y2 = np.random.rand(j*(j-1)//2) Z = linkage(y) Z2 = linkage(y2) self.assertTrue(correspond(Z, y2) == False) self.assertTrue(correspond(Z2, y) == False) def test_num_obs_linkage_multi_matrix(self): # Tests num_obs_linkage with observation matrices of multiple sizes. for n in xrange(2, 10): X = np.random.rand(n, 4) Y = pdist(X) Z = linkage(Y) self.assertTrue(num_obs_linkage(Z) == n) class TestIsMonotonic(TestCase): def test_is_monotonic_empty(self): # Tests is_monotonic(Z) on an empty linkage. Z = np.zeros((0, 4)) self.assertRaises(ValueError, is_monotonic, Z) def test_is_monotonic_1x4(self): # Tests is_monotonic(Z) on 1x4 linkage. Expecting True. Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double) self.assertTrue(is_monotonic(Z) == True) def test_is_monotonic_2x4_T(self): # Tests is_monotonic(Z) on 2x4 linkage. Expecting True. Z = np.asarray([[0, 1, 0.3, 2], [2, 3, 0.4, 3]], dtype=np.double) self.assertTrue(is_monotonic(Z) == True) def test_is_monotonic_2x4_F(self): # Tests is_monotonic(Z) on 2x4 linkage. Expecting False. Z = np.asarray([[0, 1, 0.4, 2], [2, 3, 0.3, 3]], dtype=np.double) self.assertTrue(is_monotonic(Z) == False) def test_is_monotonic_3x4_T(self): # Tests is_monotonic(Z) on 3x4 linkage. Expecting True. Z = np.asarray([[0, 1, 0.3, 2], [2, 3, 0.4, 2], [4, 5, 0.6, 4]], dtype=np.double) self.assertTrue(is_monotonic(Z) == True) def test_is_monotonic_3x4_F1(self): # Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False. 
Z = np.asarray([[0, 1, 0.3, 2], [2, 3, 0.2, 2], [4, 5, 0.6, 4]], dtype=np.double) self.assertTrue(is_monotonic(Z) == False) def test_is_monotonic_3x4_F2(self): # Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False. Z = np.asarray([[0, 1, 0.8, 2], [2, 3, 0.4, 2], [4, 5, 0.6, 4]], dtype=np.double) self.assertTrue(is_monotonic(Z) == False) def test_is_monotonic_3x4_F3(self): # Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False Z = np.asarray([[0, 1, 0.3, 2], [2, 3, 0.4, 2], [4, 5, 0.2, 4]], dtype=np.double) self.assertTrue(is_monotonic(Z) == False) def test_is_monotonic_tdist_linkage1(self): # Tests is_monotonic(Z) on clustering generated by single linkage on # tdist data set. Expecting True. Z = linkage(hierarchy_test_data.ytdist, 'single') self.assertTrue(is_monotonic(Z) == True) def test_is_monotonic_tdist_linkage2(self): # Tests is_monotonic(Z) on clustering generated by single linkage on # tdist data set. Perturbing. Expecting False. Z = linkage(hierarchy_test_data.ytdist, 'single') Z[2,2] = 0.0 self.assertTrue(is_monotonic(Z) == False) def test_is_monotonic_Q_linkage(self): # Tests is_monotonic(Z) on clustering generated by single linkage on # Q data set. Expecting True. X = hierarchy_test_data.Q_X Z = linkage(X, 'single') self.assertTrue(is_monotonic(Z) == True) class TestMaxDists(object): def test_maxdists_empty_linkage(self): # Tests maxdists(Z) on empty linkage. Expecting exception. Z = np.zeros((0, 4), dtype=np.double) assert_raises(ValueError, maxdists, Z) def test_maxdists_one_cluster_linkage(self): # Tests maxdists(Z) on linkage with one cluster. Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) MD = maxdists(Z) expectedMD = calculate_maximum_distances(Z) assert_allclose(MD, expectedMD, atol=1e-15) def test_maxdists_Q_linkage(self): for method in ['single', 'complete', 'ward', 'centroid', 'median']: yield self.check_maxdists_Q_linkage, method def check_maxdists_Q_linkage(self, method): # Tests maxdists(Z) on the Q data set X = hierarchy_test_data.Q_X Z = linkage(X, method) MD = maxdists(Z) expectedMD = calculate_maximum_distances(Z) assert_allclose(MD, expectedMD, atol=1e-15) class TestMaxInconsts(object): def test_maxinconsts_empty_linkage(self): # Tests maxinconsts(Z, R) on empty linkage. Expecting exception. Z = np.zeros((0, 4), dtype=np.double) R = np.zeros((0, 4), dtype=np.double) assert_raises(ValueError, maxinconsts, Z, R) def test_maxinconsts_difrow_linkage(self): # Tests maxinconsts(Z, R) on linkage and inconsistency matrices with # different numbers of clusters. Expecting exception. Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) R = np.random.rand(2, 4) assert_raises(ValueError, maxinconsts, Z, R) def test_maxinconsts_one_cluster_linkage(self): # Tests maxinconsts(Z, R) on linkage with one cluster. 
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) MD = maxinconsts(Z, R) expectedMD = calculate_maximum_inconsistencies(Z, R) assert_allclose(MD, expectedMD, atol=1e-15) def test_maxinconsts_Q_linkage(self): for method in ['single', 'complete', 'ward', 'centroid', 'median']: yield self.check_maxinconsts_Q_linkage, method def check_maxinconsts_Q_linkage(self, method): # Tests maxinconsts(Z, R) on the Q data set X = hierarchy_test_data.Q_X Z = linkage(X, method) R = inconsistent(Z) MD = maxinconsts(Z, R) expectedMD = calculate_maximum_inconsistencies(Z, R) assert_allclose(MD, expectedMD, atol=1e-15) class TestMaxRStat(object): def test_maxRstat_invalid_index(self): for i in [3.3, -1, 4]: yield self.check_maxRstat_invalid_index, i def check_maxRstat_invalid_index(self, i): # Tests maxRstat(Z, R, i). Expecting exception. Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) if isinstance(i, int): assert_raises(ValueError, maxRstat, Z, R, i) else: assert_raises(TypeError, maxRstat, Z, R, i) def test_maxRstat_empty_linkage(self): for i in range(4): yield self.check_maxRstat_empty_linkage, i def check_maxRstat_empty_linkage(self, i): # Tests maxRstat(Z, R, i) on empty linkage. Expecting exception. Z = np.zeros((0, 4), dtype=np.double) R = np.zeros((0, 4), dtype=np.double) assert_raises(ValueError, maxRstat, Z, R, i) def test_maxRstat_difrow_linkage(self): for i in range(4): yield self.check_maxRstat_difrow_linkage, i def check_maxRstat_difrow_linkage(self, i): # Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with # different numbers of clusters. Expecting exception. Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) R = np.random.rand(2, 4) assert_raises(ValueError, maxRstat, Z, R, i) def test_maxRstat_one_cluster_linkage(self): for i in range(4): yield self.check_maxRstat_one_cluster_linkage, i def check_maxRstat_one_cluster_linkage(self, i): # Tests maxRstat(Z, R, i) on linkage with one cluster. Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) MD = maxRstat(Z, R, 1) expectedMD = calculate_maximum_inconsistencies(Z, R, 1) assert_allclose(MD, expectedMD, atol=1e-15) def test_maxRstat_Q_linkage(self): for method in ['single', 'complete', 'ward', 'centroid', 'median']: for i in range(4): yield self.check_maxRstat_Q_linkage, method, i def check_maxRstat_Q_linkage(self, method, i): # Tests maxRstat(Z, R, i) on the Q data set X = hierarchy_test_data.Q_X Z = linkage(X, method) R = inconsistent(Z) MD = maxRstat(Z, R, 1) expectedMD = calculate_maximum_inconsistencies(Z, R, 1) assert_allclose(MD, expectedMD, atol=1e-15) class TestDendrogram(object): def test_dendrogram_single_linkage_tdist(self): # Tests dendrogram calculation on single linkage of the tdist data set. Z = linkage(hierarchy_test_data.ytdist, 'single') R = dendrogram(Z, no_plot=True) leaves = R["leaves"] assert_equal(leaves, [2, 5, 1, 0, 3, 4]) def test_valid_orientation(self): Z = linkage(hierarchy_test_data.ytdist, 'single') assert_raises(ValueError, dendrogram, Z, orientation="foo") @dec.skipif(not have_matplotlib) def test_dendrogram_plot(self): for orientation in ['top', 'bottom', 'left', 'right']: yield self.check_dendrogram_plot, orientation def check_dendrogram_plot(self, orientation): # Tests dendrogram plotting. 
Z = linkage(hierarchy_test_data.ytdist, 'single') expected = {'color_list': ['g', 'b', 'b', 'b', 'b'], 'dcoord': [[0.0, 138.0, 138.0, 0.0], [0.0, 219.0, 219.0, 0.0], [0.0, 255.0, 255.0, 219.0], [0.0, 268.0, 268.0, 255.0], [138.0, 295.0, 295.0, 268.0]], 'icoord': [[5.0, 5.0, 15.0, 15.0], [45.0, 45.0, 55.0, 55.0], [35.0, 35.0, 50.0, 50.0], [25.0, 25.0, 42.5, 42.5], [10.0, 10.0, 33.75, 33.75]], 'ivl': ['2', '5', '1', '0', '3', '4'], 'leaves': [2, 5, 1, 0, 3, 4]} fig = plt.figure() ax = fig.add_subplot(111) # test that dendrogram accepts ax keyword R1 = dendrogram(Z, ax=ax, orientation=orientation) plt.close() assert_equal(R1, expected) # test plotting to gca (will import pylab) R2 = dendrogram(Z, orientation=orientation) plt.close() assert_equal(R2, expected) @dec.skipif(not have_matplotlib) def test_dendrogram_truncate_mode(self): Z = linkage(hierarchy_test_data.ytdist, 'single') R = dendrogram(Z, 2, 'lastp', show_contracted=True) plt.close() assert_equal(R, {'color_list': ['b'], 'dcoord': [[0.0, 295.0, 295.0, 0.0]], 'icoord': [[5.0, 5.0, 15.0, 15.0]], 'ivl': ['(2)', '(4)'], 'leaves': [6, 9]}) R = dendrogram(Z, 2, 'mtica', show_contracted=True) plt.close() assert_equal(R, {'color_list': ['g', 'b', 'b', 'b'], 'dcoord': [[0.0, 138.0, 138.0, 0.0], [0.0, 255.0, 255.0, 0.0], [0.0, 268.0, 268.0, 255.0], [138.0, 295.0, 295.0, 268.0]], 'icoord': [[5.0, 5.0, 15.0, 15.0], [35.0, 35.0, 45.0, 45.0], [25.0, 25.0, 40.0, 40.0], [10.0, 10.0, 32.5, 32.5]], 'ivl': ['2', '5', '1', '0', '(2)'], 'leaves': [2, 5, 1, 0, 7]}) def calculate_maximum_distances(Z): # Used for testing correctness of maxdists. n = Z.shape[0] + 1 B = np.zeros((n-1,)) q = np.zeros((3,)) for i in xrange(0, n - 1): q[:] = 0.0 left = Z[i, 0] right = Z[i, 1] if left >= n: q[0] = B[int(left) - n] if right >= n: q[1] = B[int(right) - n] q[2] = Z[i, 2] B[i] = q.max() return B def calculate_maximum_inconsistencies(Z, R, k=3): # Used for testing correctness of maxinconsts. n = Z.shape[0] + 1 B = np.zeros((n-1,)) q = np.zeros((3,)) for i in xrange(0, n - 1): q[:] = 0.0 left = Z[i, 0] right = Z[i, 1] if left >= n: q[0] = B[int(left) - n] if right >= n: q[1] = B[int(right) - n] q[2] = R[i, k] B[i] = q.max() return B def test_euclidean_linkage_value_error(): for method in scipy.cluster.hierarchy._cpy_euclid_methods: assert_raises(ValueError, linkage, [[1, 1], [1, 1]], method=method, metric='cityblock') def test_2x2_linkage(): Z1 = linkage([1], method='single', metric='euclidean') Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean') assert_allclose(Z1, Z2) if __name__ == "__main__": run_module_suite()
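As a quick illustration of the MATLAB-style conversion exercised in TestMLabLinkageConversion above, the same 5-row matrix can be converted directly: cluster indices become 0-based and a fourth column with the number of original observations in each merged cluster is appended.

import numpy as np
from scipy.cluster.hierarchy import from_mlab_linkage

Zm = np.asarray([[3, 6, 138],
                 [4, 5, 219],
                 [1, 8, 255],
                 [2, 9, 268],
                 [7, 10, 295]])
print(from_mlab_linkage(Zm))   # 0-indexed ids plus a cluster-size column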
gpl-3.0
Universal-Model-Converter/UMC3.0a
data/Python/x86/Lib/site-packages/scipy/misc/common.py
2
14711
""" Functions which are common and require SciPy Base and Level 1 SciPy (special, linalg) """ from __future__ import division, print_function, absolute_import from scipy.lib.six.moves import xrange from numpy import exp, log, asarray, arange, newaxis, hstack, product, array, \ where, zeros, extract, place, pi, sqrt, eye, poly1d, dot, \ r_, rollaxis, sum, fromstring __all__ = ['logsumexp', 'factorial','factorial2','factorialk','comb', 'central_diff_weights', 'derivative', 'pade', 'lena', 'ascent', 'face'] # XXX: the factorial functions could move to scipy.special, and the others # to numpy perhaps? def logsumexp(a, axis=None, b=None): """Compute the log of the sum of exponentials of input elements. Parameters ---------- a : array_like Input array. axis : int, optional Axis over which the sum is taken. By default `axis` is None, and all elements are summed. .. versionadded:: 0.11.0 b : array-like, optional Scaling factor for exp(`a`) must be of the same shape as `a` or broadcastable to `a`. .. versionadded:: 0.12.0 Returns ------- res : ndarray The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))`` is returned. See Also -------- numpy.logaddexp, numpy.logaddexp2 Notes ----- Numpy has a logaddexp function which is very similar to `logsumexp`, but only handles two arguments. `logaddexp.reduce` is similar to this function, but may be less stable. Examples -------- >>> from scipy.misc import logsumexp >>> a = np.arange(10) >>> np.log(np.sum(np.exp(a))) 9.4586297444267107 >>> logsumexp(a) 9.4586297444267107 With weights >>> a = np.arange(10) >>> b = np.arange(10, 0, -1) >>> logsumexp(a, b=b) 9.9170178533034665 >>> np.log(np.sum(b*np.exp(a))) 9.9170178533034647 """ a = asarray(a) if axis is None: a = a.ravel() else: a = rollaxis(a, axis) a_max = a.max(axis=0) if b is not None: b = asarray(b) if axis is None: b = b.ravel() else: b = rollaxis(b, axis) out = log(sum(b * exp(a - a_max), axis=0)) else: out = log(sum(exp(a - a_max), axis=0)) out += a_max return out def factorial(n,exact=0): """ The factorial function, n! = special.gamma(n+1). If exact is 0, then floating point precision is used, otherwise exact long integer is computed. - Array argument accepted only for exact=0 case. - If n<0, the return value is 0. Parameters ---------- n : int or array_like of ints Calculate ``n!``. Arrays are only supported with `exact` set to False. If ``n < 0``, the return value is 0. exact : bool, optional The result can be approximated rapidly using the gamma-formula above. If `exact` is set to True, calculate the answer exactly using integer arithmetic. Default is False. Returns ------- nf : float or int Factorial of `n`, as an integer or a float depending on `exact`. Examples -------- >>> arr = np.array([3,4,5]) >>> sc.factorial(arr, exact=False) array([ 6., 24., 120.]) >>> sc.factorial(5, exact=True) 120L """ if exact: if n < 0: return 0 val = 1 for k in xrange(1,n+1): val *= k return val else: from scipy import special n = asarray(n) sv = special.errprint(0) vals = special.gamma(n+1) sv = special.errprint(sv) return where(n>=0,vals,0) def factorial2(n, exact=False): """ Double factorial. This is the factorial with every second value skipped, i.e., ``7!! = 7 * 5 * 3 * 1``. It can be approximated numerically as:: n!! = special.gamma(n/2+1)*2**((m+1)/2)/sqrt(pi) n odd = 2**(n/2) * (n/2)! n even Parameters ---------- n : int or array_like Calculate ``n!!``. Arrays are only supported with `exact` set to False. 
If ``n < 0``, the return value is 0. exact : bool, optional The result can be approximated rapidly using the gamma-formula above (default). If `exact` is set to True, calculate the answer exactly using integer arithmetic. Returns ------- nff : float or int Double factorial of `n`, as an int or a float depending on `exact`. Examples -------- >>> factorial2(7, exact=False) array(105.00000000000001) >>> factorial2(7, exact=True) 105L """ if exact: if n < -1: return 0 if n <= 0: return 1 val = 1 for k in xrange(n,0,-2): val *= k return val else: from scipy import special n = asarray(n) vals = zeros(n.shape,'d') cond1 = (n % 2) & (n >= -1) cond2 = (1-(n % 2)) & (n >= -1) oddn = extract(cond1,n) evenn = extract(cond2,n) nd2o = oddn / 2.0 nd2e = evenn / 2.0 place(vals,cond1,special.gamma(nd2o+1)/sqrt(pi)*pow(2.0,nd2o+0.5)) place(vals,cond2,special.gamma(nd2e+1) * pow(2.0,nd2e)) return vals def factorialk(n,k,exact=1): """ n(!!...!) = multifactorial of order k k times Parameters ---------- n : int, array_like Calculate multifactorial. Arrays are only supported with exact set to False. If `n` < 0, the return value is 0. exact : bool, optional If exact is set to True, calculate the answer exactly using integer arithmetic. Returns ------- val : int Multi factorial of `n`. Raises ------ NotImplementedError Raises when exact is False Examples -------- >>> sc.factorialk(5, 1, exact=True) 120L >>> sc.factorialk(5, 3, exact=True) 10L """ if exact: if n < 1-k: return 0 if n<=0: return 1 val = 1 for j in xrange(n,0,-k): val = val*j return val else: raise NotImplementedError def comb(N,k,exact=0): """ The number of combinations of N things taken k at a time. This is often expressed as "N choose k". Parameters ---------- N : int, ndarray Number of things. k : int, ndarray Number of elements taken. exact : int, optional If `exact` is 0, then floating point precision is used, otherwise exact long integer is computed. Returns ------- val : int, ndarray The total number of combinations. Notes ----- - Array arguments accepted only for exact=0 case. - If k > N, N < 0, or k < 0, then a 0 is returned. Examples -------- >>> k = np.array([3, 4]) >>> n = np.array([10, 10]) >>> sc.comb(n, k, exact=False) array([ 120., 210.]) >>> sc.comb(10, 3, exact=True) 120L """ if exact: if (k > N) or (N < 0) or (k < 0): return 0 val = 1 for j in xrange(min(k, N-k)): val = (val*(N-j))//(j+1) return val else: from scipy import special k,N = asarray(k), asarray(N) lgam = special.gammaln cond = (k <= N) & (N >= 0) & (k >= 0) sv = special.errprint(0) vals = exp(lgam(N+1) - lgam(N-k+1) - lgam(k+1)) sv = special.errprint(sv) return where(cond, vals, 0.0) def central_diff_weights(Np, ndiv=1): """ Return weights for an Np-point central derivative. Assumes equally-spaced function points. If weights are in the vector w, then derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx) Parameters ---------- Np : int Number of points for the central derivative. ndiv : int, optional Number of divisions. Default is 1. Notes ----- Can be inaccurate for large number of points. """ if Np < ndiv + 1: raise ValueError("Number of points must be at least the derivative order + 1.") if Np % 2 == 0: raise ValueError("The number of points must be odd.") from scipy import linalg ho = Np >> 1 x = arange(-ho,ho+1.0) x = x[:,newaxis] X = x**0.0 for k in range(1,Np): X = hstack([X,x**k]) w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv] return w def derivative(func, x0, dx=1.0, n=1, args=(), order=3): """ Find the n-th derivative of a function at a point. 
Given a function, use a central difference formula with spacing `dx` to compute the `n`-th derivative at `x0`. Parameters ---------- func : function Input function. x0 : float The point at which `n`-th derivative is found. dx : int, optional Spacing. n : int, optional Order of the derivative. Default is 1. args : tuple, optional Arguments order : int, optional Number of points to use, must be odd. Notes ----- Decreasing the step size too small can result in round-off error. Examples -------- >>> def x2(x): ... return x*x ... >>> derivative(x2, 2) 4.0 """ if order < n + 1: raise ValueError("'order' (the number of points used to compute the derivative), " "must be at least the derivative order 'n' + 1.") if order % 2 == 0: raise ValueError("'order' (the number of points used to compute the derivative) " "must be odd.") # pre-computed for n=1 and 2 and low-order for speed. if n==1: if order == 3: weights = array([-1,0,1])/2.0 elif order == 5: weights = array([1,-8,0,8,-1])/12.0 elif order == 7: weights = array([-1,9,-45,0,45,-9,1])/60.0 elif order == 9: weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0 else: weights = central_diff_weights(order,1) elif n==2: if order == 3: weights = array([1,-2.0,1]) elif order == 5: weights = array([-1,16,-30,16,-1])/12.0 elif order == 7: weights = array([2,-27,270,-490,270,-27,2])/180.0 elif order == 9: weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0 else: weights = central_diff_weights(order,2) else: weights = central_diff_weights(order, n) val = 0.0 ho = order >> 1 for k in range(order): val += weights[k]*func(x0+(k-ho)*dx,*args) return val / product((dx,)*n,axis=0) def pade(an, m): """ Return Pade approximation to a polynomial as the ratio of two polynomials. Parameters ---------- an : (N,) array_like Taylor series coefficients. m : int The order of the returned approximating polynomials. Returns ------- p, q : Polynomial class The pade approximation of the polynomial defined by `an` is `p(x)/q(x)`. Examples -------- >>> from scipy import misc >>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0] >>> p, q = misc.pade(e_exp, 2) >>> e_exp.reverse() >>> e_poly = np.poly1d(e_exp) Compare ``e_poly(x)`` and the pade approximation ``p(x)/q(x)`` >>> e_poly(1) 2.7166666666666668 >>> p(1)/q(1) 2.7179487179487181 """ from scipy import linalg an = asarray(an) N = len(an) - 1 n = N - m if n < 0: raise ValueError("Order of q <m> must be smaller than len(an)-1.") Akj = eye(N+1, n+1) Bkj = zeros((N+1, m), 'd') for row in range(1, m+1): Bkj[row,:row] = -(an[:row])[::-1] for row in range(m+1, N+1): Bkj[row,:] = -(an[row-m:row])[::-1] C = hstack((Akj, Bkj)) pq = linalg.solve(C, an) p = pq[:n+1] q = r_[1.0, pq[n+1:]] return poly1d(p[::-1]), poly1d(q[::-1]) def lena(): """ Get classic image processing example image, Lena, at 8-bit grayscale bit-depth, 512 x 512 size. 
Parameters ---------- None Returns ------- lena : ndarray Lena image Examples -------- >>> import scipy.misc >>> lena = scipy.misc.lena() >>> lena.shape (512, 512) >>> lena.max() 245 >>> lena.dtype dtype('int32') >>> import matplotlib.pyplot as plt >>> plt.gray() >>> plt.imshow(lena) >>> plt.show() """ import pickle, os fname = os.path.join(os.path.dirname(__file__),'lena.dat') f = open(fname,'rb') lena = array(pickle.load(f)) f.close() return lena def ascent(): """ Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos The image is derived from accent-to-the-top.jpg at http://www.public-domain-image.com/people-public-domain-images-pictures/ Parameters ---------- None Returns ------- ascent : ndarray convenient image to use for testing and demonstration Examples -------- >>> import scipy.misc >>> ascent = scipy.misc.ascent() >>> ascent.shape (512, 512) >>> ascent.max() 255 >>> import matplotlib.pyplot as plt >>> plt.gray() >>> plt.imshow(ascent) >>> plt.show() """ import pickle, os fname = os.path.join(os.path.dirname(__file__),'ascent.dat') f = open(fname,'rb') ascent = array(pickle.load(f)) f.close() return ascent def face(gray=False): """ Get a 1024 x 768, color image of a raccoon face. raccoon-procyon-lotor.jpg at http://www.public-domain-image.com Parameters ---------- gray : bool, optional If True then return color image, otherwise return an 8-bit gray-scale Returns ------- face : ndarray image of a racoon face Examples -------- >>> import scipy.misc >>> face = scipy.misc.face() >>> face.shape (768, 1024, 3) >>> face.max() 230 >>> face.dtype dtype('uint8') >>> import matplotlib.pyplot as plt >>> plt.gray() >>> plt.imshow(face) >>> plt.show() """ import bz2, os rawdata = open(os.path.join(os.path.dirname(__file__), 'face.dat')).read() data = bz2.decompress(rawdata) face = fromstring(data, dtype='uint8') face.shape = (768, 1024, 3) if gray is True: face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8') return face
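The logsumexp implementation above depends on shifting by the maximum before exponentiating. A small sketch of why that matters, using made-up values that are large enough to overflow the naive formula:

import numpy as np

a = np.array([1000.0, 1000.5, 999.0])

naive = np.log(np.sum(np.exp(a)))                     # exp(1000) overflows -> inf
a_max = a.max()
stable = a_max + np.log(np.sum(np.exp(a - a_max)))    # shifted exponents are all <= 0

print(naive)    # inf (with an overflow warning)
print(stable)   # ~1001.10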
mit
kiyoto/statsmodels
statsmodels/sandbox/panel/mixed.py
31
21019
""" Mixed effects models Author: Jonathan Taylor Author: Josef Perktold License: BSD-3 Notes ------ It's pretty slow if the model is misspecified, in my first example convergence in loglike is not reached within 2000 iterations. Added stop criteria based on convergence of parameters instead. With correctly specified model, convergence is fast, in 6 iterations in example. """ from __future__ import print_function import numpy as np import numpy.linalg as L from statsmodels.base.model import LikelihoodModelResults from statsmodels.tools.decorators import cache_readonly class Unit(object): """ Individual experimental unit for EM implementation of (repeated measures) mixed effects model. \'Maximum Likelihood Computations with Repeated Measures: Application of the EM Algorithm\' Nan Laird; Nicholas Lange; Daniel Stram Journal of the American Statistical Association, Vol. 82, No. 397. (Mar., 1987), pp. 97-105. Parameters ---------- endog : ndarray, (nobs,) response, endogenous variable exog_fe : ndarray, (nobs, k_vars_fe) explanatory variables as regressors or fixed effects, should include exog_re to correct mean of random coefficients, see Notes exog_re : ndarray, (nobs, k_vars_re) explanatory variables or random effects or coefficients Notes ----- If the exog_re variables are not included in exog_fe, then the mean of the random constants or coefficients are not centered. The covariance matrix of the random parameter estimates are not centered in this case. (That's how it looks to me. JP) """ def __init__(self, endog, exog_fe, exog_re): self.Y = endog self.X = exog_fe self.Z = exog_re self.n = endog.shape[0] def _compute_S(self, D, sigma): """covariance of observations (nobs_i, nobs_i) (JP check) Display (3.3) from Laird, Lange, Stram (see help(Unit)) """ self.S = (np.identity(self.n) * sigma**2 + np.dot(self.Z, np.dot(D, self.Z.T))) def _compute_W(self): """inverse covariance of observations (nobs_i, nobs_i) (JP check) Display (3.2) from Laird, Lange, Stram (see help(Unit)) """ self.W = L.inv(self.S) def compute_P(self, Sinv): """projection matrix (nobs_i, nobs_i) (M in regression ?) (JP check, guessing) Display (3.10) from Laird, Lange, Stram (see help(Unit)) W - W X Sinv X' W' """ t = np.dot(self.W, self.X) self.P = self.W - np.dot(np.dot(t, Sinv), t.T) def _compute_r(self, alpha): """residual after removing fixed effects Display (3.5) from Laird, Lange, Stram (see help(Unit)) """ self.r = self.Y - np.dot(self.X, alpha) def _compute_b(self, D): """coefficients for random effects/coefficients Display (3.4) from Laird, Lange, Stram (see help(Unit)) D Z' W r """ self.b = np.dot(D, np.dot(np.dot(self.Z.T, self.W), self.r)) def fit(self, a, D, sigma): """ Compute unit specific parameters in Laird, Lange, Stram (see help(Unit)). Displays (3.2)-(3.5). """ self._compute_S(D, sigma) #random effect plus error covariance self._compute_W() #inv(S) self._compute_r(a) #residual after removing fixed effects/exogs self._compute_b(D) #? coefficients on random exog, Z ? def compute_xtwy(self): """ Utility function to compute X^tWY (transposed ?) for Unit instance. """ return np.dot(np.dot(self.W, self.Y), self.X) #is this transposed ? def compute_xtwx(self): """ Utility function to compute X^tWX for Unit instance. """ return np.dot(np.dot(self.X.T, self.W), self.X) def cov_random(self, D, Sinv=None): """ Approximate covariance of estimates of random effects. Just after Display (3.10) in Laird, Lange, Stram (see help(Unit)). 
D - D' Z' P Z D Notes ----- In example where the mean of the random coefficient is not zero, this is not a covariance but a non-centered moment. (proof by example) """ if Sinv is not None: self.compute_P(Sinv) t = np.dot(self.Z, D) return D - np.dot(np.dot(t.T, self.P), t) def logL(self, a, ML=False): """ Individual contributions to the log-likelihood, tries to return REML contribution by default though this requires estimated fixed effect a to be passed as an argument. no constant with pi included a is not used if ML=true (should be a=None in signature) If ML is false, then the residuals are calculated for the given fixed effects parameters a. """ if ML: return (np.log(L.det(self.W)) - (self.r * np.dot(self.W, self.r)).sum()) / 2. else: if a is None: raise ValueError('need fixed effect a for REML contribution to log-likelihood') r = self.Y - np.dot(self.X, a) return (np.log(L.det(self.W)) - (r * np.dot(self.W, r)).sum()) / 2. def deviance(self, ML=False): '''deviance defined as 2 times the negative loglikelihood ''' return - 2 * self.logL(ML=ML) class OneWayMixed(object): """ Model for EM implementation of (repeated measures) mixed effects model. \'Maximum Likelihood Computations with Repeated Measures: Application of the EM Algorithm\' Nan Laird; Nicholas Lange; Daniel Stram Journal of the American Statistical Association, Vol. 82, No. 397. (Mar., 1987), pp. 97-105. Parameters ---------- units : list of units the data for the individual units should be attached to the units response, fixed and random : formula expression, called as argument to Formula *available results and alias* (subject to renaming, and coversion to cached attributes) params() -> self.a : coefficient for fixed effects or exog cov_params() -> self.Sinv : covariance estimate of fixed effects/exog bse() : standard deviation of params cov_random -> self.D : estimate of random effects covariance params_random_units -> [self.units[...].b] : random coefficient for each unit *attributes* (others) self.m : number of units self.p : k_vars_fixed self.q : k_vars_random self.N : nobs (total) Notes ----- Fit returns a result instance, but not all results that use the inherited methods have been checked. Parameters need to change: drop formula and we require a naming convention for the units (currently Y,X,Z). - endog, exog_fe, endog_re ? logL does not include constant, e.g. sqrt(pi) llf is for MLE not for REML convergence criteria for iteration Currently convergence in the iterative solver is reached if either the loglikelihood *or* the fixed effects parameter don't change above tolerance. In some examples, the fixed effects parameters converged to 1e-5 within 150 iterations while the log likelihood did not converge within 2000 iterations. This might be the case if the fixed effects parameters are well estimated, but there are still changes in the random effects. If params_rtol and params_atol are set at a higher level, then the random effects might not be estimated to a very high precision. The above was with a misspecified model, without a constant. With a correctly specified model convergence is fast, within a few iterations (6 in example). 
""" def __init__(self, units): self.units = units self.m = len(self.units) self.n_units = self.m self.N = sum(unit.X.shape[0] for unit in self.units) self.nobs = self.N #alias for now # Determine size of fixed effects d = self.units[0].X self.p = d.shape[1] # d.shape = p self.k_exog_fe = self.p #alias for now self.a = np.zeros(self.p, np.float64) # Determine size of D, and sensible initial estimates # of sigma and D d = self.units[0].Z self.q = d.shape[1] # Z.shape = q self.k_exog_re = self.q #alias for now self.D = np.zeros((self.q,)*2, np.float64) self.sigma = 1. self.dev = np.inf #initialize for iterations, move it? def _compute_a(self): """fixed effects parameters Display (3.1) of Laird, Lange, Stram (see help(Mixed)). """ for unit in self.units: unit.fit(self.a, self.D, self.sigma) S = sum([unit.compute_xtwx() for unit in self.units]) Y = sum([unit.compute_xtwy() for unit in self.units]) self.Sinv = L.pinv(S) self.a = np.dot(self.Sinv, Y) def _compute_sigma(self, ML=False): """ Estimate sigma. If ML is True, return the ML estimate of sigma, else return the REML estimate. If ML, this is (3.6) in Laird, Lange, Stram (see help(Mixed)), otherwise it corresponds to (3.8). sigma is the standard deviation of the noise (residual) """ sigmasq = 0. for unit in self.units: if ML: W = unit.W else: unit.compute_P(self.Sinv) W = unit.P t = unit.r - np.dot(unit.Z, unit.b) sigmasq += np.power(t, 2).sum() sigmasq += self.sigma**2 * np.trace(np.identity(unit.n) - self.sigma**2 * W) self.sigma = np.sqrt(sigmasq / self.N) def _compute_D(self, ML=False): """ Estimate random effects covariance D. If ML is True, return the ML estimate of sigma, else return the REML estimate. If ML, this is (3.7) in Laird, Lange, Stram (see help(Mixed)), otherwise it corresponds to (3.9). """ D = 0. for unit in self.units: if ML: W = unit.W else: unit.compute_P(self.Sinv) W = unit.P D += np.multiply.outer(unit.b, unit.b) t = np.dot(unit.Z, self.D) D += self.D - np.dot(np.dot(t.T, W), t) self.D = D / self.m def cov_fixed(self): """ Approximate covariance of estimates of fixed effects. Just after Display (3.10) in Laird, Lange, Stram (see help(Mixed)). """ return self.Sinv #----------- alias (JP) move to results class ? def cov_random(self): """ Estimate random effects covariance D. If ML is True, return the ML estimate of sigma, else return the REML estimate. see _compute_D, alias for self.D """ return self.D @property def params(self): ''' estimated coefficients for exogeneous variables or fixed effects see _compute_a, alias for self.a ''' return self.a @property def params_random_units(self): '''random coefficients for each unit ''' return np.array([unit.b for unit in self.units]) def cov_params(self): ''' estimated covariance for coefficients for exogeneous variables or fixed effects see cov_fixed, and Sinv in _compute_a ''' return self.cov_fixed() @property def bse(self): ''' standard errors of estimated coefficients for exogeneous variables (fixed) ''' return np.sqrt(np.diag(self.cov_params())) #----------- end alias def deviance(self, ML=False): '''deviance defined as 2 times the negative loglikelihood ''' return -2 * self.logL(ML=ML) def logL(self, ML=False): """ Return log-likelihood, REML by default. """ #I don't know what the difference between REML and ML is here. logL = 0. 
for unit in self.units: logL += unit.logL(a=self.a, ML=ML) if not ML: logL += np.log(L.det(self.Sinv)) / 2 return logL def initialize(self): S = sum([np.dot(unit.X.T, unit.X) for unit in self.units]) Y = sum([np.dot(unit.X.T, unit.Y) for unit in self.units]) self.a = L.lstsq(S, Y)[0] D = 0 t = 0 sigmasq = 0 for unit in self.units: unit.r = unit.Y - np.dot(unit.X, self.a) if self.q > 1: unit.b = L.lstsq(unit.Z, unit.r)[0] else: Z = unit.Z.reshape((unit.Z.shape[0], 1)) unit.b = L.lstsq(Z, unit.r)[0] sigmasq += (np.power(unit.Y, 2).sum() - (self.a * np.dot(unit.X.T, unit.Y)).sum() - (unit.b * np.dot(unit.Z.T, unit.r)).sum()) D += np.multiply.outer(unit.b, unit.b) t += L.pinv(np.dot(unit.Z.T, unit.Z)) #TODO: JP added df_resid check self.df_resid = (self.N - (self.m - 1) * self.q - self.p) sigmasq /= (self.N - (self.m - 1) * self.q - self.p) self.sigma = np.sqrt(sigmasq) self.D = (D - sigmasq * t) / self.m def cont(self, ML=False, rtol=1.0e-05, params_rtol=1e-5, params_atol=1e-4): '''convergence check for iterative estimation ''' self.dev, old = self.deviance(ML=ML), self.dev #self.history.append(np.hstack((self.dev, self.a))) self.history['llf'].append(self.dev) self.history['params'].append(self.a.copy()) self.history['D'].append(self.D.copy()) if np.fabs((self.dev - old) / self.dev) < rtol: #why is there times `*`? #print np.fabs((self.dev - old)), self.dev, old self.termination = 'llf' return False #break if parameters converged #TODO: check termination conditions, OR or AND if np.all(np.abs(self.a - self._a_old) < (params_rtol * self.a + params_atol)): self.termination = 'params' return False self._a_old = self.a.copy() return True def fit(self, maxiter=100, ML=False, rtol=1.0e-05, params_rtol=1e-6, params_atol=1e-6): #initialize for convergence criteria self._a_old = np.inf * self.a self.history = {'llf':[], 'params':[], 'D':[]} for i in range(maxiter): self._compute_a() #a, Sinv : params, cov_params of fixed exog self._compute_sigma(ML=ML) #sigma MLE or REML of sigma ? 
self._compute_D(ML=ML) #D : covariance of random effects, MLE or REML if not self.cont(ML=ML, rtol=rtol, params_rtol=params_rtol, params_atol=params_atol): break else: #if end of loop is reached without break self.termination = 'maxiter' print('Warning: maximum number of iterations reached') self.iterations = i results = OneWayMixedResults(self) #compatibility functions for fixed effects/exog results.scale = 1 results.normalized_cov_params = self.cov_params() return results class OneWayMixedResults(LikelihoodModelResults): '''Results class for OneWayMixed models ''' def __init__(self, model): #TODO: check, change initialization to more standard pattern self.model = model self.params = model.params #need to overwrite this because we don't have a standard #model.loglike yet #TODO: what todo about REML loglike, logL is not normalized @cache_readonly def llf(self): return self.model.logL(ML=True) @property def params_random_units(self): return self.model.params_random_units def cov_random(self): return self.model.cov_random() def mean_random(self, idx='lastexog'): if idx == 'lastexog': meanr = self.params[-self.model.k_exog_re:] elif isinstance(idx, list): if not len(idx) == self.model.k_exog_re: raise ValueError('length of idx different from k_exog_re') else: meanr = self.params[idx] else: meanr = np.zeros(self.model.k_exog_re) return meanr def std_random(self): return np.sqrt(np.diag(self.cov_random())) def plot_random_univariate(self, bins=None, use_loc=True): '''create plot of marginal distribution of random effects Parameters ---------- bins : int or bin edges option for bins in matplotlibs hist method. Current default is not very sophisticated. All distributions use the same setting for bins. use_loc : bool If True, then the distribution with mean given by the fixed effect is used. Returns ------- fig : matplotlib figure instance figure with subplots Notes ----- What can make this fancier? Bin edges will not make sense if loc or scale differ across random effect distributions. ''' #outsource this import matplotlib.pyplot as plt from scipy.stats import norm as normal fig = plt.figure() k = self.model.k_exog_re if k > 3: rows, cols = int(np.ceil(k * 0.5)), 2 else: rows, cols = k, 1 if bins is None: #bins = self.model.n_units // 20 #TODO: just roughly, check # bins = np.sqrt(self.model.n_units) bins = 5 + 2 * self.model.n_units**(1./3.) if use_loc: loc = self.mean_random() else: loc = [0]*k scale = self.std_random() for ii in range(k): ax = fig.add_subplot(rows, cols, ii) freq, bins_, _ = ax.hist(loc[ii] + self.params_random_units[:,ii], bins=bins, normed=True) points = np.linspace(bins_[0], bins_[-1], 200) #ax.plot(points, normal.pdf(points, loc=loc, scale=scale)) #loc of sample is approx. zero, with Z appended to X #alternative, add fixed to mean ax.set_title('Random Effect %d Marginal Distribution' % ii) ax.plot(points, normal.pdf(points, loc=loc[ii], scale=scale[ii]), 'r') return fig def plot_scatter_pairs(self, idx1, idx2, title=None, ax=None): '''create scatter plot of two random effects Parameters ---------- idx1, idx2 : int indices of the two random effects to display, corresponding to columns of exog_re title : None or string If None, then a default title is added ax : None or matplotlib axis instance If None, then a figure with one axis is created and returned. If ax is not None, then the scatter plot is created on it, and this axis instance is returned. 
Returns ------- ax_or_fig : axis or figure instance see ax parameter Notes ----- Still needs ellipse from estimated parameters ''' import matplotlib.pyplot as plt if ax is None: fig = plt.figure() ax = fig.add_subplot(1,1,1) ax_or_fig = fig re1 = self.params_random_units[:,idx1] re2 = self.params_random_units[:,idx2] ax.plot(re1, re2, 'o', alpha=0.75) if title is None: title = 'Random Effects %d and %d' % (idx1, idx2) ax.set_title(title) ax_or_fig = ax return ax_or_fig def plot_scatter_all_pairs(self, title=None): from statsmodels.graphics.plot_grids import scatter_ellipse if self.model.k_exog_re < 2: raise ValueError('less than two variables available') return scatter_ellipse(self.params_random_units, ell_kwds={'color':'r'}) #ell_kwds not implemented yet # #note I have written this already as helper function, get it # import matplotlib.pyplot as plt # #from scipy.stats import norm as normal # fig = plt.figure() # k = self.model.k_exog_re # n_plots = k * (k - 1) // 2 # if n_plots > 3: # rows, cols = int(np.ceil(n_plots * 0.5)), 2 # else: # rows, cols = n_plots, 1 # # count = 1 # for ii in range(k): # for jj in range(ii): # ax = fig.add_subplot(rows, cols, count) # self.plot_scatter_pairs(ii, jj, title=None, ax=ax) # count += 1 # # return fig if __name__ == '__main__': #see examples/ex_mixed_lls_1.py pass
bsd-3-clause
ElDeveloper/scikit-learn
doc/sphinxext/numpy_ext/docscrape_sphinx.py
408
8061
import re import inspect import textwrap import pydoc from .docscrape import NumpyDocString from .docscrape import FunctionDoc from .docscrape import ClassDoc class SphinxDocString(NumpyDocString): def __init__(self, docstring, config=None): config = {} if config is None else config self.use_plots = config.get('use_plots', False) NumpyDocString.__init__(self, docstring, config=config) # string conversion routines def _str_header(self, name, symbol='`'): return ['.. rubric:: ' + name, ''] def _str_field_list(self, name): return [':' + name + ':'] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' ' * indent + line] return out def _str_signature(self): return [''] if self['Signature']: return ['``%s``' % self['Signature']] + [''] else: return [''] def _str_summary(self): return self['Summary'] + [''] def _str_extended_summary(self): return self['Extended Summary'] + [''] def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param, param_type, desc in self[name]: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) out += [''] out += self._str_indent(desc, 8) out += [''] return out @property def _obj(self): if hasattr(self, '_cls'): return self._cls elif hasattr(self, '_f'): return self._f return None def _str_member_list(self, name): """ Generate a member listing, autosummary:: table where possible, and a table where not. """ out = [] if self[name]: out += ['.. rubric:: %s' % name, ''] prefix = getattr(self, '_name', '') if prefix: prefix = '~%s.' % prefix autosum = [] others = [] for param, param_type, desc in self[name]: param = param.strip() if not self._obj or hasattr(self._obj, param): autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: # GAEL: Toctree commented out below because it creates # hundreds of sphinx warnings # out += ['.. autosummary::', ' :toctree:', ''] out += ['.. autosummary::', ''] out += autosum if others: maxlen_0 = max([len(x[0]) for x in others]) maxlen_1 = max([len(x[1]) for x in others]) hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10 fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) n_indent = maxlen_0 + maxlen_1 + 4 out += [hdr] for param, param_type, desc in others: out += [fmt % (param.strip(), param_type)] out += self._str_indent(desc, n_indent) out += [hdr] out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += [''] content = textwrap.dedent("\n".join(self[name])).split("\n") out += content out += [''] return out def _str_see_also(self, func_role): out = [] if self['See Also']: see_also = super(SphinxDocString, self)._str_see_also(func_role) out = ['.. seealso::', ''] out += self._str_indent(see_also[2:]) return out def _str_warnings(self): out = [] if self['Warnings']: out = ['.. warning::', ''] out += self._str_indent(self['Warnings']) return out def _str_index(self): idx = self['index'] out = [] if len(idx) == 0: return out out += ['.. 
index:: %s' % idx.get('default', '')] for section, references in idx.iteritems(): if section == 'default': continue elif section == 'refguide': out += [' single: %s' % (', '.join(references))] else: out += [' %s: %s' % (section, ','.join(references))] return out def _str_references(self): out = [] if self['References']: out += self._str_header('References') if isinstance(self['References'], str): self['References'] = [self['References']] out.extend(self['References']) out += [''] # Latex collects all references to a separate bibliography, # so we need to insert links to it import sphinx # local import to avoid test dependency if sphinx.__version__ >= "0.6": out += ['.. only:: latex', ''] else: out += ['.. latexonly::', ''] items = [] for line in self['References']: m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) if m: items.append(m.group(1)) out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] return out def _str_examples(self): examples_str = "\n".join(self['Examples']) if (self.use_plots and 'import matplotlib' in examples_str and 'plot::' not in examples_str): out = [] out += self._str_header('Examples') out += ['.. plot::', ''] out += self._str_indent(self['Examples']) out += [''] return out else: return self._str_section('Examples') def __str__(self, indent=0, func_role="obj"): out = [] out += self._str_signature() out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) out += self._str_section('Notes') out += self._str_references() out += self._str_examples() for param_list in ('Methods',): out += self._str_member_list(param_list) out = self._str_indent(out, indent) return '\n'.join(out) class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): self.use_plots = config.get('use_plots', False) FunctionDoc.__init__(self, obj, doc=doc, config=config) class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): self.use_plots = config.get('use_plots', False) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) class SphinxObjDoc(SphinxDocString): def __init__(self, obj, doc=None, config=None): self._f = obj SphinxDocString.__init__(self, doc, config=config) def get_doc_object(obj, what=None, doc=None, config={}): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' elif callable(obj): what = 'function' else: what = 'object' if what == 'class': return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) elif what in ('function', 'method'): return SphinxFunctionDoc(obj, doc=doc, config=config) else: if doc is None: doc = pydoc.getdoc(obj) return SphinxObjDoc(obj, doc, config=config)
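# --- Hedged usage sketch (not part of the file above) -----------------------
# How get_doc_object() is typically called to render a numpydoc-style
# docstring as Sphinx reST.  This assumes docscrape_sphinx.py and its sibling
# docscrape.py are importable as the package ``numpy_ext``; the example
# function below is made up.
from numpy_ext.docscrape_sphinx import get_doc_object


def example(x, y=0):
    """Add two numbers.

    Parameters
    ----------
    x : int
        First addend.
    y : int, optional
        Second addend.
    """
    return x + y

doc = get_doc_object(example)  # a callable, so this returns a SphinxFunctionDoc
print(str(doc))                # reST output, e.g. a ':Parameters:' field list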
bsd-3-clause
Garrett-R/scikit-learn
examples/bicluster/plot_spectral_biclustering.py
403
2011
""" ============================================= A demo of the Spectral Biclustering algorithm ============================================= This example demonstrates how to generate a checkerboard dataset and bicluster it using the Spectral Biclustering algorithm. The data is generated with the ``make_checkerboard`` function, then shuffled and passed to the Spectral Biclustering algorithm. The rows and columns of the shuffled matrix are rearranged to show the biclusters found by the algorithm. The outer product of the row and column label vectors shows a representation of the checkerboard structure. """ print(__doc__) # Author: Kemal Eren <kemal@kemaleren.com> # License: BSD 3 clause import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import make_checkerboard from sklearn.datasets import samples_generator as sg from sklearn.cluster.bicluster import SpectralBiclustering from sklearn.metrics import consensus_score n_clusters = (4, 3) data, rows, columns = make_checkerboard( shape=(300, 300), n_clusters=n_clusters, noise=10, shuffle=False, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Original dataset") data, row_idx, col_idx = sg._shuffle(data, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Shuffled dataset") model = SpectralBiclustering(n_clusters=n_clusters, method='log', random_state=0) model.fit(data) score = consensus_score(model.biclusters_, (rows[:, row_idx], columns[:, col_idx])) print("consensus score: {:.1f}".format(score)) fit_data = data[np.argsort(model.row_labels_)] fit_data = fit_data[:, np.argsort(model.column_labels_)] plt.matshow(fit_data, cmap=plt.cm.Blues) plt.title("After biclustering; rearranged to show biclusters") plt.matshow(np.outer(np.sort(model.row_labels_) + 1, np.sort(model.column_labels_) + 1), cmap=plt.cm.Blues) plt.title("Checkerboard structure of rearranged data") plt.show()
bsd-3-clause
Titan-C/scikit-learn
examples/cluster/plot_ward_structured_vs_unstructured.py
1
3369
""" =========================================================== Hierarchical clustering: structured vs unstructured ward =========================================================== Example builds a swiss roll dataset and runs hierarchical clustering on their position. For more information, see :ref:`hierarchical_clustering`. In a first step, the hierarchical clustering is performed without connectivity constraints on the structure and is solely based on distance, whereas in a second step the clustering is restricted to the k-Nearest Neighbors graph: it's a hierarchical clustering with structure prior. Some of the clusters learned without connectivity constraints do not respect the structure of the swiss roll and extend across different folds of the manifolds. On the opposite, when opposing connectivity constraints, the clusters form a nice parcellation of the swiss roll. """ # Authors : Vincent Michel, 2010 # Alexandre Gramfort, 2010 # Gael Varoquaux, 2010 # License: BSD 3 clause print(__doc__) import time as time import numpy as np import matplotlib.pyplot as plt import mpl_toolkits.mplot3d.axes3d as p3 from sklearn.cluster import AgglomerativeClustering from sklearn.datasets.samples_generator import make_swiss_roll # ############################################################################# # Generate data (swiss roll dataset) n_samples = 1500 noise = 0.05 X, _ = make_swiss_roll(n_samples, noise) # Make it thinner X[:, 1] *= .5 # ############################################################################# # Compute clustering print("Compute unstructured hierarchical clustering...") st = time.time() ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X) elapsed_time = time.time() - st label = ward.labels_ print("Elapsed time: %.2fs" % elapsed_time) print("Number of points: %i" % label.size) # ############################################################################# # Plot result fig = plt.figure() ax = p3.Axes3D(fig) ax.view_init(7, -80) for l in np.unique(label): ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2], 'o', color=plt.cm.jet(np.float(l) / np.max(label + 1))) plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time) # ############################################################################# # Define the structure A of the data. Here a 10 nearest neighbors from sklearn.neighbors import kneighbors_graph connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False) # ############################################################################# # Compute clustering print("Compute structured hierarchical clustering...") st = time.time() ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity, linkage='ward').fit(X) elapsed_time = time.time() - st label = ward.labels_ print("Elapsed time: %.2fs" % elapsed_time) print("Number of points: %i" % label.size) # ############################################################################# # Plot result fig = plt.figure() ax = p3.Axes3D(fig) ax.view_init(7, -80) for l in np.unique(label): ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2], 'o', color=plt.cm.jet(float(l) / np.max(label + 1))) plt.title('With connectivity constraints (time %.2fs)' % elapsed_time) plt.show()
bsd-3-clause
petebachant/PXL
pxl/tests/test_fdiff.py
1
1436
from __future__ import division, print_function
from .. import fdiff
from ..fdiff import *
import matplotlib.pyplot as plt
import pandas as pd
import os
import numpy as np
from uncertainties import unumpy

plot = False


def test_second_order_diff():
    """Test `second_order_diff`."""
    # Create a non-equally spaced x vector
    x = np.append(np.linspace(0, np.pi, 100),
                  np.linspace(np.pi + 0.01, 2*np.pi, 400))
    u = np.sin(x)
    dudx = second_order_diff(u, x)
    assert dudx.shape == u.shape
    # Assert that this function is almost identical to cos(x)
    np.testing.assert_allclose(dudx, np.cos(x), rtol=1e-3)
    if plot:
        plt.plot(x, dudx, "-o", lw=2, alpha=0.5)
        plt.plot(x, np.cos(x), "--^", lw=2, alpha=0.5)
        plt.show()


def test_second_order_diff_uncertainties():
    """Test that `second_order_diff` works with uncertainties."""
    # Create a non-equally spaced x vector
    x = np.append(np.linspace(0, np.pi, 50),
                  np.linspace(np.pi + 0.01, 2*np.pi, 100))
    x_unc = unumpy.uarray(x, np.ones(len(x))*1e-3)
    u = unumpy.uarray(np.sin(x), np.ones(len(x))*1e-2)
    dudx = second_order_diff(u, x)
    print(dudx[:5])
    print(dudx[-5:])
    if plot:
        plt.errorbar(x, unumpy.nominal_values(dudx),
                     yerr=unumpy.std_devs(dudx),
                     fmt="-o", lw=2, alpha=0.5)
        plt.plot(x, np.cos(x), "--^", lw=2, alpha=0.5)
        plt.show()
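# --- Hedged sketch (not the actual pxl.fdiff implementation) ----------------
# One standard way to obtain the behaviour these tests check: a first
# derivative that stays second-order accurate on a non-uniform grid, using the
# three-point unequal-spacing stencil in the interior and plain one-sided
# differences at the ends for brevity.
import numpy as np


def second_order_diff_sketch(u, x):
    u = np.asarray(u, dtype=float)
    x = np.asarray(x, dtype=float)
    dudx = np.empty_like(u)
    h1 = x[1:-1] - x[:-2]   # spacing to the left of each interior point
    h2 = x[2:] - x[1:-1]    # spacing to the right
    dudx[1:-1] = (h1 / (h2 * (h1 + h2)) * u[2:]
                  + (h2 - h1) / (h1 * h2) * u[1:-1]
                  - h2 / (h1 * (h1 + h2)) * u[:-2])
    dudx[0] = (u[1] - u[0]) / (x[1] - x[0])
    dudx[-1] = (u[-1] - u[-2]) / (x[-1] - x[-2])
    return dudx

x = np.linspace(0.0, 2.0 * np.pi, 200)
np.testing.assert_allclose(second_order_diff_sketch(np.sin(x), x),
                           np.cos(x), atol=1e-3)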
gpl-3.0
loli/sklearn-ensembletrees
examples/plot_rfe_with_cross_validation.py
24
1384
""" =================================================== Recursive feature elimination with cross-validation =================================================== A recursive feature elimination example with automatic tuning of the number of features selected with cross-validation. """ print(__doc__) from sklearn.svm import SVC from sklearn.cross_validation import StratifiedKFold from sklearn.feature_selection import RFECV from sklearn.datasets import make_classification # Build a classification task using 3 informative features X, y = make_classification(n_samples=1000, n_features=25, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, random_state=0) # Create the RFE object and compute a cross-validated score. svc = SVC(kernel="linear") # The "accuracy" scoring is proportional to the number of correct # classifications rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2), scoring='accuracy') rfecv.fit(X, y) print("Optimal number of features : %d" % rfecv.n_features_) # Plot number of features VS. cross-validation scores import matplotlib.pyplot as plt plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) plt.show()
bsd-3-clause
tomlof/scikit-learn
examples/manifold/plot_lle_digits.py
138
8594
""" ============================================================================= Manifold learning on handwritten digits: Locally Linear Embedding, Isomap... ============================================================================= An illustration of various embeddings on the digits dataset. The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not technically a manifold embedding method, as it learn a high-dimensional representation on which we apply a dimensionality reduction method. However, it is often useful to cast a dataset into a representation in which the classes are linearly-separable. t-SNE will be initialized with the embedding that is generated by PCA in this example, which is not the default setting. It ensures global stability of the embedding, i.e., the embedding does not depend on random initialization. """ # Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr> # Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Gael Varoquaux # License: BSD 3 clause (C) INRIA 2011 print(__doc__) from time import time import numpy as np import matplotlib.pyplot as plt from matplotlib import offsetbox from sklearn import (manifold, datasets, decomposition, ensemble, discriminant_analysis, random_projection) digits = datasets.load_digits(n_class=6) X = digits.data y = digits.target n_samples, n_features = X.shape n_neighbors = 30 #---------------------------------------------------------------------- # Scale and visualize the embedding vectors def plot_embedding(X, title=None): x_min, x_max = np.min(X, 0), np.max(X, 0) X = (X - x_min) / (x_max - x_min) plt.figure() ax = plt.subplot(111) for i in range(X.shape[0]): plt.text(X[i, 0], X[i, 1], str(digits.target[i]), color=plt.cm.Set1(y[i] / 10.), fontdict={'weight': 'bold', 'size': 9}) if hasattr(offsetbox, 'AnnotationBbox'): # only print thumbnails with matplotlib > 1.0 shown_images = np.array([[1., 1.]]) # just something big for i in range(digits.data.shape[0]): dist = np.sum((X[i] - shown_images) ** 2, 1) if np.min(dist) < 4e-3: # don't show points that are too close continue shown_images = np.r_[shown_images, [X[i]]] imagebox = offsetbox.AnnotationBbox( offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r), X[i]) ax.add_artist(imagebox) plt.xticks([]), plt.yticks([]) if title is not None: plt.title(title) #---------------------------------------------------------------------- # Plot images of the digits n_img_per_row = 20 img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row)) for i in range(n_img_per_row): ix = 10 * i + 1 for j in range(n_img_per_row): iy = 10 * j + 1 img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8)) plt.imshow(img, cmap=plt.cm.binary) plt.xticks([]) plt.yticks([]) plt.title('A selection from the 64-dimensional digits dataset') #---------------------------------------------------------------------- # Random 2D projection using a random unitary matrix print("Computing random projection") rp = random_projection.SparseRandomProjection(n_components=2, random_state=42) X_projected = rp.fit_transform(X) plot_embedding(X_projected, "Random Projection of the digits") #---------------------------------------------------------------------- # Projection on to the first 2 principal components print("Computing PCA projection") t0 = time() X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X) plot_embedding(X_pca, "Principal Components projection of the digits (time %.2fs)" % (time() - t0)) 
#---------------------------------------------------------------------- # Projection on to the first 2 linear discriminant components print("Computing Linear Discriminant Analysis projection") X2 = X.copy() X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible t0 = time() X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y) plot_embedding(X_lda, "Linear Discriminant projection of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Isomap projection of the digits dataset print("Computing Isomap embedding") t0 = time() X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X) print("Done.") plot_embedding(X_iso, "Isomap projection of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Locally linear embedding of the digits dataset print("Computing LLE embedding") clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method='standard') t0 = time() X_lle = clf.fit_transform(X) print("Done. Reconstruction error: %g" % clf.reconstruction_error_) plot_embedding(X_lle, "Locally Linear Embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Modified Locally linear embedding of the digits dataset print("Computing modified LLE embedding") clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method='modified') t0 = time() X_mlle = clf.fit_transform(X) print("Done. Reconstruction error: %g" % clf.reconstruction_error_) plot_embedding(X_mlle, "Modified Locally Linear Embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # HLLE embedding of the digits dataset print("Computing Hessian LLE embedding") clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method='hessian') t0 = time() X_hlle = clf.fit_transform(X) print("Done. Reconstruction error: %g" % clf.reconstruction_error_) plot_embedding(X_hlle, "Hessian Locally Linear Embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # LTSA embedding of the digits dataset print("Computing LTSA embedding") clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method='ltsa') t0 = time() X_ltsa = clf.fit_transform(X) print("Done. Reconstruction error: %g" % clf.reconstruction_error_) plot_embedding(X_ltsa, "Local Tangent Space Alignment of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # MDS embedding of the digits dataset print("Computing MDS embedding") clf = manifold.MDS(n_components=2, n_init=1, max_iter=100) t0 = time() X_mds = clf.fit_transform(X) print("Done. 
Stress: %f" % clf.stress_) plot_embedding(X_mds, "MDS embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Random Trees embedding of the digits dataset print("Computing Totally Random Trees embedding") hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0, max_depth=5) t0 = time() X_transformed = hasher.fit_transform(X) pca = decomposition.TruncatedSVD(n_components=2) X_reduced = pca.fit_transform(X_transformed) plot_embedding(X_reduced, "Random forest embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Spectral embedding of the digits dataset print("Computing Spectral embedding") embedder = manifold.SpectralEmbedding(n_components=2, random_state=0, eigen_solver="arpack") t0 = time() X_se = embedder.fit_transform(X) plot_embedding(X_se, "Spectral embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # t-SNE embedding of the digits dataset print("Computing t-SNE embedding") tsne = manifold.TSNE(n_components=2, init='pca', random_state=0) t0 = time() X_tsne = tsne.fit_transform(X) plot_embedding(X_tsne, "t-SNE embedding of the digits (time %.2fs)" % (time() - t0)) plt.show()
bsd-3-clause
mne-tools/mne-tools.github.io
0.21/_downloads/33b5e3cff5c172d72c79c6eec192b031/plot_label_from_stc.py
20
4093
""" ================================================= Generate a functional label from source estimates ================================================= Threshold source estimates and produce a functional label. The label is typically the region of interest that contains high values. Here we compare the average time course in the anatomical label obtained by FreeSurfer segmentation and the average time course from the functional label. As expected the time course in the functional label yields higher values. """ # Author: Luke Bloy <luke.bloy@gmail.com> # Alex Gramfort <alexandre.gramfort@inria.fr> # License: BSD (3-clause) import numpy as np import matplotlib.pyplot as plt import mne from mne.minimum_norm import read_inverse_operator, apply_inverse from mne.datasets import sample print(__doc__) data_path = sample.data_path() fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif' subjects_dir = data_path + '/subjects' subject = 'sample' snr = 3.0 lambda2 = 1.0 / snr ** 2 method = "dSPM" # use dSPM method (could also be MNE or sLORETA) # Compute a label/ROI based on the peak power between 80 and 120 ms. # The label bankssts-lh is used for the comparison. aparc_label_name = 'bankssts-lh' tmin, tmax = 0.080, 0.120 # Load data evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0)) inverse_operator = read_inverse_operator(fname_inv) src = inverse_operator['src'] # get the source space # Compute inverse solution stc = apply_inverse(evoked, inverse_operator, lambda2, method, pick_ori='normal') # Make an STC in the time interval of interest and take the mean stc_mean = stc.copy().crop(tmin, tmax).mean() # use the stc_mean to generate a functional label # region growing is halted at 60% of the peak value within the # anatomical label / ROI specified by aparc_label_name label = mne.read_labels_from_annot(subject, parc='aparc', subjects_dir=subjects_dir, regexp=aparc_label_name)[0] stc_mean_label = stc_mean.in_label(label) data = np.abs(stc_mean_label.data) stc_mean_label.data[data < 0.6 * np.max(data)] = 0. # 8.5% of original source space vertices were omitted during forward # calculation, suppress the warning here with verbose='error' func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True, subjects_dir=subjects_dir, connected=True, verbose='error') # take first as func_labels are ordered based on maximum values in stc func_label = func_labels[0] # load the anatomical ROI for comparison anat_label = mne.read_labels_from_annot(subject, parc='aparc', subjects_dir=subjects_dir, regexp=aparc_label_name)[0] # extract the anatomical time course for each label stc_anat_label = stc.in_label(anat_label) pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0] stc_func_label = stc.in_label(func_label) pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0] # flip the pca so that the max power between tmin and tmax is positive pca_anat *= np.sign(pca_anat[np.argmax(np.abs(pca_anat))]) pca_func *= np.sign(pca_func[np.argmax(np.abs(pca_anat))]) ############################################################################### # plot the time courses.... 
plt.figure() plt.plot(1e3 * stc_anat_label.times, pca_anat, 'k', label='Anatomical %s' % aparc_label_name) plt.plot(1e3 * stc_func_label.times, pca_func, 'b', label='Functional %s' % aparc_label_name) plt.legend() plt.show() ############################################################################### # plot brain in 3D with PySurfer if available brain = stc_mean.plot(hemi='lh', subjects_dir=subjects_dir) brain.show_view('lateral') # show both labels brain.add_label(anat_label, borders=True, color='k') brain.add_label(func_label, borders=True, color='b')
bsd-3-clause
latticelabs/Mitty
mitty/benchmarking/misalignment_plot.py
1
9184
"""Prepare a binned matrix of misalignments and plot it in different ways""" import click import pysam import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from matplotlib.path import Path import matplotlib.patches as patches from matplotlib.colors import LogNorm import numpy as np def we_have_too_many_bins(bins): return sum([len(bb) for bb in bins]) > 5000 # This is our threshold for too many bins to compute def autoscale_bin_size(chrom_lens, bin_cnt=100.0): return int(sum(chrom_lens) / bin_cnt) def compute_misalignment_matrix_from_bam(bam_fp, bin_size=None, i_know_what_i_am_doing=False): """Create a matrix of binned mis-alignments :param bam_fp: input BAM :param bin_size: size of bin in mega bases :param i_know_what_i_am_doing: Set this to override the runtime warning of too many bins """ def binnify(_pos, _bins): for n in range(1, len(_bins)): if _pos < _bins[n]: return n - 1 return len(_bins) - 1 # Should not get here chrom_lens = [hdr['LN'] for hdr in bam_fp.header['SQ']] bin_size = bin_size * 1e6 if bin_size is not None else autoscale_bin_size(chrom_lens) bins = [np.array(range(0, hdr['LN'], bin_size) + [hdr['LN']], dtype=int) for hdr in bam_fp.header['SQ']] if not i_know_what_i_am_doing and we_have_too_many_bins(bins): raise RuntimeWarning('The number of bins will be very large. ' 'If you are sure you want to do this, ' 'use the --i-know-what-i-am-doing flag.') bin_centers = [(bb[:-1] + bb[1:]) / 2.0 for bb in bins] # Rows = source (correct pos) Cols = destination (aligned pos) matrices = [[np.zeros(shape=(len(bins[j]) - 1, len(bins[i]) - 1), dtype='uint32') for i in range(len(bins))] for j in range(len(bins))] # TAG TYPE VALUE # XR i Aligned chromosome # XP i Aligned pos for r in bam_fp: c_chrom, c_pos, a_chrom, a_pos = r.reference_id, r.pos, r.get_tag('XR'), r.get_tag('XP') c_pos_binned, a_pos_binned = binnify(c_pos, bins[c_chrom]), binnify(a_pos, bins[a_chrom]) matrices[c_chrom][a_chrom][c_pos_binned, a_pos_binned] += 1 return chrom_lens, bins, bin_centers, matrices def plot_genome_as_a_circle(ax, chrom_lens, chrom_gap=np.pi / 50, chrom_radius=1.0, chrom_thick=5, r_max=1.05): """Plot the chromosomes on a circle.""" total_len = sum(chrom_lens) radians_per_base = (2.0 * np.pi - len(chrom_lens) * chrom_gap) / total_len # With allowance for chrom gaps theta_stops, x_ticks, x_tick_labels = [], [], [] delta_radian = 0.01 start_radian = 0 for ch_no, l in enumerate(chrom_lens): end_radian = start_radian + l * radians_per_base theta = np.arange(start_radian, end_radian, delta_radian) theta_stops.append((start_radian, end_radian)) ax.plot(theta, [chrom_radius * 1.01] * theta.size, lw=chrom_thick, zorder=-1) # , color=[.3, .3, .3]) x_ticks.append((start_radian + end_radian)/2) x_tick_labels.append(str(ch_no + 1)) start_radian = end_radian + chrom_gap plt.setp(ax.get_yticklabels(), visible=False) ax.grid(False) plt.setp(ax, xticks=x_ticks, xticklabels=x_tick_labels) ax.set_rmax(r_max) return theta_stops def plot_read_mis_alignments_on_a_circle(ax, chrom_lens, bins, bin_centers, matrices, theta_stops, chrom_radius=1.0, scaling_factor=0.01): scaling_factor *= 0.01 # http://matplotlib.org/users/path_tutorial.html codes = [ Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4, ] for i in range(len(bins)): for j in range(len(bins)): mat = matrices[i][j] range_bp_origin, range_bp_dest = float(chrom_lens[i]), float(chrom_lens[j]) offset_origin, offset_dest = theta_stops[i][0], theta_stops[j][0] range_origin, range_dest = theta_stops[i][1] - theta_stops[i][0], theta_stops[j][1] - 
theta_stops[j][0] scale_origin, scale_dest = range_origin / range_bp_origin, range_dest / range_bp_dest c_origin, c_dest = offset_origin + bin_centers[i] * scale_origin, offset_dest + bin_centers[j] * scale_dest this_origin, this_dest = np.tile(c_origin, c_dest.shape[0]), np.repeat(c_dest, c_origin.shape[0]) mat_flat = mat.ravel() idx, = mat_flat.nonzero() for ii in idx: t0, t1 = this_origin[ii], this_dest[ii] this_radius = max(min(1.0, abs(t1 - t0) / np.pi), 0.05) * chrom_radius vertices = [ (t0, chrom_radius), # P0 (t0, chrom_radius - this_radius), # P1 (t1, chrom_radius - this_radius), # P2 (t1, chrom_radius), # P3 ] path = Path(vertices, codes) patch = patches.PathPatch(path, facecolor='none', lw=scaling_factor * mat_flat[ii]) ax.add_patch(patch) def circle_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor): """Plot the confusion matrix as a circle plot.""" fig = plt.figure() ax = fig.add_subplot(111, polar=True) theta_stops = plot_genome_as_a_circle(ax, chrom_lens) plot_read_mis_alignments_on_a_circle(ax, chrom_lens, bins, bin_centers, matrices, theta_stops, chrom_radius=1.0, scaling_factor=scaling_factor) def plot_genome_as_a_square(ax, bins, chrom_gap=1000, chrom_thick=5): """Plot the chromosomes on a matrix.""" start_pos, linear_stops, x_ticks, x_tick_labels = chrom_gap, [], [], [] for ch_no, b in enumerate(bins): linear_stops.append([start_pos, start_pos + b[-1]]) ax.plot([x + start_pos for x in b], [0 for _ in b], color='k' if ch_no % 2 else 'gray', lw=chrom_thick, zorder=-1) ax.plot([0 for _ in b], [x + start_pos for x in b], color='k' if ch_no % 2 else 'gray', lw=chrom_thick, zorder=-1) x_ticks.append((start_pos + start_pos + b[-1]) / 2) x_tick_labels.append(str(ch_no + 1)) start_pos += b[-1] + chrom_gap #plt.setp(ax.get_yticklabels(), visible=False) ax.grid(False) plt.setp(ax, xticks=x_ticks, xticklabels=x_tick_labels, yticks=x_ticks, yticklabels=x_tick_labels) return linear_stops def plot_read_mis_alignments_as_a_matrix(ax, chrom_lens, bins, bin_centers, matrices, linear_stops, scaling_factor=1.0, plot_grid=True): for i in range(len(bins)): for j in range(len(bins)): mat = matrices[i][j] range_bp_x, range_bp_y = float(chrom_lens[i]), float(chrom_lens[j]) offset_x, offset_y = linear_stops[i][0], linear_stops[j][0] range_x, range_y = linear_stops[i][1] - linear_stops[i][0], linear_stops[j][1] - linear_stops[j][0] scale_x, scale_y = range_x / range_bp_x, range_y / range_bp_y cx, cy = offset_x + bin_centers[i] * scale_x, offset_y + bin_centers[j] * scale_y this_x, this_y = np.tile(cx, cy.shape[0]), np.repeat(cy, cx.shape[0]) if plot_grid: ax.plot(this_x, this_y, '.', color=(0.8, 0.8, 0.8), ms=2, zorder=-1) mat_flat = mat.ravel() idx, = mat_flat.nonzero() if idx.size > 0: ax.scatter(this_x[idx], this_y[idx], mat_flat[idx] * scaling_factor, facecolors='none') def matrix_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor, plot_grid=True): """Plot the confusion matrix as a ... 
matrix.""" fig = plt.figure() ax = fig.add_subplot(111) linear_stops = plot_genome_as_a_square(ax, bins, chrom_gap=max(chrom_lens) * 0.1) plot_read_mis_alignments_as_a_matrix(ax, chrom_lens, bins, bin_centers, matrices, linear_stops, scaling_factor=scaling_factor, plot_grid=plot_grid) plt.setp(ax, aspect=1, xlabel='Correct', ylabel='Aligned') def is_grid_too_dense(bins): return sum([len(bb) for bb in bins]) > 100 # This is our threshold for too dense a grid to show def auto_scale_scaling_factor(matrices, scale=1000.0): return scale / max([matrices[i][j].max() for i in range(len(matrices)) for j in range(len(matrices[i]))]) @click.command() @click.argument('badbam', type=click.Path(exists=True)) @click.option('--circle', type=click.Path(), help='Name of figure file for circle plot') @click.option('--matrix', type=click.Path(), help='Name of figure file for matrix plot') @click.option('--bin-size', type=float, default=None, help='Bin size in Mb. Omit to auto-scale') @click.option('--scaling-factor', type=float, default=None, help='Scale size of disks/lines in plot. Omit to auto-scale') @click.option('--i-know-what-i-am-doing', is_flag=True, help='Override bin density safety') def cli(badbam, circle, matrix, bin_size, scaling_factor, i_know_what_i_am_doing): """Prepare a binned matrix of mis-alignments and plot it in different ways""" chrom_lens, bins, bin_centers, matrices = \ compute_misalignment_matrix_from_bam(pysam.AlignmentFile(badbam, 'rb'), bin_size=bin_size, i_know_what_i_am_doing=i_know_what_i_am_doing) scaling_factor = scaling_factor or auto_scale_scaling_factor(matrices) if circle is not None: circle_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor) plt.savefig(circle) if matrix is not None: matrix_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor, plot_grid=not is_grid_too_dense(bins)) plt.savefig(matrix) if __name__ == '__main__': cli()
gpl-2.0
idf/scipy_util
scipy_util/image/color_kmeans.py
1
2314
# USAGE # python color_kmeans.py --image images/jp.png --clusters 3 # Author: Adrian Rosebrock # Website: www.pyimagesearch.com # import the necessary packages from sklearn.cluster import KMeans import matplotlib.pyplot as plt import numpy as np import argparse import cv2 def centroid_histogram(clt): # grab the number of different clusters and create a histogram # based on the number of pixels assigned to each cluster numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1) (hist, _) = np.histogram(clt.labels_, bins = numLabels) # normalize the histogram, such that it sums to one hist = hist.astype("float") hist /= hist.sum() # return the histogram return hist def plot_colors(hist, centroids): # initialize the bar chart representing the relative frequency # of each of the colors bar = np.zeros((50, 300, 3), dtype = "uint8") startX = 0 # loop over the percentage of each cluster and the color of # each cluster for (percent, color) in zip(hist, centroids): # plot the relative percentage of each cluster endX = startX + (percent * 300) cv2.rectangle(bar, (int(startX), 0), (int(endX), 50), color.astype("uint8").tolist(), -1) startX = endX # return the bar chart return bar # construct the argument parser and parse the arguments ap = argparse.ArgumentParser() ap.add_argument("-i", "--image", required=True, help="Path to the image") ap.add_argument("-c", "--clusters", required=True, type=int, help="# of clusters") args = vars(ap.parse_args()) # load the image and convert it from BGR to RGB so that # we can dispaly it with matplotlib image = cv2.imread(args["image"]) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # show our image plt.figure() plt.axis("off") plt.imshow(image) # reshape the image to be a list of pixels image = image.reshape((image.shape[0]*image.shape[1], 3)) # cluster the pixel intensities clt = KMeans(n_clusters=args["clusters"]) clt.fit(image) # build a histogram of clusters and then create a figure # representing the number of pixels labeled to each color hist = centroid_histogram(clt) bar = plot_colors(hist, clt.cluster_centers_) # show our color bart plt.figure() plt.axis("off") plt.imshow(bar) plt.show()
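# --- Hedged aside (not part of the script above) -----------------------------
# centroid_histogram() is equivalent to a normalised np.bincount over the
# cluster labels; the labels below are made up for illustration.
import numpy as np

labels = np.array([0, 0, 1, 2, 2, 2])
hist = np.bincount(labels).astype(float)
hist /= hist.sum()
print(hist)   # [0.333... 0.166... 0.5] -- fraction of pixels in each cluster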
bsd-3-clause
robogen/CMS-Mining
RunScripts/es_mainreduce.py
1
20005
from elasticsearch import Elasticsearch import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages from matplotlib.dates import AutoDateLocator, AutoDateFormatter import numpy as np import datetime as dt import math import json with open('sites.json', 'r+') as txt: sitesArray = json.load(txt) with open('cms.json', 'r+') as txt: cmsLocate = json.load(txt) with open("config", "r+") as txt: contents = list(map(str.rstrip, txt)) def conAtlasTime(time): return (dt.datetime.strptime(time, '%Y-%m-%dT%X')).replace(tzinfo=dt.timezone.utc).timestamp() def utcDate(time): return dt.datetime.fromtimestamp(time, dt.timezone.utc) esAtlas = Elasticsearch([{ 'host': contents[2], 'port': contents[3] }], timeout=50) esHCC = Elasticsearch([{ 'host': contents[0], 'port': contents[1] }], timeout=50) scrollPreserve="3m" startDate = "2016-07-17T00:00:00" endDate = "2016-07-25T00:00:00" tenMin = np.multiply(10,60) loc = {} loc["location"] = np.array([]) def atlasLatency(srcSite, destSite): queryAtlas={"query" : {"bool": { "must": [ {"match" : {"_type" : "latency" } }, {"match" : {"srcSite" : srcSite } }, {"match" : {"destSite" : destSite } }, {"range" : { "timestamp" : { #"gt" : int(conAtlasTime(startDate)), #"lt" : int(conAtlasTime(endDate)) "gt" : startDate, "lt" : endDate } }} ] } } } scannerAtlas = esAtlas.search(index="network_weather_2-*", body=queryAtlas, search_type="scan", scroll=scrollPreserve) scrollIdAtlas = scannerAtlas['_scroll_id'] atlasTotalRec = scannerAtlas["hits"]["total"] arrRet = np.array([]) if atlasTotalRec == 0: return None else: while atlasTotalRec > 0: responseAtlas = esAtlas.scroll(scroll_id=scrollIdAtlas, scroll=scrollPreserve) for hit in responseAtlas["hits"]["hits"]: tempRay = None # Initialize if hit["_source"]["src"] == hit["_source"]["MA"]: # means MA is the src site tempRay = np.array([#hit["_source"]["timestamp"], #hit["_source"]["timestamp"] conAtlasTime(hit["_source"]["timestamp"]), conAtlasTime(hit["_source"]["timestamp"]) - np.multiply(4, np.multiply(60, 60)), float(hit["_source"]["delay_mean"]), float(hit["_source"]["delay_median"]), float(hit["_source"]["delay_sd"]), float(0.0)]) elif hit["_source"]["dest"] == hit["_source"]["MA"]: # means MA is the dest site tempRay = np.array([#hit["_source"]["timestamp"], #hit["_source"]["timestamp"] conAtlasTime(hit["_source"]["timestamp"]), conAtlasTime(hit["_source"]["timestamp"]) - np.multiply(4, np.multiply(60, 60)), float(hit["_source"]["delay_mean"]), float(hit["_source"]["delay_median"]), float(hit["_source"]["delay_sd"]), float(1.0)]) else: raise NameError('MA is not the src or dest') if arrRet.size == 0: arrRet = np.reshape(tempRay, (1,6)) else: arrRet = np.vstack((arrRet, tempRay)) atlasTotalRec -= len(responseAtlas['hits']['hits']) arrRet.view('f8,f8,f8,f8,f8,f8').sort(order=['f0'], axis=0) return arrRet def atlasPacketLoss(srcSite, destSite): queryAtlas={"query" : {"bool": { "must": [ {"match" : {"_type" : "packet_loss_rate"} }, {"match" : {"srcSite" : srcSite } }, {"match" : {"destSite" : destSite } }, {"range" : { "timestamp" : { #"gt" : int(conAtlasTime(startDate)), #"lt" : int(conAtlasTime(endDate)) "gt" : startDate, "lt" : endDate } }} ] } } } scannerAtlas = esAtlas.search(index="network_weather_2-*", body=queryAtlas, search_type="scan", scroll=scrollPreserve) scrollIdAtlas = scannerAtlas['_scroll_id'] atlasTotalRec = scannerAtlas["hits"]["total"] arrRet = np.array([]) if atlasTotalRec == 0: return None else: while atlasTotalRec > 0: responseAtlas = esAtlas.scroll(scroll_id=scrollIdAtlas, 
scroll=scrollPreserve) for hit in responseAtlas["hits"]["hits"]: tempRay = None # Initialize if hit["_source"]["src"] == hit["_source"]["MA"]: # means MA is the src site tempRay = np.array([#hit["_source"]["timestamp"], #hit["_source"]["timestamp"] conAtlasTime(hit["_source"]["timestamp"]), conAtlasTime(hit["_source"]["timestamp"]) - np.multiply(4, np.multiply(60, 60)), float(hit["_source"]["packet_loss"]), float(0.0)]) elif hit["_source"]["dest"] == hit["_source"]["MA"]: # means MA is the dest site tempRay = np.array([#hit["_source"]["timestamp"], #hit["_source"]["timestamp"] conAtlasTime(hit["_source"]["timestamp"]), conAtlasTime(hit["_source"]["timestamp"]) - np.multiply(4, np.multiply(60, 60)), float(hit["_source"]["packet_loss"]), float(1.0)]) else: raise NameError('MA is not src or dest') if arrRet.size == 0: arrRet = np.reshape(tempRay, (1, 4)) else: arrRet = np.vstack((arrRet, tempRay)) atlasTotalRec -= len(responseAtlas['hits']['hits']) arrRet.view('f8,f8,f8,f8').sort(order=['f0'], axis=0) return arrRet def atlasThroughput(srcSite, destSite): queryAtlas={"query" : {"bool": { "must": [ {"match" : {"_type" : "throughput"} }, {"match" : {"srcSite" : srcSite } }, {"match" : {"destSite" : destSite } }, {"range" : { "timestamp" : { #"gt" : int(conAtlasTime(startDate)), #"lt" : int(conAtlasTime(endDate)) "gt" : startDate, "lt" : endDate } }} ] } } } scannerAtlas = esAtlas.search(index="network_weather_2-*", body=queryAtlas, search_type="scan", scroll=scrollPreserve) scrollIdAtlas = scannerAtlas['_scroll_id'] atlasTotalRec = scannerAtlas["hits"]["total"] arrRet = np.array([]) if atlasTotalRec == 0: return None else: while atlasTotalRec > 0: responseAtlas = esAtlas.scroll(scroll_id=scrollIdAtlas, scroll=scrollPreserve) for hit in responseAtlas["hits"]["hits"]: tempRay = None #Initialize in local context if hit["_source"]["src"] == hit["_source"]["MA"]: # Means MA is the src site tempRay = np.array([#hit["_source"]["timestamp"], #hit["_source"]["timestamp"] conAtlasTime(hit["_source"]["timestamp"]), conAtlasTime(hit["_source"]["timestamp"]) - np.multiply(4, np.multiply(60, 60)), float(hit["_source"]["throughput"]), float(0.0)]) elif hit["_source"]["dest"] == hit["_source"]["MA"]: #Means MA is the dest site tempRay = np.array([#hit["_source"]["timestamp"], #hit["_source"]["timestamp"] conAtlasTime(hit["_source"]["timestamp"]), conAtlasTime(hit["_source"]["timestamp"]) - np.multiply(4, np.multiply(60, 60)), float(hit["_source"]["throughput"]), float(1.0)]) else: raise NameError('MA is not src or dest') if arrRet.size == 0: arrRet = np.reshape(tempRay, (1, 4)) else: arrRet = np.vstack((arrRet, tempRay)) atlasTotalRec -= len(responseAtlas['hits']['hits']) arrRet.view('f8,f8,f8,f8').sort(order=['f0'], axis=0) return arrRet def hccQuery(site): queryHCC={"query" : {"bool": { "must": [ {"match" : {"CMS_JobType" : "Processing"} }, {"range" : {"EventRate" : {"gte" : "0"}} }, #{"match" : # {"TaskType" : "Production"} #}, {"range" : { "CompletionDate" : { "gt" : int(conAtlasTime(startDate)), "lt" : int(conAtlasTime(endDate)) } }}, {"match" : {"DataLocationsCount" : 1} }, {"match" : {"Site" : site } }, {"match" : {"InputData" : "Offsite"} } ] } } } scannerHCC = esHCC.search(index="cms-*", doc_type="job", body=queryHCC, search_type="scan", scroll=scrollPreserve) scrollIdHCC = scannerHCC['_scroll_id'] countHitsHCC = scannerHCC["hits"]["total"] arrRet = {} if countHitsHCC == 0: return None else: while countHitsHCC > 0: responseHCC = esHCC.scroll(scroll_id=scrollIdHCC, scroll=scrollPreserve) for hit in 
responseHCC["hits"]["hits"]: location = hit["_source"]["DataLocations"] if str(location[0]).lower() in cmsLocate["locations"]: tempHolder = np.array([hit["_source"]["CpuEff"], #hit["_source"]["EventRate"], hit["_source"]["ChirpCMSSWEventRate"], hit["_source"]["JobCurrentStartDate"], hit["_source"]["JobFinishedHookDone"], hit["_source"]["CpuTimeHr"], hit["_source"]["WallClockHr"], hit["_source"]["RequestCpus"], hit["_source"]["MemoryMB"], hit["_source"]["QueueHrs"], hit["_source"]["RequestMemory"], hit["_source"]["CoreHr"], hit["_source"]["CpuBadput"], hit["_source"]["KEvents"]]) if not str(location[0]) in loc["location"]: loc["location"] = np.append(loc["location"], str(location[0])) arrRet[str(location[0])] = np.reshape(tempHolder, (1,13)) else: arrRet[str(location[0])] = np.vstack((arrRet[str(location[0])],tempHolder)) countHitsHCC -= len(responseHCC['hits']['hits']) for hit in arrRet: #print(arrRet) #tempRay = arrRet[str(hit)] #arrRet[str(hit)] = tempRay[tempRay[:,2].argsort()] #arrRet[str(hit)] = sorted(arrRet[str(hit)], key=lambda x : x[2]) arrRet[str(hit)].view('f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8').sort(order=['f2'], axis=0) return arrRet with PdfPages('CMS_Plots.pdf') as pp: d = pp.infodict() d['Title'] = 'CMS Grid Plots' d['Author'] = u'Jerrod T. Dixon\xe4nen' d['Subject'] = 'Plot of network affects on grid jobs' d['Keywords'] = 'PdfPages matplotlib CMS grid' d['CreationDate'] = dt.datetime.today() d['ModDate'] = dt.datetime.today() for hit in cmsLocate["locations"]: loc["location"] = np.array([]) hccResult = hccQuery(hit) for note in loc["location"]: atlasT = None #atlasThroughput(sitesArray[hit], sitesArray[note.lower()]) atlasP = atlasPacketLoss(sitesArray[hit], sitesArray[note.lower()]) atlasL = atlasLatency(sitesArray[hit], sitesArray[note.lower()]) tempArr = hccResult[note] arrCpu = np.array([]); arrEvent = np.array([]); arrStart = np.array([]); arrEnd = np.array([]); for tpl in tempArr: arrCpu = np.append(arrCpu, tpl[0]); arrEvent = np.append(arrEvent, tpl[1]); arrStart = np.append(arrStart, utcDate(tpl[2])); arrEnd = np.append(arrEnd, utcDate(tpl[3])); figH, axH = plt.subplots(2, sharex=True) axH[1].xaxis.set_major_formatter(AutoDateFormatter(locator=AutoDateLocator(), defaultfmt="%m-%d %H:%M")) figH.autofmt_xdate(bottom=0.2, rotation=30, ha='right') axH[0].plot(arrStart, arrCpu, 'bs') axH[0].hlines(arrCpu, arrStart, arrEnd) axH[0].set_ylabel("CpuEff") axH[0].set_title(str("2016 From " + hit + " To " + note)) axH[1].plot(arrStart, arrEvent, 'bs') axH[1].hlines(arrEvent, arrStart, arrEnd) axH[1].set_ylabel("EventRate") pp.savefig(figH) plt.close(figH) #axA[2].xaxis.set_major_formatter(AutoDateFormatter(locator=AutoDateLocator(), # defaultfmt="%m-%d %H:%M")) if not type(atlasP) == type(None): #tDate = np.array([]) #tDatef = np.array([]) #tPut = np.array([]) pDate = np.array([]) pDatef = np.array([]) pLoss = np.array([]) #for tpl in atlasT: # tDate = np.append(tDate, tpl[0]) # tDatef = np.append(tDatef, tpl[1]) # tPut = np.append(tPut, tpl[2]) for tpl in atlasP: pDate = np.append(pDate, tpl[0]) pDatef = np.append(pDatef, tpl[1]) pLoss = np.append(pLoss, tpl[2]) figA, axA = plt.subplots(2, sharex=True) axA[0].set_title(str("2016 From " + \ hit + " (" + \ sitesArray[hit] + \ ")" + " To " + \ note + " (" + sitesArray[note.lower()] + ")")) figA.autofmt_xdate(bottom=0.2, rotation=30, ha='right') axA[0].plot(pDate, pLoss, 'bs') axA[0].set_ylabel("Packet Loss") axA[0].hlines(pLoss, pDatef, pDate) #axA[1].set_ylabel("Throughput") #axA[1].plot(tDate, tPut, 'bs') 
#axA[1].hlines(tPut, # tDatef, # tDate) pp.savefig(figA) plt.close(figA) if not type(atlasL) == type(None): lDate = np.array([]) lDatef = np.array([]) lMean = np.array([]) lMedian = np.array([]) lStd = np.array([]) for tpl in atlasL: lDate = np.append(lDate, tpl[0]) lDatef = np.append(lDatef, tpl[1]) lMean = np.append(lMean, tpl[2]) lMedian = np.append(lMedian, tpl[3]) lStd = np.append(lStd, tpl[4]) figL, axL = plt.subplots(3, sharex=True) axL[0].set_title(str("2016 Latency From " + \ hit + " (" + \ sitesArray[hit] + \ ")" + " To " + \ note + " (" + sitesArray[note.lower()] + ")")) figL.autofmt_xdate(bottom=0.2, rotation=30, ha='right') axL[0].set_ylabel("Mean") axL[0].plot(lDate, lMean, 'bs', label="delay_mean") axL[0].hlines(lMean, lDatef, lDate) axL[1].set_ylabel("Median") axL[1].plot(lDate, lMedian, 'rs', label="delay_median") axL[1].hlines(lMedian, lDatef, lDate) axL[2].set_ylabel("Std. Dev") axL[2].plot(lDate, lStd, 'g^', label="delay_sd") axL[2].hlines(lStd, lDatef, lDate) pp.savefig(figL) plt.close(figL)
mit
psci2195/espresso-ffans
samples/lb_profile.py
1
2856
# Copyright (C) 2010-2019 The ESPResSo project # # This file is part of ESPResSo. # # ESPResSo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Simulate the flow of a lattice-Boltzmann fluid past a cylinder, obtain the velocity profile in polar coordinates and compare it to the analytical solution. """ import numpy as np import matplotlib.pyplot as plt import espressomd required_features = ["CUDA", "LB_BOUNDARIES_GPU"] espressomd.assert_features(required_features) import espressomd.lb import espressomd.observables import espressomd.shapes import espressomd.lbboundaries import espressomd.accumulators system = espressomd.System(box_l=[10.0, 10.0, 5.0]) system.time_step = 0.01 system.cell_system.skin = 0.4 n_steps = 500 lb_fluid = espressomd.lb.LBFluidGPU( agrid=1.0, dens=1.0, visc=1.0, tau=0.01, ext_force_density=[0, 0, 0.15], kT=1.0, seed=32) system.actors.add(lb_fluid) system.thermostat.set_lb(LB_fluid=lb_fluid, seed=23) fluid_obs = espressomd.observables.CylindricalLBVelocityProfile( center=[5.0, 5.0, 0.0], axis='z', n_r_bins=100, n_phi_bins=1, n_z_bins=1, min_r=0.0, max_r=4.0, min_phi=-np.pi, max_phi=np.pi, min_z=0.0, max_z=10.0, sampling_delta_x=0.05, sampling_delta_y=0.05, sampling_delta_z=1.0) cylinder_shape = espressomd.shapes.Cylinder( center=[5.0, 5.0, 5.0], axis=[0, 0, 1], direction=-1, radius=4.0, length=20.0) cylinder_boundary = espressomd.lbboundaries.LBBoundary(shape=cylinder_shape) system.lbboundaries.add(cylinder_boundary) system.integrator.run(n_steps) accumulator = espressomd.accumulators.MeanVarianceCalculator(obs=fluid_obs) system.auto_update_accumulators.add(accumulator) system.integrator.run(n_steps) lb_fluid_profile = accumulator.get_mean() lb_fluid_profile = np.reshape(lb_fluid_profile, (100, 1, 1, 3)) def poiseuille_flow(r, R, ext_force_density): return ext_force_density * 1. / 4 * (R**2.0 - r**2.0) # Please note that due to symmetry and interpolation, a plateau is seen # near r=0. n_bins = len(lb_fluid_profile[:, 0, 0, 2]) r_max = 4.0 r = np.linspace(0.0, r_max, n_bins) plt.plot(r, lb_fluid_profile[:, 0, 0, 2], label='LB profile') plt.plot(r, poiseuille_flow(r, r_max, 0.15), label='analytical solution') plt.legend() plt.show()
gpl-3.0
adamgreenhall/scikit-learn
examples/linear_model/plot_logistic_path.py
349
1195
#!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================

Computes path on IRIS dataset.
"""
print(__doc__)

# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause

from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c

iris = datasets.load_iris()
X = iris.data
y = iris.target

X = X[y != 2]
y = y[y != 2]

X -= np.mean(X, 0)

###############################################################################
# Demo path functions

cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)

print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
    clf.set_params(C=c)
    clf.fit(X, y)
    coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)

coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
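# --- Hedged aside (not part of the example above) ----------------------------
# l1_min_c returns the smallest C for which an L1-penalised model can have any
# non-zero coefficient, so the grid of C values above spans three decades
# starting from that threshold.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.svm import l1_min_c

iris = load_iris()
X, y = iris.data[iris.target != 2], iris.target[iris.target != 2]
X = X - X.mean(0)
c_min = l1_min_c(X, y, loss='log')
print(c_min, c_min * np.logspace(0, 3)[-1])   # lower and upper ends of the path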
bsd-3-clause
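The path example above starts its C grid at l1_min_c, the smallest C for which an L1-penalised logistic model can keep any non-zero coefficient. A minimal sketch of that boundary on the same two-class iris subset; it assumes a recent scikit-learn, where the liblinear solver has to be requested explicitly for an L1 penalty:

import numpy as np
from sklearn import datasets, linear_model
from sklearn.svm import l1_min_c

iris = datasets.load_iris()
X, y = iris.data[iris.target != 2], iris.target[iris.target != 2]
X = X - X.mean(axis=0)

c_min = l1_min_c(X, y, loss='log')
for c in (0.5 * c_min, 2.0 * c_min):
    clf = linear_model.LogisticRegression(C=c, penalty='l1',
                                          solver='liblinear', tol=1e-6)
    clf.fit(X, y)
    # Below c_min the penalty should shrink every coefficient to zero; above it some survive.
    print("C = %.4f -> non-zero coefficients: %d" % (c, np.count_nonzero(clf.coef_)))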
to266/hyperspy
hyperspy/drawing/marker.py
2
4900
# -*- coding: utf-8 -*- # Copyright 2007-2016 The HyperSpy developers # # This file is part of HyperSpy. # # HyperSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # HyperSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with HyperSpy. If not, see <http://www.gnu.org/licenses/>. import numpy as np import matplotlib.pyplot as plt from hyperspy.events import Event, Events class MarkerBase(object): """Marker that can be added to the signal figure Attributes ---------- marker_properties : dictionary Accepts a dictionary of valid (i.e. recognized by mpl.plot) containing valid line properties. In addition it understands the keyword `type` that can take the following values: {'line', 'text'} """ def __init__(self): # Data attributes self.data = None self.axes_manager = None self.ax = None self.auto_update = True # Properties self.marker = None self._marker_properties = {} # Events self.events = Events() self.events.closed = Event(""" Event triggered when a marker is closed. Arguments --------- marker : Marker The marker that was closed. """, arguments=['obj']) self._closing = False @property def marker_properties(self): return self._marker_properties @marker_properties.setter def marker_properties(self, kwargs): for key, item in kwargs.items(): if item is None and key in self._marker_properties: del self._marker_properties[key] else: self._marker_properties[key] = item if self.marker is not None: plt.setp(self.marker, **self.marker_properties) try: # self.ax.figure.canvas.draw() self.ax.hspy_fig._draw_animated() except: pass def set_marker_properties(self, **kwargs): """ Set the line_properties attribute using keyword arguments. """ self.marker_properties = kwargs def set_data(self, x1=None, y1=None, x2=None, y2=None, text=None, size=None): """ Set data to the structured array. Each field of data should have the same dimensions than the nagivation axes. The other fields are overwritten. """ self.data = np.array((np.array(x1), np.array(y1), np.array(x2), np.array(y2), np.array(text), np.array(size)), dtype=[('x1', object), ('y1', object), ('x2', object), ('y2', object), ('text', object), ('size', object)]) self._is_marker_static() def add_data(self, **kwargs): """ Add data to the structured array. Each field of data should have the same dimensions than the nagivation axes. The other fields are not changed. 
""" if self.data is None: self.set_data(**kwargs) else: for key in kwargs.keys(): self.data[key][()] = np.array(kwargs[key]) self._is_marker_static() def isiterable(self, obj): return not isinstance(obj, (str, bytes)) and hasattr(obj, '__iter__') def _is_marker_static(self): test = [self.isiterable(self.data[key].item()[()]) is False for key in self.data.dtype.names] if np.alltrue(test): self.auto_update = False else: self.auto_update = True def get_data_position(self, ind): data = self.data if data[ind].item()[()] is None: return None elif self.isiterable(data[ind].item()[()]) and self.auto_update: indices = self.axes_manager.indices[::-1] return data[ind].item()[indices] else: return data[ind].item()[()] def close(self): if self._closing: return self._closing = True try: self.marker.remove() self.events.closed.trigger(obj=self) for f in self.events.closed.connected: self.events.closed.disconnect(f) # m.ax.figure.canvas.draw() self.ax.hspy_fig._draw_animated() except: pass
gpl-3.0
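MarkerBase.set_data above packs every marker attribute into a single structured numpy record of object fields, and _is_marker_static switches auto_update off when none of those fields is iterable. A standalone sketch of that storage pattern in plain numpy (the helper names here are illustrative, not HyperSpy API):

import numpy as np

def make_marker_record(x1=None, y1=None, x2=None, y2=None, text=None, size=None):
    # Mirror of the dtype used by set_data: one object field per marker attribute.
    return np.array((np.array(x1), np.array(y1), np.array(x2), np.array(y2),
                     np.array(text), np.array(size)),
                    dtype=[('x1', object), ('y1', object), ('x2', object),
                           ('y2', object), ('text', object), ('size', object)])

def is_static(record):
    # Only scalar fields -> the marker does not change with the navigation index.
    return all(np.ndim(record[name].item()) == 0 for name in record.dtype.names)

static_marker = make_marker_record(x1=1.0, y1=2.0, text="peak")
per_index_marker = make_marker_record(x1=[1.0, 1.5, 2.0], y1=[2.0, 2.1, 2.2])
print(is_static(static_marker), is_static(per_index_marker))   # True False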
JohanComparat/pySU
spm/bin_spiders/spiders_last_burst_vs_radius.py
1
5578
import astropy.cosmology as co aa=co.Planck15 import astropy.io.fits as fits import astropy.units as u from astropy.coordinates import angles #import AngularSeparation from astropy import coordinates as coord import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as p import numpy as n import os import sys import ClusterScalingRelations as clsr from scipy.interpolate import interp1d import StellarMass as sm smhmr = sm.StellarMass() scl = clsr.ClusterScalingRelations_Mantz2016() cat = fits.open(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'validatedclusters_catalogue_2016-07-04-DR14_version_round1-v4_Xmass-v1.fits.gz'))[1].data spm = fits.open(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'validatedclusters_catalogue_2016-07-04-DR14_version_round1-v4_Xmass-v1_spm.fits'))[1].data volume_rough = aa.comoving_volume(0.5)*2200.*n.pi/129600 volume = volume_rough.value # get cluster center # distance to center # rescale to r200c_deg # get the latest min(ages) of the ssp # compute SFR # now looks at individual galaxies # and gets the highest SFR for each galaxy # youngest age highest_sfrs = [] youngest_ages = [] sep_r200c = [] for cc in cat: center = coord.ICRS(ra=cc['RA_OPT']*u.degree, dec=cc['DEC_OPT']*u.degree) gal = (spm['CLUS_ID']==cc['CLUS_ID']) #all_members = coord.ICRS() #separations = center.separation(all_members)/(cc['R200C_DEG']*u.degree)).value for id_cc, (pla, mjd, fib) in enumerate(zip(cc['ALLPLATE'][:len(gal.nonzero()[0])], cc['ALLMJD'][:len(gal.nonzero()[0])], cc['ALLFIBERID'][:len(gal.nonzero()[0])])): sel = (gal) & (spm['PLATE']==pla) & (spm['MJD']==mjd) & (spm['FIBERID']==fib) if len(sel.nonzero()[0])>0 : n_cp = spm['Chabrier_MILES_nComponentsSSP'][sel].astype('int')[0] if n_cp > 0 : all_ages = n.array([ spm['Chabrier_MILES_age_ssp_'+str(ii)][sel][0] for ii in n.arange(n_cp) ]) all_masses = n.array([ spm['Chabrier_MILES_stellar_mass_ssp_'+str(ii)][sel][0] for ii in n.arange(n_cp) ]) sfr_inst = all_masses / all_ages youngest_ages.append(n.min(all_ages)) highest_sfrs.append(n.max(sfr_inst)) position = coord.ICRS(cc['ALLRA'][id_cc]*u.degree, cc['ALLDEC'][id_cc]*u.degree) sep_r200c.append( (center.separation(position)/(cc['R200C_DEG']*u.degree)).value ) highest_sfrs = n.array(highest_sfrs) youngest_ages = n.array(youngest_ages) sep_r200c = n.array(sep_r200c) p.figure(1, (5,5)) p.title('SPIDERS') p.plot(sep_r200c, highest_sfrs, 'r+') p.xlabel('r/r200c') p.ylabel('SFR [Msun/yr]') #p.xscale('log') p.yscale('log') p.xlim((0.08,1.5)) p.grid() p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'disteance-2-center-SFR.png')) p.clf() dx = ( n.max(sep_r200c) - n.min(sep_r200c) ) /3. 
r_b = n.arange(n.min(sep_r200c), n.max(sep_r200c) + dx, dx) p.figure(1, (5,5)) for ii,bb in enumerate(r_b[:-1]): sub = (sep_r200c>bb)&(sep_r200c<r_b[ii+1]) p.hist(highest_sfrs[sub], label=str(n.round(bb,3))+"<"+str(n.round(r_b[ii+1],3)), cumulative=True, normed=True, histtype='step') p.ylabel('normed cumulative distribution') p.xlabel('SFR [Msun/yr]') p.xscale('log') p.ylim((-0.01, 1.01)) p.grid() p.legend(frameon=False, loc=0) p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'disteance-2-center-SFR-histograms.png')) p.clf() p.figure(1, (5,5)) p.title('SPIDERS') p.plot(sep_r200c, youngest_ages, 'r+') p.xlabel('r/r200c') p.ylabel('age [yr]') p.xscale('log') p.yscale('log') p.xlim((0.1,5)) p.grid() p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'disteance-2-center-AGE.png')) p.clf() p.figure(1, (5,5)) p.title('SPIDERS DR14 galaxies') p.plot(spm['Z'], spm["Chabrier_MILES_stellar_mass"], 'b,', label='targets') p.plot(z, y, 'r,', label='cluster members') p.xlabel('redshift') p.ylabel('stellar mass [Msun]') #p.xscale('log') p.yscale('log') p.xlim((0,0.7)) p.ylim((1e9,1e12)) p.grid() p.legend(frameon=False, loc=0) p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'redshift-mass.png')) p.clf() logm2x = n.hstack((m2x)) bins=n.arange(-7, 0.5, 0.1) basis = (n.isnan(logm2x)==False)&(logm2x != -n.inf)&(logm2x != n.inf) arbitrary_factor =5. p.figure(1, (5,5)) ok = (basis)&(x>1e44) out = n.log10(n.histogram(logm2x[ok], bins=bins)[0]) p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='LX>44') ok = (basis)&(x>10**44.5) out = n.log10(n.histogram(logm2x[ok], bins=bins)[0]) p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='LX>44.5') ok = (basis)&(x>1e45) out = n.log10(n.histogram(logm2x[ok], bins=bins)[0]) p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='LX>45') ok = (basis)&(m200c>10**14) out = n.log10(n.histogram(logm2x[ok], bins=bins)[0]) p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='M200c>14', ls='dashed') ok = (basis)&(m200c>10**15) out = n.log10(n.histogram(logm2x[ok], bins=bins)[0]) p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='M200c>15', ls='dashed') xs = n.arange(-7, 0.01, 0.01) logfsat= lambda logxi, a, b, logN0, exponent : n.log10( 10**logN0 * (10**logxi)**a)# * n.e**(-b*(10**logxi)**exponent)) p.plot(xs, logfsat(xs, -0.81, 5.81, -2.25, -2.54), label='-0.81') p.plot(xs, logfsat(xs, -0.18, 5.81, -1.2, -.54), label='-0.18') p.xlabel('log10(SMHMR(stellar mass) / HaloMass(Lx ray))') p.ylabel('histogram') #p.xscale('log') #p.yscale('log') p.ylim((-1.5, 0.5)) p.xlim((-4,0)) p.grid() p.legend(frameon=False, loc=0) p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'LX-mass-histogram.png')) p.clf()
cc0-1.0
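The per-galaxy quantities plotted above come from a simple reduction over the SSP components of each spectral fit: every component's stellar mass divided by its age gives an instantaneous SFR, and the script keeps the maximum SFR and the minimum age. A toy version of that reduction with made-up numbers (no SDSS catalogue needed):

import numpy as np

# Hypothetical SSP decomposition of one galaxy: masses in Msun, ages in yr.
all_masses = np.array([1.0e9, 5.0e10, 2.0e8])
all_ages = np.array([5.0e8, 8.0e9, 1.0e8])

sfr_inst = all_masses / all_ages            # average SFR needed to build each component
print("youngest age [yr]    :", all_ages.min())   # 1e8
print("highest SFR [Msun/yr]:", sfr_inst.max())   # 6.25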
ryanpbrewster/SciVis-2015
examples/sdf_example.py
1
2689
""" The Example is from http://darksky.slac.stanford.edu/scivis2015/examples.html """ from sdfpy import load_sdf from thingking import loadtxt prefix = "../data/" # Load N-body particles from a = 1.0 dataset. Particles have positions with # units of proper kpc, and velocities with units of km/s. particles = load_sdf(prefix+"ds14_scivis_0128_e4_dt04_1.0000") # Load the a=1 Rockstar hlist file. The header of the file lists the useful # units/information. scale, id, desc_scale, desc_id, num_prog, pid, upid, desc_pid, phantom, \ sam_mvir, mvir, rvir, rs, vrms, mmp, scale_of_last_MM, vmax, x, y, z, \ vx, vy, vz, Jx, Jy, Jz, Spin, Breadth_first_ID, Depth_first_ID, \ Tree_root_ID, Orig_halo_ID, Snap_num, Next_coprogenitor_depthfirst_ID, \ Last_progenitor_depthfirst_ID, Rs_Klypin, M_all, M200b, M200c, M500c, \ M2500c, Xoff, Voff, Spin_Bullock, b_to_a, c_to_a, A_x, A_y, A_z, \ b_to_a_500c, c_to_a_500c, A_x_500c, A_y_500c, A_z_500c, T_over_U, \ M_pe_Behroozi, M_pe_Diemer, Macc, Mpeak, Vacc, Vpeak, Halfmass_Scale, \ Acc_Rate_Inst, Acc_Rate_100Myr, Acc_Rate_Tdyn = \ loadtxt(prefix+"rockstar/hlists/hlist_1.00000.list", unpack=True) # Now we want to convert the proper kpc of the particle position to comoving # Mpc/h, a common unit used in computational cosmology in general, but # specifically is used as the output unit in the merger tree halo list loaded # in above. First we get the Hubble parameter, here stored as 'h_100' in the # SDF parameters. Then we load the simulation width, L0, which is also in # proper kpc. Finally we load the scale factor, a, which for this particular # snapshot is equal to 1 since we are loading the final snapshot from the # simulation. h_100 = particles.parameters['h_100'] width = particles.parameters['L0'] cosmo_a = particles.parameters['a'] kpc_to_Mpc = 1./1000 sl = slice(0,None) # Define a simple function to convert proper to comoving Mpc/h. convert_to_cMpc = lambda proper: (proper + width/2.) * h_100 * kpc_to_Mpc / cosmo_a # Plot all the particles, adding a bit of alpha so that we see the density of # points. import matplotlib.pylab as pl pl.figure(figsize=[10,10]) pl.scatter(convert_to_cMpc(particles['x'][sl]), convert_to_cMpc(particles['y'][sl]), color='b', s=1.0, alpha=0.05) # Plot all the halos in red. pl.scatter(x, y, color='r', alpha=0.1) # Add some labels pl.xlabel('x [cMpc/h]') pl.ylabel('y [cMpc/h]') pl.savefig("halos_and_particles.png", bbox_inches='tight') # Could now consider coloring halos by any of the various quantities above. # Perhaps mvir would be nice to show the virial Mass of the halo, or we could # scale the points by the virial radius, rvir.
mit
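The long comment in the example explains the unit conversion from proper kpc (with the box centred on the origin) to comoving Mpc/h. A quick numerical check of convert_to_cMpc with round illustrative values; h_100, L0 and a below are placeholders, not the actual ds14_scivis_0128 header values:

h_100 = 0.7          # illustrative Hubble parameter (H0 / 100)
width = 100000.0     # illustrative box size L0 in proper kpc
cosmo_a = 1.0        # scale factor of the snapshot
kpc_to_Mpc = 1. / 1000

convert_to_cMpc = lambda proper: (proper + width / 2.) * h_100 * kpc_to_Mpc / cosmo_a

# The shifted box then spans [0, width * h_100 / 1000] comoving Mpc/h:
print(convert_to_cMpc(-width / 2.), convert_to_cMpc(width / 2.))   # 0.0 70.0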
justincassidy/scikit-learn
examples/covariance/plot_outlier_detection.py
235
3891
""" ========================================== Outlier detection with several methods. ========================================== When the amount of contamination is known, this example illustrates two different ways of performing :ref:`outlier_detection`: - based on a robust estimator of covariance, which is assuming that the data are Gaussian distributed and performs better than the One-Class SVM in that case. - using the One-Class SVM and its ability to capture the shape of the data set, hence performing better when the data is strongly non-Gaussian, i.e. with two well-separated clusters; The ground truth about inliers and outliers is given by the points colors while the orange-filled area indicates which points are reported as inliers by each method. Here, we assume that we know the fraction of outliers in the datasets. Thus rather than using the 'predict' method of the objects, we set the threshold on the decision_function to separate out the corresponding fraction. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt import matplotlib.font_manager from scipy import stats from sklearn import svm from sklearn.covariance import EllipticEnvelope # Example settings n_samples = 200 outliers_fraction = 0.25 clusters_separation = [0, 1, 2] # define two outlier detection tools to be compared classifiers = { "One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05, kernel="rbf", gamma=0.1), "robust covariance estimator": EllipticEnvelope(contamination=.1)} # Compare given classifiers under given settings xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500)) n_inliers = int((1. - outliers_fraction) * n_samples) n_outliers = int(outliers_fraction * n_samples) ground_truth = np.ones(n_samples, dtype=int) ground_truth[-n_outliers:] = 0 # Fit the problem with varying cluster separation for i, offset in enumerate(clusters_separation): np.random.seed(42) # Data generation X1 = 0.3 * np.random.randn(0.5 * n_inliers, 2) - offset X2 = 0.3 * np.random.randn(0.5 * n_inliers, 2) + offset X = np.r_[X1, X2] # Add outliers X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))] # Fit the model with the One-Class SVM plt.figure(figsize=(10, 5)) for i, (clf_name, clf) in enumerate(classifiers.items()): # fit the data and tag outliers clf.fit(X) y_pred = clf.decision_function(X).ravel() threshold = stats.scoreatpercentile(y_pred, 100 * outliers_fraction) y_pred = y_pred > threshold n_errors = (y_pred != ground_truth).sum() # plot the levels lines and the points Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) subplot = plt.subplot(1, 2, i + 1) subplot.set_title("Outlier detection") subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7), cmap=plt.cm.Blues_r) a = subplot.contour(xx, yy, Z, levels=[threshold], linewidths=2, colors='red') subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()], colors='orange') b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white') c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black') subplot.axis('tight') subplot.legend( [a.collections[0], b, c], ['learned decision function', 'true inliers', 'true outliers'], prop=matplotlib.font_manager.FontProperties(size=11)) subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors)) subplot.set_xlim((-7, 7)) subplot.set_ylim((-7, 7)) plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26) plt.show()
bsd-3-clause
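As the docstring above stresses, when the contamination fraction is known the decision is made by thresholding decision_function at the matching percentile instead of calling predict. A minimal standalone sketch of that pattern with one of the two estimators (synthetic data; the 25% fraction mirrors the example):

import numpy as np
from sklearn.covariance import EllipticEnvelope

rng = np.random.RandomState(42)
X = np.r_[0.3 * rng.randn(150, 2),                      # inliers
          rng.uniform(low=-6, high=6, size=(50, 2))]    # outliers
outliers_fraction = 0.25

clf = EllipticEnvelope().fit(X)
scores = clf.decision_function(X).ravel()
# Flag exactly the lowest-scoring fraction of points as outliers.
threshold = np.percentile(scores, 100 * outliers_fraction)
y_pred = scores > threshold
print("flagged outliers:", int((~y_pred).sum()), "of", len(X))    # about 50 of 200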
ndaniels/Ammolite
scripts/figure-generators/smsdIsoCompare.py
1
1534
import matplotlib.pyplot as plt
from pylab import polyfit, poly1d
import sys


def isNumber(s):
    try:
        float(s)
        return True
    except ValueError:
        return False


def makeGraph(X, Y, xName, yName, name="NoName"):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    superName = "Comparison of {} and {}".format(xName, yName)
    outname = "{} from {}.png".format(superName, name)
    fig.suptitle(superName)
    ax.scatter(X, Y)
    fit = polyfit(X, Y, 1)
    fit_fn = poly1d(fit)  # fit_fn is now a function which takes in x and returns an estimate for y
    ax.plot(X, Y, 'yo', X, fit_fn(X), '--k')
    ax.set_xlabel('Size of MCS found by {}'.format(xName))
    ax.set_ylabel('Size of MCS found by {}'.format(yName))
    ax.text(1, 1, "y = {}*x + {}".format(fit[0], fit[1]))
    fig.savefig(outname)


def buildIsoSMSDComparison(filename, outname="SMSD-IsoRank-comparison"):
    X, Y, xName, yName = [], [], "", ""
    with open(filename) as f:
        inComparison = False
        nameLine = False
        for line in f:
            parts = line.split()
            if not parts:
                # skip blank lines instead of crashing on line.split()[0]
                continue
            if parts[0] == "COMPARISON_DELIMITER":
                if inComparison:
                    makeGraph(X, Y, xName, yName, filename)
                inComparison = True
                nameLine = True
                X, Y = [], []
            elif inComparison:
                if nameLine:
                    xName, yName = parts[0], parts[1]
                    nameLine = False
                else:
                    X.append(float(parts[0]))
                    Y.append(float(parts[1]))
        makeGraph(X, Y, xName, yName, filename)


if __name__ == "__main__":
    args = sys.argv
    if len(args) == 2:
        buildIsoSMSDComparison(args[1])
    elif len(args) > 2:
        buildIsoSMSDComparison(args[1], args[2])
    else:
        print("usage: smsdIsoCompare.py comparison_file [outname]")
gpl-2.0
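buildIsoSMSDComparison never documents its input format, but the parser implies it: blocks separated by a COMPARISON_DELIMITER line, then one line naming the two methods, then rows of two MCS sizes. A sketch of a matching input file, reconstructed only from that parsing logic (the real Ammolite output may differ):

# Hypothetical comparison file inferred from the parser above.
example = """COMPARISON_DELIMITER
SMSD IsoRank
12 11
8 8
15 13
"""

with open("comparison.txt", "w") as f:
    f.write(example)

# Invoked as:  python smsdIsoCompare.py comparison.txt
# -> one scatter plot per block with a linear fit of IsoRank MCS size against SMSD MCS size.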
gdementen/PyTables
c-blosc/bench/plot-speeds.py
11
6852
"""Script for plotting the results of the 'suite' benchmark. Invoke without parameters for usage hints. :Author: Francesc Alted :Date: 2010-06-01 """ import matplotlib as mpl from pylab import * KB_ = 1024 MB_ = 1024*KB_ GB_ = 1024*MB_ NCHUNKS = 128 # keep in sync with bench.c linewidth=2 #markers= ['+', ',', 'o', '.', 's', 'v', 'x', '>', '<', '^'] #markers= [ 'x', '+', 'o', 's', 'v', '^', '>', '<', ] markers= [ 's', 'o', 'v', '^', '+', 'x', '>', '<', '.', ',' ] markersize = 8 def get_values(filename): f = open(filename) values = {"memcpyw": [], "memcpyr": []} for line in f: if line.startswith('-->'): tmp = line.split('-->')[1] nthreads, size, elsize, sbits, codec = [i for i in tmp.split(', ')] nthreads, size, elsize, sbits = map(int, (nthreads, size, elsize, sbits)) values["size"] = size * NCHUNKS / MB_; values["elsize"] = elsize; values["sbits"] = sbits; values["codec"] = codec # New run for nthreads (ratios, speedsw, speedsr) = ([], [], []) # Add a new entry for (ratios, speedw, speedr) values[nthreads] = (ratios, speedsw, speedsr) #print "-->", nthreads, size, elsize, sbits elif line.startswith('memcpy(write):'): tmp = line.split(',')[1] memcpyw = float(tmp.split(' ')[1]) values["memcpyw"].append(memcpyw) elif line.startswith('memcpy(read):'): tmp = line.split(',')[1] memcpyr = float(tmp.split(' ')[1]) values["memcpyr"].append(memcpyr) elif line.startswith('comp(write):'): tmp = line.split(',')[1] speedw = float(tmp.split(' ')[1]) ratio = float(line.split(':')[-1]) speedsw.append(speedw) ratios.append(ratio) elif line.startswith('decomp(read):'): tmp = line.split(',')[1] speedr = float(tmp.split(' ')[1]) speedsr.append(speedr) if "OK" not in line: print "WARNING! OK not found in decomp line!" f.close() return nthreads, values def show_plot(plots, yaxis, legends, gtitle, xmax=None): xlabel('Compresssion ratio') ylabel('Speed (MB/s)') title(gtitle) xlim(0, xmax) #ylim(0, 10000) ylim(0, None) grid(True) # legends = [f[f.find('-'):f.index('.out')] for f in filenames] # legends = [l.replace('-', ' ') for l in legends] #legend([p[0] for p in plots], legends, loc = "upper left") legend([p[0] for p in plots if not isinstance(p, mpl.lines.Line2D)], legends, loc = "best") #subplots_adjust(bottom=0.2, top=None, wspace=0.2, hspace=0.2) if outfile: print "Saving plot to:", outfile savefig(outfile, dpi=64) else: show() if __name__ == '__main__': from optparse import OptionParser usage = "usage: %prog [-r] [-o outfile] [-t title ] [-d|-c] filename" compress_title = 'Compression speed' decompress_title = 'Decompression speed' yaxis = 'No axis name' parser = OptionParser(usage=usage) parser.add_option('-o', '--outfile', dest='outfile', help=('filename for output (many extensions ' 'supported, e.g. 
.png, .jpg, .pdf)')) parser.add_option('-t', '--title', dest='title', help='title of the plot',) parser.add_option('-l', '--limit', dest='limit', help='expression to limit number of threads shown',) parser.add_option('-x', '--xmax', dest='xmax', help='limit the x-axis', default=None) parser.add_option('-r', '--report', action='store_true', dest='report', help='generate file for reporting ', default=False) parser.add_option('-d', '--decompress', action='store_true', dest='dspeed', help='plot decompression data', default=False) parser.add_option('-c', '--compress', action='store_true', dest='cspeed', help='plot compression data', default=False) (options, args) = parser.parse_args() if len(args) == 0: parser.error("No input arguments") elif len(args) > 1: parser.error("Too many input arguments") else: pass if options.report and options.outfile: parser.error("Can only select one of [-r, -o]") if options.dspeed and options.cspeed: parser.error("Can only select one of [-d, -c]") elif options.cspeed: options.dspeed = False plot_title = compress_title else: # either neither or dspeed options.dspeed = True plot_title = decompress_title filename = args[0] cspeed = options.cspeed dspeed = options.dspeed if options.outfile: outfile = options.outfile elif options.report: if cspeed: outfile = filename[:filename.rindex('.')] + '-compr.png' else: outfile = filename[:filename.rindex('.')] + '-decompr.png' else: outfile = None plots = [] legends = [] nthreads, values = get_values(filename) #print "Values:", values if options.limit: thread_range = eval(options.limit) else: thread_range = range(1, nthreads+1) if options.title: plot_title = options.title else: plot_title += " (%(size).1f MB, %(elsize)d bytes, %(sbits)d bits), %(codec)s" % values gtitle = plot_title for nt in thread_range: #print "Values for %s threads --> %s" % (nt, values[nt]) (ratios, speedw, speedr) = values[nt] if cspeed: speed = speedw else: speed = speedr #plot_ = semilogx(ratios, speed, linewidth=2) plot_ = plot(ratios, speed, linewidth=2) plots.append(plot_) nmarker = nt if nt >= len(markers): nmarker = nt%len(markers) setp(plot_, marker=markers[nmarker], markersize=markersize, linewidth=linewidth) legends.append("%d threads" % nt) # Add memcpy lines if cspeed: mean = np.mean(values["memcpyw"]) message = "memcpy (write to memory)" else: mean = np.mean(values["memcpyr"]) message = "memcpy (read from memory)" plot_ = axhline(mean, linewidth=3, linestyle='-.', color='black') text(1.0, mean+50, message) plots.append(plot_) show_plot(plots, yaxis, legends, gtitle, xmax=int(options.xmax) if options.xmax else None)
bsd-3-clause
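The docstring above only says to invoke the script without parameters for hints; the OptionParser block spells out the actual flags. A few example invocations derived from those definitions (suite.out is a placeholder name for a log produced by the c-blosc bench suite):

# python plot-speeds.py -c -o suite-compr.png suite.out     # compression speeds to a PNG
# python plot-speeds.py -d -t "LZ4, 8 MB blocks" suite.out  # decompression speeds, custom title
# python plot-speeds.py -c -l "range(1, 5)" suite.out       # restrict to 1-4 threads (-l is eval'd)
# python plot-speeds.py -c -r suite.out                     # -r derives the output name from the input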
andres-liiver/IAPB13_suvendatud
Kodutoo_16/Kodutoo_16_Andres.py
1
2985
''' Kodutoo 16 14.11.2014 Andres Liiver ''' import time from matplotlib import pyplot as plt from Tund16gen import * def timeFunc(func, *args): start = time.clock() func(*args) return time.clock() - start def linear_search(lst, num): for item in lst: if item == num: return True return False def binary_search(lst, num, sort=False): if sort: lst = sorted(lst) imin = 0 imax = len(lst)-1 while imax >= imin: imid = (imin+imax) // 2 if lst[imid] == num: return True elif lst[imid] < num: imin = imid + 1 else: imax = imid - 1 return False def main(): linearTimes = [] binary1Times = [] binary2Times = [] ns = [2**i for i in range(1, 13)] for n in ns: lst, gen = gimme_my_input(n, "blah") times = [] # linear search test for i in range(len(lst)): times.append(timeFunc(linear_search, lst, next(gen))) avg_time = sum(times) / len(times) linearTimes.append(avg_time) # binary search test 1 times = [] sortedList = sorted(lst) for i in range(len(lst)): times.append(timeFunc(binary_search, sortedList, next(gen))) avg_time = sum(times) / len(times) binary1Times.append(avg_time) # binary search test 2 times = [] for i in range(len(lst)): times.append(timeFunc(binary_search, lst, next(gen), True)) avg_time = sum(times) / len(times) binary2Times.append(avg_time) # print table of results print("| algorithm \t| n \t\t| time (s)") print() # print Linear Search for i, n in enumerate(ns): if n < 10000: print("| {0} \t| {1} \t\t| {2:.8f}".format("Linear", n, linearTimes[i])) else: print("| {0} \t| {1} \t| {2:.8f}".format("Linear", n, linearTimes[i])) print() # print Binary Search (presorted) for i, n in enumerate(ns): if n < 10000: print("| {0} | {1} \t\t| {2:.8f}".format("Bin (presort)", n, binary1Times[i])) else: print("| {0} | {1} \t| {2:.8f}".format("Bin (presort)", n, binary1Times[i])) print() # print Binary Search (sort) for i, n in enumerate(ns): if n < 10000: print("| {0} \t| {1} \t\t| {2:.8f}".format("Bin (sort)", n, binary2Times[i])) else: print("| {0} \t| {1} \t| {2:.8f}".format("Bin (sort)", n, binary2Times[i])) # plot the times ax = plt.subplot() ax.set_xlabel("n") ax.set_xscale("log") ax.set_ylabel("Time (s)") ax.set_yscale("log") ax.plot(ns, linearTimes, "r", label="Linear Search") ax.plot(ns, binary1Times, "g", label="Binary Search (presorted)") ax.plot(ns, binary2Times, "b", label="Binary Search (sort)") ax.legend(loc="upper left", shadow=True); plt.show() if __name__ == "__main__": main()
mit
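timeFunc above relies on time.clock, which was removed in Python 3.8, so the script as written only runs on older interpreters. A small substitute sketch using time.perf_counter, the usual replacement on current Pythons:

import time

def time_func(func, *args):
    # Same idea as timeFunc above, but with perf_counter instead of the removed time.clock.
    start = time.perf_counter()
    func(*args)
    return time.perf_counter() - start

print(time_func(sorted, list(range(100000))))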
hunse/vrep-python
dvs-play.py
1
1515
""" Play DVS events in real time TODO: deal with looping event times for recordings > 65 s """ import numpy as np import matplotlib.pyplot as plt import dvs def close(a, b, atol=1e-8, rtol=1e-5): return np.abs(a - b) < atol + rtol * b def imshow(image, ax=None): ax = plt.gca() if ax is None else ax ax.imshow(image, vmin=-1, vmax=1, cmap='gray', interpolation=None) def add_to_image(image, events): for x, y, s, _ in events: image[y, x] += 1 if s else -1 def as_image(events): image = np.zeros((128, 128), dtype=float) add_to_image(image, events) return image # filename = 'dvs.npz' filename = 'dvs-ball-10ms.npz' events = dvs.load(filename, dt_round=True) udiffs = np.unique(np.diff(np.unique(events['t']))) assert np.allclose(udiffs, 0.01) plt.figure(1) plt.clf() times = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7] for i in range(6): plt.subplot(2, 3, i+1) imshow(as_image(events[close(events['t'], times[i])])) plt.title("t = %0.3f" % times[i]) # plt.figure(1) # plt.clf() # image = np.zeros((128, 128), dtype=float) # plt_image = plt.imshow(image, vmin=-1, vmax=1, cmap='gray', interpolation=None) # plt.gca().invert_yaxis() # while t0 < t_max: # time.sleep(0.001) # t1 = time.time() - t_world # new_events = events[(ts > t0) & (ts < t1)] # dt = t1 - t0 # image *= np.exp(-dt / 0.01) # for x, y, s, _ in new_events: # image[y, x] += 1 if s else -1 # plt_image.set_data(image) # plt.draw() # t0 = t1 plt.show()
gpl-2.0
Srisai85/scikit-learn
examples/covariance/plot_sparse_cov.py
300
5078
""" ====================================== Sparse inverse covariance estimation ====================================== Using the GraphLasso estimator to learn a covariance and sparse precision from a small number of samples. To estimate a probabilistic model (e.g. a Gaussian model), estimating the precision matrix, that is the inverse covariance matrix, is as important as estimating the covariance matrix. Indeed a Gaussian model is parametrized by the precision matrix. To be in favorable recovery conditions, we sample the data from a model with a sparse inverse covariance matrix. In addition, we ensure that the data is not too much correlated (limiting the largest coefficient of the precision matrix) and that there a no small coefficients in the precision matrix that cannot be recovered. In addition, with a small number of observations, it is easier to recover a correlation matrix rather than a covariance, thus we scale the time series. Here, the number of samples is slightly larger than the number of dimensions, thus the empirical covariance is still invertible. However, as the observations are strongly correlated, the empirical covariance matrix is ill-conditioned and as a result its inverse --the empirical precision matrix-- is very far from the ground truth. If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number of samples is small, we need to shrink a lot. As a result, the Ledoit-Wolf precision is fairly close to the ground truth precision, that is not far from being diagonal, but the off-diagonal structure is lost. The l1-penalized estimator can recover part of this off-diagonal structure. It learns a sparse precision. It is not able to recover the exact sparsity pattern: it detects too many non-zero coefficients. However, the highest non-zero coefficients of the l1 estimated correspond to the non-zero coefficients in the ground truth. Finally, the coefficients of the l1 precision estimate are biased toward zero: because of the penalty, they are all smaller than the corresponding ground truth value, as can be seen on the figure. Note that, the color range of the precision matrices is tweaked to improve readability of the figure. The full range of values of the empirical precision is not displayed. The alpha parameter of the GraphLasso setting the sparsity of the model is set by internal cross-validation in the GraphLassoCV. As can be seen on figure 2, the grid to compute the cross-validation score is iteratively refined in the neighborhood of the maximum. 
""" print(__doc__) # author: Gael Varoquaux <gael.varoquaux@inria.fr> # License: BSD 3 clause # Copyright: INRIA import numpy as np from scipy import linalg from sklearn.datasets import make_sparse_spd_matrix from sklearn.covariance import GraphLassoCV, ledoit_wolf import matplotlib.pyplot as plt ############################################################################## # Generate the data n_samples = 60 n_features = 20 prng = np.random.RandomState(1) prec = make_sparse_spd_matrix(n_features, alpha=.98, smallest_coef=.4, largest_coef=.7, random_state=prng) cov = linalg.inv(prec) d = np.sqrt(np.diag(cov)) cov /= d cov /= d[:, np.newaxis] prec *= d prec *= d[:, np.newaxis] X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples) X -= X.mean(axis=0) X /= X.std(axis=0) ############################################################################## # Estimate the covariance emp_cov = np.dot(X.T, X) / n_samples model = GraphLassoCV() model.fit(X) cov_ = model.covariance_ prec_ = model.precision_ lw_cov_, _ = ledoit_wolf(X) lw_prec_ = linalg.inv(lw_cov_) ############################################################################## # Plot the results plt.figure(figsize=(10, 6)) plt.subplots_adjust(left=0.02, right=0.98) # plot the covariances covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_), ('GraphLasso', cov_), ('True', cov)] vmax = cov_.max() for i, (name, this_cov) in enumerate(covs): plt.subplot(2, 4, i + 1) plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=plt.cm.RdBu_r) plt.xticks(()) plt.yticks(()) plt.title('%s covariance' % name) # plot the precisions precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_), ('GraphLasso', prec_), ('True', prec)] vmax = .9 * prec_.max() for i, (name, this_prec) in enumerate(precs): ax = plt.subplot(2, 4, i + 5) plt.imshow(np.ma.masked_equal(this_prec, 0), interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=plt.cm.RdBu_r) plt.xticks(()) plt.yticks(()) plt.title('%s precision' % name) ax.set_axis_bgcolor('.7') # plot the model selection metric plt.figure(figsize=(4, 3)) plt.axes([.2, .15, .75, .7]) plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-') plt.axvline(model.alpha_, color='.5') plt.title('Model selection') plt.ylabel('Cross-validation score') plt.xlabel('alpha') plt.show()
bsd-3-clause
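The docstring above notes that the series are scaled so that a correlation matrix, rather than a covariance, is recovered. A short check of that point: after the same centre-and-scale step, the plain np.dot(X.T, X) / n estimate used in the example is exactly the empirical correlation matrix:

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(60, 5) @ rng.randn(5, 5)   # correlated toy data, 60 samples
X -= X.mean(axis=0)
X /= X.std(axis=0)

emp_cov = np.dot(X.T, X) / X.shape[0]
# Unit diagonal and agreement with numpy's own correlation estimate.
print(np.allclose(np.diag(emp_cov), 1.0))
print(np.allclose(emp_cov, np.corrcoef(X, rowvar=False)))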
belkinsky/SFXbot
src/pyAudioAnalysis/audioTrainTest.py
1
46228
import sys import numpy import time import os import glob import pickle import shutil import audioop import signal import csv import ntpath from . import audioFeatureExtraction as aF from . import audioBasicIO from matplotlib.mlab import find import matplotlib.pyplot as plt import scipy.io as sIO from scipy import linalg as la from scipy.spatial import distance import sklearn.svm import sklearn.decomposition import sklearn.ensemble def signal_handler(signal, frame): print('You pressed Ctrl+C! - EXIT') os.system("stty -cbreak echo") sys.exit(0) signal.signal(signal.SIGINT, signal_handler) shortTermWindow = 0.050 shortTermStep = 0.050 eps = 0.00000001 class kNN: def __init__(self, X, Y, k): self.X = X self.Y = Y self.k = k def classify(self, testSample): nClasses = numpy.unique(self.Y).shape[0] YDist = (distance.cdist(self.X, testSample.reshape(1, testSample.shape[0]), 'euclidean')).T iSort = numpy.argsort(YDist) P = numpy.zeros((nClasses,)) for i in range(nClasses): P[i] = numpy.nonzero(self.Y[iSort[0][0:self.k]] == i)[0].shape[0] / float(self.k) return (numpy.argmax(P), P) def classifierWrapper(classifier, classifierType, testSample): ''' This function is used as a wrapper to pattern classification. ARGUMENTS: - classifier: a classifier object of type sklearn.svm.SVC or kNN (defined in this library) or sklearn.ensemble.RandomForestClassifier or sklearn.ensemble.GradientBoostingClassifier or sklearn.ensemble.ExtraTreesClassifier - classifierType: "svm" or "knn" or "randomforests" or "gradientboosting" or "extratrees" - testSample: a feature vector (numpy array) RETURNS: - R: class ID - P: probability estimate EXAMPLE (for some audio signal stored in array x): import audioFeatureExtraction as aF import audioTrainTest as aT # load the classifier (here SVM, for kNN use loadKNNModel instead): [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep] = aT.loadSVModel(modelName) # mid-term feature extraction: [MidTermFeatures, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs*stWin), round(Fs*stStep)); # feature normalization: curFV = (MidTermFeatures[:, i] - MEAN) / STD; # classification [Result, P] = classifierWrapper(Classifier, modelType, curFV) ''' R = -1 P = -1 if classifierType == "knn": [R, P] = classifier.classify(testSample) elif classifierType == "svm" or classifierType == "randomforest" or classifierType == "gradientboosting" or "extratrees": R = classifier.predict(testSample.reshape(1,-1))[0] P = classifier.predict_proba(testSample.reshape(1,-1))[0] return [R, P] def regressionWrapper(model, modelType, testSample): ''' This function is used as a wrapper to pattern classification. ARGUMENTS: - model: regression model - modelType: "svm" or "knn" (TODO) - testSample: a feature vector (numpy array) RETURNS: - R: regression result (estimated value) EXAMPLE (for some audio signal stored in array x): TODO ''' if modelType == "svm" or modelType == "randomforest": return (model.predict(testSample.reshape(1,-1))[0]) # elif classifierType == "knn": # TODO return None def randSplitFeatures(features, partTrain): ''' def randSplitFeatures(features): This function splits a feature set for training and testing. ARGUMENTS: - features: a list ([numOfClasses x 1]) whose elements containt numpy matrices of features. 
each matrix features[i] of class i is [numOfSamples x numOfDimensions] - partTrain: percentage RETURNS: - featuresTrains: a list of training data for each class - featuresTest: a list of testing data for each class ''' featuresTrain = [] featuresTest = [] for i, f in enumerate(features): [numOfSamples, numOfDims] = f.shape randperm = numpy.random.permutation(list(range(numOfSamples))) nTrainSamples = int(round(partTrain * numOfSamples)) featuresTrain.append(f[randperm[0:nTrainSamples]]) featuresTest.append(f[randperm[nTrainSamples::]]) return (featuresTrain, featuresTest) def trainKNN(features, K): ''' Train a kNN classifier. ARGUMENTS: - features: a list ([numOfClasses x 1]) whose elements containt numpy matrices of features. each matrix features[i] of class i is [numOfSamples x numOfDimensions] - K: parameter K RETURNS: - kNN: the trained kNN variable ''' [Xt, Yt] = listOfFeatures2Matrix(features) knn = kNN(Xt, Yt, K) return knn def trainSVM(features, Cparam): ''' Train a multi-class probabilitistic SVM classifier. Note: This function is simply a wrapper to the sklearn functionality for SVM training See function trainSVM_feature() to use a wrapper on both the feature extraction and the SVM training (and parameter tuning) processes. ARGUMENTS: - features: a list ([numOfClasses x 1]) whose elements containt numpy matrices of features each matrix features[i] of class i is [numOfSamples x numOfDimensions] - Cparam: SVM parameter C (cost of constraints violation) RETURNS: - svm: the trained SVM variable NOTE: This function trains a linear-kernel SVM for a given C value. For a different kernel, other types of parameters should be provided. ''' [X, Y] = listOfFeatures2Matrix(features) svm = sklearn.svm.SVC(C = Cparam, kernel = 'linear', probability = True) svm.fit(X,Y) return svm def trainRandomForest(features, n_estimators): ''' Train a multi-class decision tree classifier. Note: This function is simply a wrapper to the sklearn functionality for SVM training See function trainSVM_feature() to use a wrapper on both the feature extraction and the SVM training (and parameter tuning) processes. ARGUMENTS: - features: a list ([numOfClasses x 1]) whose elements containt numpy matrices of features each matrix features[i] of class i is [numOfSamples x numOfDimensions] - n_estimators: number of trees in the forest RETURNS: - svm: the trained SVM variable NOTE: This function trains a linear-kernel SVM for a given C value. For a different kernel, other types of parameters should be provided. ''' [X, Y] = listOfFeatures2Matrix(features) rf = sklearn.ensemble.RandomForestClassifier(n_estimators = n_estimators) rf.fit(X,Y) return rf def trainGradientBoosting(features, n_estimators): ''' Train a gradient boosting classifier Note: This function is simply a wrapper to the sklearn functionality for SVM training See function trainSVM_feature() to use a wrapper on both the feature extraction and the SVM training (and parameter tuning) processes. ARGUMENTS: - features: a list ([numOfClasses x 1]) whose elements containt numpy matrices of features each matrix features[i] of class i is [numOfSamples x numOfDimensions] - n_estimators: number of trees in the forest RETURNS: - svm: the trained SVM variable NOTE: This function trains a linear-kernel SVM for a given C value. For a different kernel, other types of parameters should be provided. 
''' [X, Y] = listOfFeatures2Matrix(features) rf = sklearn.ensemble.GradientBoostingClassifier(n_estimators = n_estimators) rf.fit(X,Y) return rf def trainExtraTrees(features, n_estimators): ''' Train a gradient boosting classifier Note: This function is simply a wrapper to the sklearn functionality for extra tree classifiers See function trainSVM_feature() to use a wrapper on both the feature extraction and the SVM training (and parameter tuning) processes. ARGUMENTS: - features: a list ([numOfClasses x 1]) whose elements containt numpy matrices of features each matrix features[i] of class i is [numOfSamples x numOfDimensions] - n_estimators: number of trees in the forest RETURNS: - svm: the trained SVM variable NOTE: This function trains a linear-kernel SVM for a given C value. For a different kernel, other types of parameters should be provided. ''' [X, Y] = listOfFeatures2Matrix(features) et = sklearn.ensemble.ExtraTreesClassifier(n_estimators = n_estimators) et.fit(X,Y) return et def trainSVMregression(Features, Y, Cparam): svm = sklearn.svm.SVR(C = Cparam, kernel = 'linear') print(Features.shape, Y) svm.fit(Features,Y) trainError = numpy.mean(numpy.abs(svm.predict(Features) - Y)) return svm, trainError # TODO (not avaiable for regression?) #def trainRandomForestRegression(Features, Y, n_estimators): # rf = sklearn.ensemble.RandomForestClassifier(n_estimators = n_estimators) # print Features.shape, Y # rf.fit(Features,Y) # trainError = numpy.mean(numpy.abs(rf.predict(Features) - Y)) # return rf, trainError def featureAndTrain(listOfDirs, mtWin, mtStep, stWin, stStep, classifierType, modelName, computeBEAT=False, perTrain=0.90): ''' This function is used as a wrapper to segment-based audio feature extraction and classifier training. ARGUMENTS: listOfDirs: list of paths of directories. Each directory contains a signle audio class whose samples are stored in seperate WAV files. mtWin, mtStep: mid-term window length and step stWin, stStep: short-term window and step classifierType: "svm" or "knn" or "randomforest" or "gradientboosting" or "extratrees" modelName: name of the model to be saved RETURNS: None. Resulting classifier along with the respective model parameters are saved on files. 
''' # STEP A: Feature Extraction: [features, classNames, _] = aF.dirsWavFeatureExtraction(listOfDirs, mtWin, mtStep, stWin, stStep, computeBEAT=computeBEAT) if len(features) == 0: print("trainSVM_feature ERROR: No data found in any input folder!") return numOfFeatures = features[0].shape[1] featureNames = ["features" + str(d + 1) for d in range(numOfFeatures)] writeTrainDataToARFF(modelName, features, classNames, featureNames) for i, f in enumerate(features): if len(f) == 0: print("trainSVM_feature ERROR: " + listOfDirs[i] + " folder is empty or non-existing!") return # STEP B: Classifier Evaluation and Parameter Selection: if classifierType == "svm": classifierParams = numpy.array([0.001, 0.01, 0.5, 1.0, 5.0, 10.0]) elif classifierType == "randomforest": classifierParams = numpy.array([10, 25, 50, 100,200,500]) elif classifierType == "knn": classifierParams = numpy.array([1, 3, 5, 7, 9, 11, 13, 15]) elif classifierType == "gradientboosting": classifierParams = numpy.array([10, 25, 50, 100,200,500]) elif classifierType == "extratrees": classifierParams = numpy.array([10, 25, 50, 100,200,500]) # get optimal classifeir parameter: bestParam = evaluateClassifier(features, classNames, 100, classifierType, classifierParams, 0, perTrain) print("Selected params: {0:.5f}".format(bestParam)) C = len(classNames) [featuresNorm, MEAN, STD] = normalizeFeatures(features) # normalize features MEAN = MEAN.tolist() STD = STD.tolist() featuresNew = featuresNorm # STEP C: Save the classifier to file if classifierType == "svm": Classifier = trainSVM(featuresNew, bestParam) with open(modelName, 'wb') as fid: # save to file pickle.dump(Classifier, fid) fo = open(modelName + "MEANS", "wb") pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL) fo.close() elif classifierType == "randomforest": Classifier = trainRandomForest(featuresNew, bestParam) with open(modelName, 'wb') as fid: # save to file pickle.dump(Classifier, fid) fo = open(modelName + "MEANS", "wb") pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL) fo.close() elif classifierType == "gradientboosting": Classifier = trainGradientBoosting(featuresNew, bestParam) with open(modelName, 'wb') as fid: # save to file pickle.dump(Classifier, fid) fo = open(modelName + "MEANS", "wb") pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL) fo.close() 
elif classifierType == "extratrees": Classifier = trainExtraTrees(featuresNew, bestParam) with open(modelName, 'wb') as fid: # save to file pickle.dump(Classifier, fid) fo = open(modelName + "MEANS", "wb") pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL) fo.close() elif classifierType == "knn": [X, Y] = listOfFeatures2Matrix(featuresNew) X = X.tolist() Y = Y.tolist() fo = open(modelName, "wb") pickle.dump(X, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(Y, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(bestParam, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL) fo.close() def featureAndTrainRegression(dirName, mtWin, mtStep, stWin, stStep, modelType, modelName, computeBEAT=False): ''' This function is used as a wrapper to segment-based audio feature extraction and classifier training. ARGUMENTS: dirName: path of directory containing the WAV files and Regression CSVs mtWin, mtStep: mid-term window length and step stWin, stStep: short-term window and step modelType: "svm" or "knn" or "randomforest" modelName: name of the model to be saved RETURNS: None. Resulting regression model along with the respective model parameters are saved on files. ''' # STEP A: Feature Extraction: [features, _, fileNames] = aF.dirsWavFeatureExtraction([dirName], mtWin, mtStep, stWin, stStep, computeBEAT=computeBEAT) features = features[0] fileNames = [ntpath.basename(f) for f in fileNames[0]] # Read CSVs: CSVs = glob.glob(dirName + os.sep + "*.csv") regressionLabels = [] regressionNames = [] for c in CSVs: # for each CSV curRegressionLabels = numpy.zeros((len(fileNames, ))) # read filenames, map to "fileNames" and append respective values in the regressionLabels with open(c, 'rb') as csvfile: CSVreader = csv.reader(csvfile, delimiter=',', quotechar='|') for row in CSVreader: if len(row) == 2: if row[0]+".wav" in fileNames: index = fileNames.index(row[0]+".wav") curRegressionLabels[index] = float(row[1]) regressionLabels.append(curRegressionLabels) # curRegressionLabels is the list of values for the current regression problem regressionNames.append(ntpath.basename(c).replace(".csv", "")) # regression task name if len(features) == 0: print("ERROR: No data found in any input folder!") return numOfFeatures = features.shape[1] # TODO: ARRF WRITE???? 
# STEP B: Classifier Evaluation and Parameter Selection: if modelType == "svm": modelParams = numpy.array([0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 5.0, 10.0]) elif modelType == "randomforest": modelParams = numpy.array([5, 10, 25, 50, 100]) # elif modelType == "knn": # modelParams = numpy.array([1, 3, 5, 7, 9, 11, 13, 15]); for iRegression, r in enumerate(regressionNames): # get optimal classifeir parameter: print("Regression task " + r) bestParam = evaluateRegression(features, regressionLabels[iRegression], 100, modelType, modelParams) print("Selected params: {0:.5f}".format(bestParam)) [featuresNorm, MEAN, STD] = normalizeFeatures([features]) # normalize features # STEP C: Save the model to file if modelType == "svm": Classifier, _ = trainSVMregression(featuresNorm[0], regressionLabels[iRegression], bestParam) with open(modelName + "_" + r, 'wb') as fid: # save to file pickle.dump(Classifier, fid) fo = open(modelName + "_" + r + "MEANS", "wb") pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL) fo.close() ''' TODO elif modelType == "randomforest": Classifier, _ = trainRandomForestRegression(featuresNorm[0], regressionLabels[iRegression], bestParam) with open(modelName + "_" + r, 'wb') as fid: # save to file cPickle.dump(Classifier, fid) fo = open(modelName + "_" + r + "MEANS", "wb") cPickle.dump(MEAN, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(STD, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(mtWin, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(mtStep, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(stWin, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(stStep, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(computeBEAT, fo, protocol=cPickle.HIGHEST_PROTOCOL) fo.close() ''' # elif classifierType == "knn": def loadKNNModel(kNNModelName, isRegression=False): try: fo = open(kNNModelName, "rb") except IOError: print("didn't find file") return try: X = pickle.load(fo) Y = pickle.load(fo) MEAN = pickle.load(fo) STD = pickle.load(fo) if not isRegression: classNames = pickle.load(fo) K = pickle.load(fo) mtWin = pickle.load(fo) mtStep = pickle.load(fo) stWin = pickle.load(fo) stStep = pickle.load(fo) computeBEAT = pickle.load(fo) except: fo.close() fo.close() X = numpy.array(X) Y = numpy.array(Y) MEAN = numpy.array(MEAN) STD = numpy.array(STD) Classifier = kNN(X, Y, K) # Note: a direct call to the kNN constructor is used here if isRegression: return(Classifier, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT) else: return(Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT) def loadSVModel(SVMmodelName, isRegression=False): ''' This function loads an SVM model either for classification or training. 
ARGMUMENTS: - SVMmodelName: the path of the model to be loaded - isRegression: a flag indigating whereas this model is regression or not ''' try: fo = open(SVMmodelName+"MEANS", "rb") except IOError: print("Load SVM Model: Didn't find file") return try: MEAN = pickle.load(fo) STD = pickle.load(fo) if not isRegression: classNames = pickle.load(fo) mtWin = pickle.load(fo) mtStep = pickle.load(fo) stWin = pickle.load(fo) stStep = pickle.load(fo) computeBEAT = pickle.load(fo) except: fo.close() fo.close() MEAN = numpy.array(MEAN) STD = numpy.array(STD) COEFF = [] with open(SVMmodelName, 'rb') as fid: SVM = pickle.load(fid) if isRegression: return(SVM, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT) else: return(SVM, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT) def loadRandomForestModel(RFmodelName, isRegression=False): ''' This function loads an SVM model either for classification or training. ARGMUMENTS: - SVMmodelName: the path of the model to be loaded - isRegression: a flag indigating whereas this model is regression or not ''' try: fo = open(RFmodelName+"MEANS", "rb") except IOError: print("Load Random Forest Model: Didn't find file") return try: MEAN = pickle.load(fo) STD = pickle.load(fo) if not isRegression: classNames = pickle.load(fo) mtWin = pickle.load(fo) mtStep = pickle.load(fo) stWin = pickle.load(fo) stStep = pickle.load(fo) computeBEAT = pickle.load(fo) except: fo.close() fo.close() MEAN = numpy.array(MEAN) STD = numpy.array(STD) COEFF = [] with open(RFmodelName, 'rb') as fid: RF = pickle.load(fid) if isRegression: return(RF, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT) else: return(RF, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT) def loadGradientBoostingModel(GBModelName, isRegression=False): ''' This function loads gradient boosting either for classification or training. ARGMUMENTS: - SVMmodelName: the path of the model to be loaded - isRegression: a flag indigating whereas this model is regression or not ''' try: fo = open(GBModelName+"MEANS", "rb") except IOError: print("Load Random Forest Model: Didn't find file") return try: MEAN = pickle.load(fo) STD = pickle.load(fo) if not isRegression: classNames = pickle.load(fo) mtWin = pickle.load(fo) mtStep = pickle.load(fo) stWin = pickle.load(fo) stStep = pickle.load(fo) computeBEAT = pickle.load(fo) except: fo.close() fo.close() MEAN = numpy.array(MEAN) STD = numpy.array(STD) COEFF = [] with open(GBModelName, 'rb') as fid: GB = pickle.load(fid) if isRegression: return(GB, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT) else: return(GB, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT) def loadExtraTreesModel(ETmodelName, isRegression=False): ''' This function loads extra trees either for classification or training. 
ARGMUMENTS: - SVMmodelName: the path of the model to be loaded - isRegression: a flag indigating whereas this model is regression or not ''' try: fo = open(ETmodelName+"MEANS", "rb") except IOError: print("Load Random Forest Model: Didn't find file") return try: MEAN = pickle.load(fo) STD = pickle.load(fo) if not isRegression: classNames = pickle.load(fo) mtWin = pickle.load(fo) mtStep = pickle.load(fo) stWin = pickle.load(fo) stStep = pickle.load(fo) computeBEAT = pickle.load(fo) except: fo.close() fo.close() MEAN = numpy.array(MEAN) STD = numpy.array(STD) COEFF = [] with open(ETmodelName, 'rb') as fid: GB = pickle.load(fid) if isRegression: return(GB, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT) else: return(GB, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT) def evaluateClassifier(features, ClassNames, nExp, ClassifierName, Params, parameterMode, perTrain=0.90): ''' ARGUMENTS: features: a list ([numOfClasses x 1]) whose elements containt numpy matrices of features. each matrix features[i] of class i is [numOfSamples x numOfDimensions] ClassNames: list of class names (strings) nExp: number of cross-validation experiments ClassifierName: svm or knn or randomforest Params: list of classifier parameters (for parameter tuning during cross-validation) parameterMode: 0: choose parameters that lead to maximum overall classification ACCURACY 1: choose parameters that lead to maximum overall F1 MEASURE RETURNS: bestParam: the value of the input parameter that optimizes the selected performance measure ''' # feature normalization: (featuresNorm, MEAN, STD) = normalizeFeatures(features) #featuresNorm = features; nClasses = len(features) CAll = [] acAll = [] F1All = [] PrecisionClassesAll = [] RecallClassesAll = [] ClassesAll = [] F1ClassesAll = [] CMsAll = [] # compute total number of samples: nSamplesTotal = 0 for f in features: nSamplesTotal += f.shape[0] if nSamplesTotal > 1000 and nExp > 50: nExp = 50 print("Number of training experiments changed to 50 due to high number of samples") if nSamplesTotal > 2000 and nExp > 10: nExp = 10 print("Number of training experiments changed to 10 due to high number of samples") for Ci, C in enumerate(Params): # for each param value CM = numpy.zeros((nClasses, nClasses)) for e in range(nExp): # for each cross-validation iteration: print("Param = {0:.5f} - Classifier Evaluation Experiment {1:d} of {2:d}".format(C, e+1, nExp)) # split features: featuresTrain, featuresTest = randSplitFeatures(featuresNorm, perTrain) # train multi-class svms: if ClassifierName == "svm": Classifier = trainSVM(featuresTrain, C) elif ClassifierName == "knn": Classifier = trainKNN(featuresTrain, C) elif ClassifierName == "randomforest": Classifier = trainRandomForest(featuresTrain, C) elif ClassifierName == "gradientboosting": Classifier = trainGradientBoosting(featuresTrain, C) elif ClassifierName == "extratrees": Classifier = trainExtraTrees(featuresTrain, C) CMt = numpy.zeros((nClasses, nClasses)) for c1 in range(nClasses): #Results = Classifier.pred(featuresTest[c1]) nTestSamples = len(featuresTest[c1]) Results = numpy.zeros((nTestSamples, 1)) for ss in range(nTestSamples): [Results[ss], _] = classifierWrapper(Classifier, ClassifierName, featuresTest[c1][ss]) for c2 in range(nClasses): CMt[c1][c2] = float(len(numpy.nonzero(Results == c2)[0])) CM = CM + CMt CM = CM + 0.0000000010 Rec = numpy.zeros((CM.shape[0], )) Pre = numpy.zeros((CM.shape[0], )) for ci in range(CM.shape[0]): Rec[ci] = CM[ci, ci] / numpy.sum(CM[ci, :]) Pre[ci] = CM[ci, ci] / 
numpy.sum(CM[:, ci]) PrecisionClassesAll.append(Pre) RecallClassesAll.append(Rec) F1 = 2 * Rec * Pre / (Rec + Pre) F1ClassesAll.append(F1) acAll.append(numpy.sum(numpy.diagonal(CM)) / numpy.sum(CM)) CMsAll.append(CM) F1All.append(numpy.mean(F1)) # print "{0:6.4f}{1:6.4f}{2:6.1f}{3:6.1f}".format(nu, g, 100.0*acAll[-1], 100.0*F1All[-1]) print(("\t\t"), end=' ') for i, c in enumerate(ClassNames): if i == len(ClassNames)-1: print("{0:s}\t\t".format(c), end=' ') else: print("{0:s}\t\t\t".format(c), end=' ') print ("OVERALL") print(("\tC"), end=' ') for c in ClassNames: print("\tPRE\tREC\tF1", end=' ') print("\t{0:s}\t{1:s}".format("ACC", "F1")) bestAcInd = numpy.argmax(acAll) bestF1Ind = numpy.argmax(F1All) for i in range(len(PrecisionClassesAll)): print("\t{0:.3f}".format(Params[i]), end=' ') for c in range(len(PrecisionClassesAll[i])): print("\t{0:.1f}\t{1:.1f}\t{2:.1f}".format(100.0 * PrecisionClassesAll[i][c], 100.0 * RecallClassesAll[i][c], 100.0 * F1ClassesAll[i][c]), end=' ') print("\t{0:.1f}\t{1:.1f}".format(100.0 * acAll[i], 100.0 * F1All[i]), end=' ') if i == bestF1Ind: print("\t best F1", end=' ') if i == bestAcInd: print("\t best Acc", end=' ') print() if parameterMode == 0: # keep parameters that maximize overall classification accuracy: print("Confusion Matrix:") printConfusionMatrix(CMsAll[bestAcInd], ClassNames) return Params[bestAcInd] elif parameterMode == 1: # keep parameters that maximize overall F1 measure: print("Confusion Matrix:") printConfusionMatrix(CMsAll[bestF1Ind], ClassNames) return Params[bestF1Ind] def evaluateRegression(features, labels, nExp, MethodName, Params): ''' ARGUMENTS: features: numpy matrices of features [numOfSamples x numOfDimensions] labels: list of sample labels nExp: number of cross-validation experiments MethodName: "svm" or "randomforest" Params: list of classifier params to be evaluated RETURNS: bestParam: the value of the input parameter that optimizes the selected performance measure ''' # feature normalization: (featuresNorm, MEAN, STD) = normalizeFeatures([features]) featuresNorm = featuresNorm[0] nSamples = labels.shape[0] partTrain = 0.9 ErrorsAll = [] ErrorsTrainAll = [] ErrorsBaselineAll = [] for Ci, C in enumerate(Params): # for each param value Errors = [] ErrorsTrain = [] ErrorsBaseline = [] for e in range(nExp): # for each cross-validation iteration: # split features: randperm = numpy.random.permutation(list(range(nSamples))) nTrain = int(round(partTrain * nSamples)) featuresTrain = [featuresNorm[randperm[i]] for i in range(nTrain)] featuresTest = [featuresNorm[randperm[i+nTrain]] for i in range(nSamples - nTrain)] labelsTrain = [labels[randperm[i]] for i in range(nTrain)] labelsTest = [labels[randperm[i + nTrain]] for i in range(nSamples - nTrain)] # train multi-class svms: featuresTrain = numpy.matrix(featuresTrain) if MethodName == "svm": [Classifier, trainError] = trainSVMregression(featuresTrain, labelsTrain, C) # TODO #elif MethodName == "randomforest": # [Classifier, trainError] = trainRandomForestRegression(featuresTrain, labelsTrain, C) # TODO KNN # elif ClassifierName=="knn": # Classifier = trainKNN(featuresTrain, C) ErrorTest = [] ErrorTestBaseline = [] for itest, fTest in enumerate(featuresTest): R = regressionWrapper(Classifier, MethodName, fTest) Rbaseline = numpy.mean(labelsTrain) ErrorTest.append((R - labelsTest[itest]) * (R - labelsTest[itest])) ErrorTestBaseline.append((Rbaseline - labelsTest[itest]) * (Rbaseline - labelsTest[itest])) Error = numpy.array(ErrorTest).mean() ErrorBaseline = 
numpy.array(ErrorTestBaseline).mean() Errors.append(Error) ErrorsTrain.append(trainError) ErrorsBaseline.append(ErrorBaseline) ErrorsAll.append(numpy.array(Errors).mean()) ErrorsTrainAll.append(numpy.array(ErrorsTrain).mean()) ErrorsBaselineAll.append(numpy.array(ErrorsBaseline).mean()) bestInd = numpy.argmin(ErrorsAll) print("{0:s}\t\t{1:s}\t\t{2:s}\t\t{3:s}".format("Param", "MSE", "T-MSE", "R-MSE")) for i in range(len(ErrorsAll)): print("{0:.4f}\t\t{1:.2f}\t\t{2:.2f}\t\t{3:.2f}".format(Params[i], ErrorsAll[i], ErrorsTrainAll[i], ErrorsBaselineAll[i]), end=' ') if i == bestInd: print("\t\t best", end=' ') print() return Params[bestInd] def printConfusionMatrix(CM, ClassNames): ''' This function prints a confusion matrix for a particular classification task. ARGUMENTS: CM: a 2-D numpy array of the confusion matrix (CM[i,j] is the number of times a sample from class i was classified in class j) ClassNames: a list that contains the names of the classes ''' if CM.shape[0] != len(ClassNames): print("printConfusionMatrix: Wrong argument sizes\n") return for c in ClassNames: if len(c) > 4: c = c[0:3] print("\t{0:s}".format(c), end=' ') print() for i, c in enumerate(ClassNames): if len(c) > 4: c = c[0:3] print("{0:s}".format(c), end=' ') for j in range(len(ClassNames)): print("\t{0:.1f}".format(100.0 * CM[i][j] / numpy.sum(CM)), end=' ') print() def normalizeFeatures(features): ''' This function normalizes a feature set to 0-mean and 1-std. Used in most classifier trainning cases. ARGUMENTS: - features: list of feature matrices (each one of them is a numpy matrix) RETURNS: - featuresNorm: list of NORMALIZED feature matrices - MEAN: mean vector - STD: std vector ''' X = numpy.array([]) for count, f in enumerate(features): if f.shape[0] > 0: if count == 0: X = f else: X = numpy.vstack((X, f)) count += 1 MEAN = numpy.mean(X, axis=0) STD = numpy.std(X, axis=0) featuresNorm = [] for f in features: ft = f.copy() for nSamples in range(f.shape[0]): ft[nSamples, :] = (ft[nSamples, :] - MEAN) / STD featuresNorm.append(ft) return (featuresNorm, MEAN, STD) def listOfFeatures2Matrix(features): ''' listOfFeatures2Matrix(features) This function takes a list of feature matrices as argument and returns a single concatenated feature matrix and the respective class labels. 
ARGUMENTS: - features: a list of feature matrices RETURNS: - X: a concatenated matrix of features - Y: a vector of class indeces ''' X = numpy.array([]) Y = numpy.array([]) for i, f in enumerate(features): if i == 0: X = f Y = i * numpy.ones((len(f), 1)) else: X = numpy.vstack((X, f)) Y = numpy.append(Y, i * numpy.ones((len(f), 1))) return (X, Y) def pcaDimRed(features, nDims): [X, Y] = listOfFeatures2Matrix(features) pca = sklearn.decomposition.PCA(n_components = nDims) pca.fit(X) coeff = pca.components_ coeff = coeff[:, 0:nDims] featuresNew = [] for f in features: ft = f.copy() # ft = pca.transform(ft, k=nDims) ft = numpy.dot(f, coeff) featuresNew.append(ft) return (featuresNew, coeff) def fileClassification(inputFile, modelName, modelType): # Load classifier: if not os.path.isfile(inputFile): print("fileClassification: wav file not found!") return (-1, -1, -1) [Fs, x] = audioBasicIO.readAudioFile(inputFile) # read audio file and convert to mono x = audioBasicIO.stereo2mono(x) return fragmentClassification(Fs, x, modelName, modelType) def fragmentClassification(Fs, x, modelName, modelType): if not os.path.isfile(modelName): print("fileClassification: input modelName not found!") return (-1, -1, -1) if modelType == 'svm': [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadSVModel(modelName) elif modelType == 'knn': [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadKNNModel(modelName) elif modelType == 'randomforest': [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadRandomForestModel(modelName) elif modelType == 'gradientboosting': [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadGradientBoostingModel(modelName) elif modelType == 'extratrees': [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadExtraTreesModel(modelName) # feature extraction: [MidTermFeatures, s] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * stWin), round(Fs * stStep)) MidTermFeatures = MidTermFeatures.mean(axis=1) # long term averaging of mid-term statistics if computeBEAT: [beat, beatConf] = aF.beatExtraction(s, stStep) MidTermFeatures = numpy.append(MidTermFeatures, beat) MidTermFeatures = numpy.append(MidTermFeatures, beatConf) curFV = (MidTermFeatures - MEAN) / STD # normalization [Result, P] = classifierWrapper(Classifier, modelType, curFV) # classification return Result, P, classNames def fileRegression(inputFile, modelName, modelType): # Load classifier: if not os.path.isfile(inputFile): print("fileClassification: wav file not found!") return (-1, -1, -1) regressionModels = glob.glob(modelName + "_*") regressionModels2 = [] for r in regressionModels: if r[-5::] != "MEANS": regressionModels2.append(r) regressionModels = regressionModels2 regressionNames = [] for r in regressionModels: regressionNames.append(r[r.rfind("_")+1::]) # FEATURE EXTRACTION # LOAD ONLY THE FIRST MODEL (for mtWin, etc) if modelType == 'svm': [_, _, _, mtWin, mtStep, stWin, stStep, computeBEAT] = loadSVModel(regressionModels[0], True) elif modelType == 'knn': [_, _, _, mtWin, mtStep, stWin, stStep, computeBEAT] = loadKNNModel(regressionModels[0], True) [Fs, x] = audioBasicIO.readAudioFile(inputFile) # read audio file and convert to mono x = audioBasicIO.stereo2mono(x) # feature extraction: [MidTermFeatures, s] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * stWin), round(Fs * stStep)) MidTermFeatures = MidTermFeatures.mean(axis=1) 
# long term averaging of mid-term statistics if computeBEAT: [beat, beatConf] = aF.beatExtraction(s, stStep) MidTermFeatures = numpy.append(MidTermFeatures, beat) MidTermFeatures = numpy.append(MidTermFeatures, beatConf) # REGRESSION R = [] for ir, r in enumerate(regressionModels): if not os.path.isfile(r): print("fileClassification: input modelName not found!") return (-1, -1, -1) if modelType == 'svm': [Model, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT] = loadSVModel(r, True) elif modelType == 'knn': [Model, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT] = loadKNNModel(r, True) curFV = (MidTermFeatures - MEAN) / STD # normalization R.append(regressionWrapper(Model, modelType, curFV)) # classification return R, regressionNames def lda(data, labels, redDim): # Centre data data -= data.mean(axis=0) nData = numpy.shape(data)[0] nDim = numpy.shape(data)[1] print(nData, nDim) Sw = numpy.zeros((nDim, nDim)) Sb = numpy.zeros((nDim, nDim)) C = numpy.cov((data.T)) # Loop over classes classes = numpy.unique(labels) for i in range(len(classes)): # Find relevant datapoints indices = (numpy.where(labels == classes[i])) d = numpy.squeeze(data[indices, :]) classcov = numpy.cov((d.T)) Sw += float(numpy.shape(indices)[0])/nData * classcov Sb = C - Sw # Now solve for W # Compute eigenvalues, eigenvectors and sort into order #evals,evecs = linalg.eig(dot(linalg.pinv(Sw),sqrt(Sb))) evals, evecs = la.eig(Sw, Sb) indices = numpy.argsort(evals) indices = indices[::-1] evecs = evecs[:, indices] evals = evals[indices] w = evecs[:, :redDim] #print evals, w newData = numpy.dot(data, w) #for i in range(newData.shape[0]): # plt.text(newData[i,0],newData[i,1],str(labels[i])) #plt.xlim([newData[:,0].min(), newData[:,0].max()]) #plt.ylim([newData[:,1].min(), newData[:,1].max()]) #plt.show() return newData, w def writeTrainDataToARFF(modelName, features, classNames, featureNames): f = open(modelName + ".arff", 'w') f.write('@RELATION ' + modelName + '\n') for fn in featureNames: f.write('@ATTRIBUTE ' + fn + ' NUMERIC\n') f.write('@ATTRIBUTE class {') for c in range(len(classNames)-1): f.write(classNames[c] + ',') f.write(classNames[-1] + '}\n\n') f.write('@DATA\n') for c, fe in enumerate(features): for i in range(fe.shape[0]): for j in range(fe.shape[1]): f.write("{0:f},".format(fe[i, j])) f.write(classNames[c]+"\n") f.close() def trainSpeakerModelsScript(): ''' This script is used to train the speaker-related models (NOTE: data paths are hard-coded and NOT included in the library, the models are, however included) import audioTrainTest as aT aT.trainSpeakerModelsScript() ''' mtWin = 2.0 mtStep = 2.0 stWin = 0.020 stStep = 0.020 dirName = "DIARIZATION_ALL/all" listOfDirs = [os.path.join(dirName, name) for name in os.listdir(dirName) if os.path.isdir(os.path.join(dirName, name))] featureAndTrain(listOfDirs, mtWin, mtStep, stWin, stStep, "knn", "data/knnSpeakerAll", computeBEAT=False, perTrain=0.50) dirName = "DIARIZATION_ALL/female_male" listOfDirs = [os.path.join(dirName, name) for name in os.listdir(dirName) if os.path.isdir(os.path.join(dirName, name))] featureAndTrain(listOfDirs, mtWin, mtStep, stWin, stStep, "knn", "data/knnSpeakerFemaleMale", computeBEAT=False, perTrain=0.50) def main(argv): return 0 if __name__ == '__main__': main(sys.argv)
mit
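A minimal sketch of the z-score normalization used by normalizeFeatures in the record above: all per-class feature matrices are stacked, a global mean and standard deviation are computed, and every matrix is rescaled with them. The toy matrices and the helper name normalize_feature_list are made up for illustration; only the normalization formula comes from the record.

import numpy as np

def normalize_feature_list(features):
    # features: one [numOfSamples x numOfDimensions] matrix per class
    X = np.vstack(features)                      # pool samples of all classes
    MEAN, STD = X.mean(axis=0), X.std(axis=0)
    return [(f - MEAN) / STD for f in features], MEAN, STD

# toy data: two classes, two feature dimensions
features = [np.array([[1.0, 10.0], [2.0, 12.0], [3.0, 11.0]]),
            np.array([[5.0, 20.0], [6.0, 22.0]])]
features_norm, MEAN, STD = normalize_feature_list(features)
print(MEAN, STD)
print(features_norm[0])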
SU-ECE-17-7/hotspotter
_graveyard/voting_rules1.py
2
20685
#_________________ # OLD def build_voters_profile(hs, qcx, K): '''This is too similar to assign_matches_vsmany right now''' cx2_nx = hs.tables.cx2_nx hs.ensure_matcher(match_type='vsmany', K=K) K += 1 cx2_desc = hs.feats.cx2_desc cx2_kpts = hs.feats.cx2_kpts cx2_rchip_size = hs.get_cx2_rchip_size() desc1 = cx2_desc[qcx] args = hs.matcher.vsmany_args vsmany_flann = args.vsmany_flann ax2_cx = args.ax2_cx ax2_fx = args.ax2_fx print('[invest] Building voter preferences over %s indexed descriptors. K=%r' % (helpers.commas(len(ax2_cx)), K)) nn_args = (args, qcx, cx2_kpts, cx2_desc, cx2_rchip_size, K+1) nn_result = mc2.vsmany_nearest_neighbors(*nn_args) (qfx2_ax, qfx2_dists, qfx2_valid) = nn_result vote_dists = qfx2_dists[:, 0:K] norm_dists = qfx2_dists[:, K] # k+1th descriptor for normalization # Score the feature matches qfx2_score = np.array([mc2.LNBNN_fn(_vdist.T, norm_dists) for _vdist in vote_dists.T]).T # Vote using the inverted file qfx2_cx = ax2_cx[qfx2_ax[:, 0:K]] qfx2_fx = ax2_fx[qfx2_ax[:, 0:K]] qfx2_valid = qfx2_valid[:, 0:K] qfx2_nx = temporary_names(qfx2_cx, cx2_nx[qfx2_cx], zeroed_cx_list=[qcx]) voters_profile = (qfx2_nx, qfx2_cx, qfx2_fx, qfx2_score, qfx2_valid) return voters_profile #def filter_alternative_frequencies2(alternative_ids1, qfx2_altx1, correct_altx, max_cands=32): def filter_alternative_frequencies(alternative_ids1, qfx2_altx1, correct_altx, max_cands=32): 'determines the alternatives who appear the most and filters out the least occuring' alternative_ids = alternative_ids.copy() qfx2_altx = qfx2_altx.copy() altx2_freq = np.bincount(qfx2_altx.flatten()+1)[1:] smallest_altx = altx2_freq.argsort() smallest_cfreq = altx2_freq[smallest_altx] smallest_thresh = len(smallest_cfreq) - max_cands print('Current num alternatives = %r. Truncating to %r' % (len(altx2_freq), max_cands)) print('Frequency stats: '+str(helpers.mystats(altx2_freq[altx2_freq != 0]))) print('Correct alternative frequency = %r' % altx2_freq[correct_altx]) print('Correct alternative frequency rank = %r' % (np.where(smallest_altx == correct_altx)[0],)) if smallest_thresh > -1: freq_thresh = smallest_cfreq[smallest_thresh] print('Truncating at rank = %r' % smallest_thresh) print('Truncating at frequency = %r' % freq_thresh) to_remove_altx, = np.where(altx2_freq <= freq_thresh) qfx2_remove = np.in1d(qfx2_altx.flatten(), to_remove_altx) qfx2_remove.shape = qfx2_altx.shape qfx2_altx[qfx2_remove] = -1 keep_ids = True - np.in1d(alternative_ids, alternative_ids[to_remove_altx]) alternative_ids = alternative_ids[keep_ids] return alternative_ids, qfx2_altx def temporary_names(cx_list, nx_list, zeroed_cx_list=[], zeroed_nx_list=[]): '''Test Input: nx_list = np.array([(1, 5, 6), (2, 4, 0), (1, 1, 1), (5, 5, 5)]) cx_list = np.array([(2, 3, 4), (5, 6, 7), (8, 9, 10), (4, 5, 5)]) zeroed_nx_list = [] zeroed_cx_list = [3] ''' zeroed_cx_list = set(zeroed_cx_list) tmp_nx_list = [] for ix, (cx, nx) in enumerate(zip(cx_list.flat, nx_list.flat)): if cx in zeroed_cx_list: tmp_nx_list.append(0) elif nx in zeroed_nx_list: tmp_nx_list.append(0) elif nx >= 2: tmp_nx_list.append(nx) else: tmp_nx_list.append(-cx) tmp_nx_list = np.array(tmp_nx_list) tmp_nx_list = tmp_nx_list.reshape(cx_list.shape) return tmp_nx_list def build_pairwise_votes(alternative_ids, qfx2_altx): ''' Divides full rankings over alternatives into pairwise rankings. Assumes that the breaking has already been applied. e.g. 
alternative_ids = [0,1,2] qfx2_altx = np.array([(0, 1, 2), (1, 2, 0)]) ''' nAlts = len(alternative_ids) def generate_pairwise_votes(partial_order, compliment_order): pairwise_winners = [partial_order[rank:rank+1] for rank in xrange(0, len(partial_order))] pairwise_losers = [np.hstack((compliment_order, partial_order[rank+1:])) for rank in xrange(0, len(partial_order))] pairwise_vote_list = [helpers.cartesian((pwinners, plosers)) for pwinners, plosers in zip(pairwise_winners, pairwise_losers)] pairwise_votes = np.vstack(pairwise_vote_list) return pairwise_votes pairwise_mat = np.zeros((nAlts, nAlts)) nVoters = len(qfx2_altx) progstr = helpers.make_progress_fmt_str(nVoters, lbl='[voting] building P(d)') for ix, qfx in enumerate(xrange(nVoters)): helpers.print_(progstr % (ix+1)) partial_order = qfx2_altx[qfx] partial_order = partial_order[partial_order != -1] if len(partial_order) == 0: continue compliment_order = np.setdiff1d(alternative_ids, partial_order) pairwise_votes = generate_pairwise_votes(partial_order, compliment_order) def sum_win(ij): pairwise_mat[ij[0], ij[1]] += 1 # pairiwse wins on off-diagonal def sum_loss(ij): pairwise_mat[ij[1], ij[1]] -= 1 # pairiwse wins on off-diagonal map(sum_win, iter(pairwise_votes)) map(sum_loss, iter(pairwise_votes)) # Divide num voters PLmatrix = pairwise_mat / nVoters # = P(D) = Placket Luce GMoM function return PLmatrix def optimize(M): ''' alternative_ids = [0,1,2] qfx2_altx = np.array([(0,1,2), (1,0,2)]) M = PLmatrix M = pairwise_voting(alternative_ids, qfx2_altx) M = array([[-0.5, 0.5, 1. ], [ 0.5, -0.5, 1. ], [ 0. , 0. , -2. ]]) ''' print(r'[vote] x = argmin_x ||Mx||_2, s.t. ||x||_2 = 1') m = M.shape[0] x0 = np.ones(m)/np.sqrt(m) f = lambda x, M: linalg.norm(M.dot(x)) con = lambda x: linalg.norm(x) - 1 cons = {'type':'eq', 'fun': con} print('[vote] running optimization') with helpers.Timer() as t: res = scipy.optimize.minimize(f, x0, args=(M,), constraints=cons) x = res['x'] xnorm = linalg.norm(x) gamma = np.abs(x / xnorm) print('[voting_rules] x = %r' % (x,)) print('[voting_rules] xnorm = %r' % (xnorm,)) print('[voting_rules] gamma = %r' % (gamma,)) return gamma def optimize2(): x = linalg.solve(M, np.zeros(M.shape[0])) x /= linalg.norm(x) def PlacketLuce(vote, gamma): ''' e.g. 
gamma = optimize() vote = np.arange(len(gamma)) np.random.shuffle(vote) pr = PlacketLuce(vote, gamma) print(vote) print(pr) print('----') ''' m = len(vote)-1 pl_term = lambda x: gamma[vote[x]] / gamma[vote[x:]].sum() prob = np.prod([pl_term(x) for x in xrange(m)]) return prob #---- def viz_votingrule_table(ranked_candiates, ranked_scores, correct_altx, title, fnum): num_top = 5 correct_rank = np.where(ranked_candiates == correct_altx)[0] if len(correct_rank) > 0: correct_rank = correct_rank[0] correct_score = ranked_scores[correct_rank] np.set_printoptions(precision=1) top_cands = ranked_candiates[0:num_top] top_scores = ranked_scores[0:num_top] print('[vote] top%r ranked cands = %r' % (num_top, top_scores)) print('[vote] top%r ranked scores = %r' % (num_top, top_cands)) print('[vote] correct candid = %r ' % correct_altx) print('[vote] correct ranking / score = %r / %r ' % (correct_rank, correct_score)) print('----') np.set_printoptions(precision=8) plt = df2.plt df2.figure(fignum=fnum, doclf=True, subplot=(1,1,1)) ax=plt.gca() #plt.plot([10,10,14,14,10],[2,4,4,2,2],'r') col_labels=map(lambda x: '%8d' % x, np.arange(num_top)+1) row_labels=['cand ids ', 'cand scores ', 'correct ranking ', 'correct score '] table_vals=[map(lambda x: '%8d' % x, top_cands), map(lambda x: '%8.2f' % x, top_scores), ['%8d' % (correct_rank)] + [' '] * (num_top-1), ['%8.2f' % correct_score] + [' '] * (num_top-1)] #matplotlib.table.Table # the rectangle is where I want to place the table #the_table = plt.table(cellText=table_vals, #rowLabels=row_labels, #colLabels=col_labels, #colWidths = [0.1]*num_top, #loc='center') def latex_table(row_labels, col_labels, table_vals): #matplotlib.rc('text', usetex=True) #print('col_labels=%r' % col_labels) #print('row_labels=%r' % row_labels) #print('table_vals=%r' % table_vals) nRows = len(row_labels) nCols = len(col_labels) def tableline(list_, rowlbl): return rowlbl + ' & '+(' & '.join(list_))+'\\\\' collbl = tableline(col_labels, ' '*16) col_strs = [collbl, '\hline'] + [tableline(rowvals, rowlbl) for rowlbl, rowvals in zip(row_labels, table_vals)] col_split = '\n' body = col_split.join(col_strs) col_placement = ' c || '+(' | '.join((['c']*nCols))) latex_str = textwrap.dedent(r''' \begin{tabular}{%s} %s \end{tabular} ''') % (col_placement, helpers.indent(body)) print(latex_str) plt.text(0, 0, latex_str, fontsize=14, horizontalalignment='left', verticalalignment='bottom', fontname='Courier New') #family='monospaced') #print(matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')) #print(matplotlib.font_manager.findfont('Courier')) #fontname r''' \begin{tabular}{ c || c | c | c | c | c} & 1 & 2 & 3 & 4 & 5\\ \hline cand ids & 3 & 38 & 32 & 40 & 5\\ cand scores & 4512.0 & 4279.0 & 4219.0 & 4100.0 & 3960.0\\ correct ranking & 25 & & & & \\ correct score & 1042.0 & & & & \\ \end{tabular} ''' latex_table(row_labels, col_labels, table_vals) df2.set_figtitle(title) def voting_rule(alternative_ids, qfx2_altx, qfx2_weight=None, rule='borda', correct_altx=None, fnum=1): K = qfx2_altx.shape[1] if rule == 'borda': score_vec = np.arange(0,K)[::-1] if rule == 'plurality': score_vec = np.zeros(K); score_vec[0] = 1 if rule == 'topk': score_vec = np.ones(K) score_vec = np.array(score_vec, dtype=np.int) print('----') title = 'Rule=%s Weighted=%r ' % (rule, not qfx2_weight is None) print('[vote] ' + title) print('[vote] score_vec = %r' % (score_vec,)) alt_score = weighted_positional_scoring_rule(alternative_ids, qfx2_altx, score_vec, qfx2_weight) ranked_candiates = 
alt_score.argsort()[::-1] ranked_scores = alt_score[ranked_candiates] viz_votingrule_table(ranked_candiates, ranked_scores, correct_altx, title, fnum) return ranked_candiates, ranked_scores def weighted_positional_scoring_rule(alternative_ids, qfx2_altx, score_vec, qfx2_weight=None): nAlts = len(alternative_ids) alt_score = np.zeros(nAlts) if qfx2_weight is None: qfx2_weight = np.ones(qfx2_altx.shape) for qfx in xrange(len(qfx2_altx)): partial_order = qfx2_altx[qfx] weights = qfx2_weight[qfx] # Remove impossible votes weights = weights[partial_order != -1] partial_order = partial_order[partial_order != -1] for ix, altx in enumerate(partial_order): alt_score[altx] += weights[ix] * score_vec[ix] return alt_score def _normalize_voters_profile(hs, qcx, voters_profile): '''Applies a temporary labeling scheme''' cx2_nx = hs.tables.cx2_nx (qfx2_nx, qfx2_cx, qfx2_fx, qfx2_score, qfx2_valid) = voters_profile # Apply temporary alternative labels alts_cxs = np.unique(qfx2_cx[qfx2_valid].flatten()) alts_nxs = np.setdiff1d(np.unique(qfx2_nx[qfx2_valid].flatten()), [0]) nx2_altx = {nx:altx for altx, nx in enumerate(alts_nxs)} nx2_altx[0] = -1 qfx2_altx = np.copy(qfx2_nx) old_shape = qfx2_altx.shape qfx2_altx.shape = (qfx2_altx.size,) for i in xrange(len(qfx2_altx)): qfx2_altx[i] = nx2_altx[qfx2_altx[i]] qfx2_altx.shape = old_shape alternative_ids = np.arange(0, len(alts_nxs)) correct_altx = nx2_altx[cx2_nx[qcx]] # Ground truth labels qfx2_weight = qfx2_score return alternative_ids, qfx2_altx, qfx2_weight, correct_altx def viz_PLmatrix(PLmatrix, qfx2_altx=None, correct_altx=None, alternative_ids=None, fnum=1): if alternative_ids is None: alternative_ids = [] if correct_altx is None: correct_altx = -1 if qfx2_altx is None: nVoters = -1 else: nVoters = len(qfx2_altx) # Separate diagonal and off diagonal PLdiagonal = np.diagonal(PLmatrix) PLdiagonal.shape = (len(PLdiagonal), 1) PLoffdiag = PLmatrix.copy(); np.fill_diagonal(PLoffdiag, 0) # Build a figure fig = df2.plt.gcf() fig.clf() # Show the off diagonal colormap = 'hot' ax = fig.add_subplot(121) cax = ax.imshow(PLoffdiag, interpolation='nearest', cmap=colormap) stride = int(np.ceil(np.log10(len(alternative_ids)))+1)*10 correct_id = alternative_ids[correct_altx] alternative_ticks = sorted(alternative_ids[::stride].tolist() + [correct_id]) ax.set_xticks(alternative_ticks) ax.set_xticklabels(alternative_ticks) ax.set_yticks(alternative_ticks) ax.set_yticklabels(alternative_ticks) ax.set_xlabel('candiate ids') ax.set_ylabel('candiate ids.') ax.set_title('Off-Diagonal') fig.colorbar(cax, orientation='horizontal') # Show the diagonal ax = fig.add_subplot(122) def duplicate_cols(M, nCols): return np.tile(M, (1, nCols)) nCols = len(PLdiagonal) / 2 cax2 = ax.imshow(duplicate_cols(PLdiagonal, nCols), interpolation='nearest', cmap=colormap) ax.set_title('diagonal') ax.set_xticks([]) ax.set_yticks(alternative_ticks) ax.set_yticklabels(alternative_ticks) df2.set_figtitle('Correct ID=%r' % (correct_id)) fig.colorbar(cax2, orientation='horizontal') fig.subplots_adjust(left=0.05, right=.99, bottom=0.01, top=0.88, wspace=0.01, hspace=0.01) #plt.set_cmap('jet', plt.cm.jet,norm = LogNorm()) def test_voting_rules(hs, qcx, K, fnum=1): voters_profile = build_voters_profile(hs, qcx, K) normal_profile = _normalize_voters_profile(hs, qcx, voters_profile) alternative_ids, qfx2_altx, qfx2_weight, correct_altx = normal_profile #alternative_ids, qfx2_altx = filter_alternative_frequencies(alternative_ids, qfx2_altx, correct_altx) m = len(alternative_ids) n = len(qfx2_altx) k = 
len(qfx2_altx.T) bigo_breaking = helpers.int_comma_str((m+k)*k*n) bigo_gmm = helpers.int_comma_str(int(m**2.376)) bigo_gmm3 = helpers.int_comma_str(int(m**3)) print('[voting] m = num_alternatives = %r ' % len(alternative_ids)) print('[voting] n = nVoters = %r ' % len(qfx2_altx)) print('[voting] k = top_k_breaking = %r ' % len(qfx2_altx.T)) print('[voting] Computing breaking O((m+k)*k*n) = %s' % bigo_breaking) print('[voting] Computing GMoM breaking O(m^{2.376}) < O(m^3) = %s < %s' % (bigo_gmm, bigo_gmm3)) #--- def voting_rule_(weighting, rule_name, fnum): ranking = voting_rule(alternative_ids, qfx2_altx, weighting, rule_name, correct_altx, fnum) return ranking, fnum + 1 #weighted_topk_ranking, fnum = voting_rule_(qfx2_weight, 'topk', fnum) #weighted_borda_ranking, fnum = voting_rule_(qfx2_weight, 'borda', fnum) #weighted_plurality_ranking, fnum = voting_rule_(qfx2_weight, 'plurality', fnum) #topk_ranking, fnum = voting_rule_(None, 'topk', fnum) #borda_ranking, fnum = voting_rule_(None, 'borda', fnum) #plurality_ranking, fnum = voting_rule_(None, 'plurality', fnum) #--- PLmatrix = build_pairwise_votes(alternative_ids, qfx2_altx) viz_PLmatrix(PLmatrix, qfx2_altx, correct_altx, alternative_ids, fnum) # Took 52 seconds on bakerstreet with (41x41) matrix gamma = optimize(PLmatrix) # (41x41) -> 52 seconds gamma = optimize(PLmatrix[:-1,:-1]) # (40x40) -> 83 seconds gamma = optimize(PLmatrix[:-11,:-11]) # (30x30) -> 45 seconds) gamma = optimize(PLmatrix[:-21,:-21]) # (20x20) -> 21 seconds) gamma = optimize(PLmatrix[:-31,:-31]) # (10x10) -> 4 seconds) gamma = optimize(PLmatrix[:-36,:-36]) # ( 5x 5) -> 2 seconds) def PlacketLuceWinnerProb(gamma): nAlts = len(gamma) mask = np.ones(nAlts, dtype=np.bool) ax2_prob = np.zeros(nAlts) for ax in xrange(nAlts): mask[ax] = False ax2_prob[ax] = gamma[ax] / np.sum(gamma[mask]) mask[ax] = True ax2_prob = ax2_prob / ax2_prob.sum() return ax2_prob ax2_prob = PlacketLuceWinnerProb(gamma) pl_ranking = ax2_prob.argsort()[::-1] pl_confidence = ax2_prob[pl_ranking] correct_rank = np.where(pl_ranking == correct_altx)[0][0] ranked_altxconf = zip(pl_ranking, pl_confidence) print('Top 5 Ranked altx/confidence = %r' % (ranked_altxconf[0:5],)) print('Correct Rank=%r altx/confidence = %r' % (correct_rank, ranked_altxconf[correct_rank],)) df2.update() #b = np.zeros(4) #b[-1] = 1 #[- + +] #[+ - +] x = b #[+ + -] 1 #[1 0 0] #X = np.vstack([M,[1,0,0]]) #print(X) #print(b) #x = linalg.solve(X, b) def test(): from numpy import linalg linalg.lstsq ''' Test Data: K = 5 votes = [(3,2,1,4), (4,1,2,3), (4, 2, 3, 1), (1, 2, 3, 4)] qfx2_utilities = [[(nx, nx, nx**3, k) for k, nx in enumerate(vote)] for vote in votes] M, altx2_nx= _utilities2_pairwise_breaking(qfx2_utilities) from numpy.linalg import svd, inv from numpy import eye, diag, zeros #Because s is sorted, and M is rank deficient, the value s[-1] should be 0 np.set_printoptions(precision=2, suppress=True, linewidth=80) #The svd is: #u * s * v = M u.dot(diag(s)).dot(v) = M #u is unitary: inv(u).dot(u) == eye(len(s)) diag(s).dot(v) == inv(u).dot(M) u.dot(diag(s)) == M.dot(inv(v)) And because s[-1] is 0 u.dot(diag(s))[:,-1:] == zeros((len(s),1)) Because we want to find Mx = 0 So flip the left and right sides M.dot(inv(v)[:,-1:]) == u.dot(diag(s))[:,-1:] And you find M = M x = inv(v)[:,-1:] 0 = u.dot(diag(s))[:,-1:] So we have the solution to our problem as x = inv(v)[:,-1:] Furthermore it is true that inv(v)[:,-1:].T == v[-1:,:] because v is unitary and the last vector in v corresponds to a singular vector because M is rank m-1 ALSO: 
v.dot(inv(v)) = eye(len(s)) so v[-1].dot(inv(v)[:,-1:]) == 1 this means that v[-1] is non-zero, and v[-1].T == inv(v[:,-1:]) So all of this can be done as... ''' # We could also say def eq(M1, M2): print(str(M1)+'\n = \n'+str(M2)) # Compute SVD (u, s_, v) = linalg.svd(M) s = diag(s_) #--- print('-------') print('M =\n%s' % (M,)) print('-------') print('u =\n%s' % (u,)) print('-------') print('s =\n%s' % (s,)) print('-------') print('v =\n%s' % (v,)) print('-------') print('u s v = M') eq(u.dot(s).dot(v), M) # We want to find Mx = 0 print('-------') print('The last value of s is zeros because M is rank m-1 and s is sorted') print('s =\n%s' % (s,)) print('-------') print('Therefore the last column of u.dot(s) is zeros') print('v is unitary so v.T = inv(v)') print('u s = M v.T') eq(u.dot(s), M.dot(v.T)) print('-------') print('We want to find Mx = 0, and the last column of LHS corresponds to this') print('u s = M v.T') eq(u.dot(s), M.dot(v.T)) # The right column u.dot(s) is #Ok, so v[-1] can be negative, but that's ok # its unitary, we can just negate it. # or we can take the absolute value or l2 normalize it # x = v[-1] = inv(v)[:,-1] # so # x.dot(x) == 1 # hmmmm # I need to find a way to proove # components of x are all negative or all # positive # Verify s is 0 x = v[-1]
apache-2.0
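The voting_rules record above solves x = argmin_x ||Mx||_2 subject to ||x||_2 = 1 with scipy.optimize.minimize, and its test() docstring argues the same solution falls out of an SVD. Below is a short sketch of that shortcut, reusing the 3x3 matrix from the optimize() docstring; it only illustrates the linear-algebra argument and is not a drop-in replacement for the repo code.

import numpy as np

def min_norm_unit_vector(M):
    # argmin_x ||M x||_2 with ||x||_2 = 1 is the right singular vector
    # that belongs to the smallest singular value of M
    _, _, vt = np.linalg.svd(M)
    return vt[-1]

M = np.array([[-0.5,  0.5,  1.0],
              [ 0.5, -0.5,  1.0],
              [ 0.0,  0.0, -2.0]])
x = min_norm_unit_vector(M)
print(x, np.linalg.norm(M.dot(x)))   # x ~ [0.707, 0.707, 0], ||Mx|| ~ 0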
arcyfelix/ML-DL-AI
Supervised Learning/GANs/GAN.py
1
3364
# -*- coding: utf-8 -*- """ GAN Example Use a generative adversarial network (GAN) to generate digit images from a noise distribution. References: - Generative adversarial nets. I Goodfellow, J Pouget-Abadie, M Mirza, B Xu, D Warde-Farley, S Ozair, Y. Bengio. Advances in neural information processing systems, 2672-2680. Links: - [GAN Paper](https://arxiv.org/pdf/1406.2661.pdf). """ from __future__ import division, print_function, absolute_import import matplotlib.pyplot as plt import numpy as np import tensorflow as tf import tflearn # Data loading and preprocessing import tflearn.datasets.mnist as mnist X, Y, testX, testY = mnist.load_data() image_dim = 784 # 28*28 pixels z_dim = 200 # Noise data points total_samples = len(X) # Generator def generator(x, reuse=False): with tf.variable_scope('Generator', reuse=reuse): x = tflearn.fully_connected(x, 256, activation='relu') x = tflearn.fully_connected(x, image_dim, activation='sigmoid') return x # Discriminator def discriminator(x, reuse=False): with tf.variable_scope('Discriminator', reuse=reuse): x = tflearn.fully_connected(x, 256, activation='relu') x = tflearn.fully_connected(x, 1, activation='sigmoid') return x # Build Networks gen_input = tflearn.input_data(shape=[None, z_dim], name='input_noise') disc_input = tflearn.input_data(shape=[None, 784], name='disc_input') gen_sample = generator(gen_input) disc_real = discriminator(disc_input) disc_fake = discriminator(gen_sample, reuse=True) # Define Loss disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake)) gen_loss = -tf.reduce_mean(tf.log(disc_fake)) # Build Training Ops for both Generator and Discriminator. # Each network optimization should only update its own variable, thus we need # to retrieve each network variables (with get_layer_variables_by_scope) and set # 'placeholder=None' because we do not need to feed any target. gen_vars = tflearn.get_layer_variables_by_scope('Generator') gen_model = tflearn.regression(gen_sample, placeholder=None, optimizer='adam', loss=gen_loss, trainable_vars=gen_vars, batch_size=64, name='target_gen', op_name='GEN') disc_vars = tflearn.get_layer_variables_by_scope('Discriminator') disc_model = tflearn.regression(disc_real, placeholder=None, optimizer='adam', loss=disc_loss, trainable_vars=disc_vars, batch_size=64, name='target_disc', op_name='DISC') # Define GAN model, that output the generated images. gan = tflearn.DNN(gen_model) # Training # Generate noise to feed to the generator z = np.random.uniform(-1., 1., size=[total_samples, z_dim]) # Start training, feed both noise and real images. gan.fit(X_inputs={gen_input: z, disc_input: X}, Y_targets=None, n_epoch=100) # Generate images from noise, using the generator network. f, a = plt.subplots(2, 10, figsize=(10, 4)) for i in range(10): for j in range(2): # Noise input. z = np.random.uniform(-1., 1., size=[1, z_dim]) # Generate image from noise. Extend to 3 channels for matplot figure. temp = [[ii, ii, ii] for ii in list(gan.predict([z])[0])] a[j][i].imshow(np.reshape(temp, (28, 28, 3))) f.show() plt.draw() plt.waitforbuttonpress()
apache-2.0
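The GAN record above wires its discriminator and generator objectives directly into TFLearn regression ops. Here is a tiny numpy sketch of those two losses on made-up discriminator outputs, just to make the formulas disc_loss = -E[log D(x) + log(1 - D(G(z)))] and gen_loss = -E[log D(G(z))] concrete; it does not train anything.

import numpy as np

def gan_losses(disc_real, disc_fake, eps=1e-8):
    # disc_real: D(x) on real samples, disc_fake: D(G(z)) on generated samples, both in (0, 1)
    disc_loss = -np.mean(np.log(disc_real + eps) + np.log(1.0 - disc_fake + eps))
    gen_loss = -np.mean(np.log(disc_fake + eps))
    return disc_loss, gen_loss

d_real = np.array([0.90, 0.80, 0.95])   # hypothetical discriminator outputs on real digits
d_fake = np.array([0.10, 0.20, 0.05])   # hypothetical outputs on generated digits
print(gan_losses(d_real, d_fake))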
Daniel-Brosnan-Blazquez/DIT-100
debugging/trajectory_planning_profiles/trapezoidal-profile.py
1
7690
import numpy import time from matplotlib import pyplot def main (params): angle = params['p0'] vel = params['v0'] sign = params['sign'] # Plan the trajectory if it is not planned T = 0 Ta = 0 Td = 0 dt = params['dt'] if not params['trajectory']: # Maximum acceleration and velocity values in degrees/s^2 and # degrees/s respectively amax = params['acc_limit_d']*sign*(-1) vmax = params['vel_limit']*sign*(-1) v0 = vel h = angle vlim = vmax # Check if the trajectory is feasible print "abs (amax*h) >= v0**2/2.0 = %s" % (abs (amax*h) >= v0**2/2.0) if abs (amax*h) >= v0**2/2.0: # The trajectory is feasible # Check if the maximum value of velocity can be reached if abs (h*amax) > vmax**2 - v0**2/2.0: # The maximum value of velocity can be reached Ta = (vmax - v0)/amax Td = vmax/amax term1 = abs (h/vmax) term2 = (vmax/(2*amax)) * (1 - (v0/vmax))**2 term3 = (vmax/(2*amax)) T = term1 + term2 + term3 else: # The maximum value of velocity can't be reached vlim = ((abs (h * amax) + v0**2/2.0)**(1/2.0))*sign*(-1) Ta = abs ((vlim - v0)/amax) Td = abs (vlim/amax) T = Ta + Td # end if # The time has to be positive Ta = abs (Ta) Td = abs (Td) T = abs (T) print "Ta = %s, Td = %s" % (Ta, Td) params['trajectory'] = True params['T'] = T params['Ta'] = Ta params['Td'] = Td params['T_sign'] = sign*(-1) params['vv'] = vlim # if Ta > dt and Td > dt: # params['trajectory'] = True # params['T'] = T # params['Ta'] = Ta # params['Td'] = Td # params['T_sign'] = sign*(-1) # params['vv'] = vlim # else: # Ta = 0 # Td = 0 # T = 0 # end if # end if return def plot (params): t = 0 interval = params['dt'] # Sign sign = params['T_sign'] # Maximum values amax = params['acc_limit_d']*sign vmax = params['vel_limit']*sign # Buffers to store the motion positions = [] vels = [] accs = [] # Initial values of the motion v0 = params['v0'] p0 = params['p0'] vv = params['vv'] T = params['T'] Ta = params['Ta'] Td = params['Td'] # Acceleration phase while t < Ta: # Position pos = p0 + v0*t + ((vv - v0)/(2*Ta))*t**2 positions.append (pos) # Velocity vel = v0 + ((vv - v0)/(Ta))*t vels.append (vel) # Acceleration acc = (vv - v0)/Ta accs.append (acc) t += interval # end while # Constant velocity phase while t < (T - Td): # Position pos = p0 + v0*(Ta/2.0) + vv*(t-(Ta/2.0)) positions.append (pos) # Velocity vel = vv vels.append (vel) # Acceleration acc = 0 accs.append (acc) t += interval # end while # Deceleration phase while t < T: # Position pos = 0 - (vv/(2*Td))*(T-t)**2 positions.append (pos) # Velocity vel = (vv/Td)*(T-t) vels.append (vel) # Acceleration acc = -(vv/Td) accs.append (acc) t += interval # end while fig = pyplot.figure (1, figsize = (20,10)) s = fig.add_subplot (311) p, = s.plot(positions) s.grid (True) s.set_title ("position") s = fig.add_subplot (312) p, = s.plot(vels) s.grid (True) s.set_title ("velocity") s = fig.add_subplot (313) p, = s.plot(accs) s.grid (True) s.set_title ("acceleration") pyplot.show () pyplot.close (1) return if __name__ == "__main__": params = {} # Period params['dt'] = 0.015 # Flag to indicate if it is necessary to compute the trajectory # (not needed here) params['trajectory'] = False # Velocity, acceleration and jerk limits in degrees/s^2 params['vel_limit'] = 150.0 rad_to_degrees = 180.0/numpy.pi radius = 0.3 # m/s^2 params['acc_limit'] = 7.5 # degrees/s^2 params['acc_limit_d'] = (params['acc_limit']*rad_to_degrees)/radius # # p0 = 0. Checked, trajectory unfeasible # # p0 # params['p0'] = 0.0 # # v0 # params['v0'] = 100.0 # p0 > 50 v0 = 0. 
Checked, trajectory feasible # p0 params['p0'] = 80.0 # v0 params['v0'] = 0.0 # # p0 > 50 v0 < limit. Checked, trajectory feasible # # p0 # params['p0'] = 80.0 # # v0 # params['v0'] = 50.0 # # p0 > 50 v0 = limit. Checked, trajectory feasible # # p0 # params['p0'] = 80.0 # # v0 # params['v0'] = 100.0 # # p0 > 50 v0 > limit. Checked, trajectory feasible # # p0 # params['p0'] = 80.0 # # v0 # params['v0'] = -150.0 # # p0 < 50 p0 > 0 v0 = 0. Checked, trajectory feasible # # p0 # params['p0'] = 20.0 # # v0 # params['v0'] = 0.0 # # p0 < 50 p0 > 0 v0 < limit. REVIEW IT!!!!!!!!! # # p0 # params['p0'] = 20.0 # # v0 # params['v0'] = 50.0 # # p0 < 50 p0 > 0 v0 = limit. Checked, trajectory feasible # # p0 # params['p0'] = 20.0 # # v0 # params['v0'] = 100.0 # # p0 < 50 p0 > 0 v0 > limit. Checked, trajectory feasible # # p0 # params['p0'] = 20.0 # # v0 # params['v0'] = 150.0 # # p0 < -50 v0 = 0. Checked, trajectory feasible # # p0 # params['p0'] = -80.0 # # v0 # params['v0'] = 0.0 # # p0 < -50 v0 < limit. Checked, trajectory feasible # # p0 # params['p0'] = -80.0 # # v0 # params['v0'] = 50.0 # # p0 < -50 v0 = limit. Checked, trajectory feasible # # p0 # params['p0'] = -80.0 # # v0 # params['v0'] = 100.0 # # p0 < -50 v0 > limit. Checked, trajectory feasible # # p0 # params['p0'] = -80.0 # # v0 # params['v0'] = 150.0 # # p0 > -50 p0 < 0 v0 = 0. Checked, trajectory feasible # # p0 # params['p0'] = -20.0 # # v0 # params['v0'] = 0.0 # # p0 > -50 p0 < 0 v0 < limit. Checked, trajectory feasible # # p0 # params['p0'] = -20.0 # # v0 # params['v0'] = -50.0 # # p0 > -50 p0 < 0 v0 = limit. Checked, trajectory feasible # # p0 # params['p0'] = -20.0 # # v0 # params['v0'] = 100.0 # # p0 > -50 p0 < 0 v0 > limit. Checked, trajectory feasible # # p0 # params['p0'] = -20.0 # # v0 # params['v0'] = 150.0 # # p0 > -50 p0 < 0 v0 > limit. Checked, trajectory feasible # # p0 # params['p0'] = -20.0 # # v0 # params['v0'] = 200.0 # sign params['sign'] = 1 # params['sign'] = -1 # # p0 # params['p0'] = 11.0962258945 # # params['p0'] = 22.0 # # v0 # params['v0'] = 71.19 # # params['v0'] = 0.0 main(params) print "Trajectory performed: %s" % params['trajectory'] if params['trajectory']: T = params['T'] Ta = params['Ta'] Td = params['Td'] print "T = %s, Ta = %s, Td = %s" %(T, Ta, Td) plot (params)
gpl-3.0
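The trajectory planner in the record above first checks the feasibility condition abs(amax*h) >= v0**2/2 and then derives the acceleration, cruise and deceleration times of a trapezoidal velocity profile. A compact sketch of that timing computation follows, with the sign bookkeeping of the original dropped and all quantities assumed positive; the example numbers roughly match the record's p0 = 80 deg, vel_limit = 150 deg/s and acc_limit_d of about 1432 deg/s^2.

def trapezoid_times(h, v0, vmax, amax):
    # h: displacement, v0: initial velocity, vmax/amax: velocity and acceleration limits
    assert abs(amax * h) >= v0 ** 2 / 2.0, "trajectory not feasible"
    if abs(h * amax) > vmax ** 2 - v0 ** 2 / 2.0:
        # the velocity limit is reached: accelerate, cruise, decelerate
        Ta = (vmax - v0) / amax
        Td = vmax / amax
        T = abs(h / vmax) + (vmax / (2 * amax)) * (1 - v0 / vmax) ** 2 + vmax / (2 * amax)
    else:
        # the velocity limit is not reached: triangular profile with peak velocity vlim
        vlim = (abs(h * amax) + v0 ** 2 / 2.0) ** 0.5
        Ta, Td = abs((vlim - v0) / amax), abs(vlim / amax)
        T = Ta + Td
    return abs(Ta), abs(Td), abs(T)

print(trapezoid_times(h=80.0, v0=0.0, vmax=150.0, amax=1432.4))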
rigetticomputing/grove
grove/tomography/state_tomography.py
1
11664
############################################################################## # Copyright 2017-2018 Rigetti Computing # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## import logging import numpy as np import matplotlib.pyplot as plt from pyquil.quilbase import Pragma from scipy.sparse import csr_matrix, coo_matrix from pyquil.quil import Program import grove.tomography.operator_utils from grove.tomography.tomography import TomographyBase, TomographySettings, DEFAULT_SOLVER_KWARGS from grove.tomography import tomography import grove.tomography.utils as ut import grove.tomography.operator_utils as o_ut _log = logging.getLogger(__name__) qt = ut.import_qutip() cvxpy = ut.import_cvxpy() UNIT_TRACE = 'unit_trace' POSITIVE = 'positive' DEFAULT_STATE_TOMO_SETTINGS = TomographySettings( constraints={UNIT_TRACE}, solver_kwargs=DEFAULT_SOLVER_KWARGS ) def _prepare_c_jk_m(readout_povm, pauli_basis, channel_ops): """ Prepare the coefficient matrix for state tomography. This function uses sparse matrices for much greater efficiency. The coefficient matrix is defined as: .. math:: C_{(jk)m} = \tr{\Pi_{s_j} \Lambda_k(P_m)} = \sum_{r}\pi_{jr}(\mathcal{R}_{k})_{rm} where :math:`\Lambda_k(\cdot)` is the quantum map corresponding to the k-th pre-measurement channel, i.e., :math:`\Lambda_k(\rho) = E_k \rho E_k^\dagger` where :math:`E_k` is the k-th channel operator. This map can also be represented via its transfer matrix :math:`\mathcal{R}_{k}`. In that case one also requires the overlap between the (generalized) Pauli basis ops and the projection operators :math:`\pi_{jl}:=\sbraket{\Pi_j}{P_l} = \tr{\Pi_j P_l}`. See the grove documentation on tomography for detailed information. :param DiagonalPOVM readout_povm: The POVM corresponding to the readout plus classifier. :param OperatorBasis pauli_basis: The (generalized) Pauli basis employed in the estimation. :param list channel_ops: The pre-measurement channel operators as `qutip.Qobj` :return: The coefficient matrix necessary to set up the binomial state tomography problem. :rtype: scipy.sparse.csr_matrix """ channel_transfer_matrices = [pauli_basis.transfer_matrix(qt.to_super(ek)) for ek in channel_ops] # This bit could be more efficient but does not run super long and is thus preserved for # readability. pi_jr = csr_matrix( [pauli_basis.project_op(n_j).toarray().ravel() for n_j in readout_povm.ops]) # Dict used for constructing our sparse matrix, keys are tuples (row_index, col_index), values # are the non-zero elements of the final matrix. 
c_jk_m_elms = {} # This explicitly exploits the sparsity of all operators involved for k in range(len(channel_ops)): pi_jr__rk_rm = (pi_jr * channel_transfer_matrices[k]).tocoo() for (j, m, val) in ut.izip(pi_jr__rk_rm.row, pi_jr__rk_rm.col, pi_jr__rk_rm.data): # The multi-index (j,k) is enumerated in column-major ordering (like Fortran arrays) c_jk_m_elms[(j + k * readout_povm.pi_basis.dim, m)] = val.real # create sparse matrix from COO-format (see scipy.sparse docs) _keys, _values = ut.izip(*c_jk_m_elms.items()) _rows, _cols = ut.izip(*_keys) c_jk_m = coo_matrix((list(_values), (list(_rows), list(_cols))), shape=(readout_povm.pi_basis.dim * len(channel_ops), pauli_basis.dim)).tocsr() return c_jk_m class StateTomography(TomographyBase): """ A StateTomography object encapsulates the result of quantum state estimation from tomographic data. It provides convenience functions for visualization and computing state fidelities. """ __tomography_type__ = "STATE" @staticmethod def estimate_from_ssr(histograms, readout_povm, channel_ops, settings): """ Estimate a density matrix from single shot histograms obtained by measuring bitstrings in the Z-eigenbasis after application of given channel operators. :param numpy.ndarray histograms: The single shot histograms, `shape=(n_channels, dim)`. :param DiagognalPOVM readout_povm: The POVM corresponding to the readout plus classifier. :param list channel_ops: The tomography measurement channels as `qutip.Qobj`'s. :param TomographySettings settings: The solver and estimation settings. :return: The generated StateTomography object. :rtype: StateTomography """ nqc = len(channel_ops[0].dims[0]) pauli_basis = grove.tomography.operator_utils.PAULI_BASIS ** nqc pi_basis = readout_povm.pi_basis if not histograms.shape[1] == pi_basis.dim: # pragma no coverage raise ValueError("Currently tomography is only implemented for two-level systems.") # prepare the log-likelihood function parameters, see documentation n_kj = np.asarray(histograms) c_jk_m = _prepare_c_jk_m(readout_povm, pauli_basis, channel_ops) rho_m = cvxpy.Variable(pauli_basis.dim) p_jk = c_jk_m * rho_m obj = -n_kj.ravel() * cvxpy.log(p_jk) p_jk_mat = cvxpy.reshape(p_jk, pi_basis.dim, len(channel_ops)) # cvxpy has col-major order # Default constraints: # MLE must describe valid probability distribution # i.e., for each k, p_jk must sum to one and be element-wise non-negative: # 1. \sum_j p_jk == 1 for all k # 2. p_jk >= 0 for all j, k # where p_jk = \sum_m c_jk_m rho_m constraints = [ p_jk >= 0, np.matrix(np.ones((1, pi_basis.dim))) * p_jk_mat == 1, ] rho_m_real_imag = sum((rm * o_ut.to_realimag(Pm) for (rm, Pm) in ut.izip(rho_m, pauli_basis.ops)), 0) if POSITIVE in settings.constraints: if tomography._SDP_SOLVER.is_functional(): constraints.append(rho_m_real_imag >> 0) else: # pragma no coverage _log.warning("No convex solver capable of semi-definite problems installed.\n" "Dropping the positivity constraint on the density matrix.") if UNIT_TRACE in settings.constraints: # this assumes that the first element of the Pauli basis is always proportional to # the identity constraints.append(rho_m[0, 0] == 1. / pauli_basis.ops[0].tr().real) prob = cvxpy.Problem(cvxpy.Minimize(obj), constraints) _log.info("Starting convex solver") prob.solve(solver=tomography.SOLVER, **settings.solver_kwargs) if prob.status != cvxpy.OPTIMAL: # pragma no coverage _log.warning("Problem did not converge to optimal solution. 
" "Solver settings: {}".format(settings.solver_kwargs)) return StateTomography(np.array(rho_m.value).ravel(), pauli_basis, settings) def __init__(self, rho_coeffs, pauli_basis, settings): """ Construct a StateTomography to encapsulate the result of estimating the quantum state from a quantum tomography measurement. :param numpy.ndarray r_est: The estimated quantum state represented in a given (generalized) Pauli basis. :param OperatorBasis pauli_basis: The employed (generalized) Pauli basis. :param TomographySettings settings: The settings used to estimate the state. """ self.rho_coeffs = rho_coeffs self.pauli_basis = pauli_basis self.rho_est = sum((r_m * p_m for r_m, p_m in ut.izip(rho_coeffs, pauli_basis.ops))) self.settings = settings def fidelity(self, other): """ Compute the quantum state fidelity of the estimated state with another state. :param qutip.Qobj other: The other quantum state. :return: The fidelity, a real number between 0 and 1. :rtype: float """ return qt.fidelity(self.rho_est, other) def plot_state_histogram(self, ax): """ Visualize the complex matrix elements of the estimated state. :param matplotlib.Axes ax: A matplotlib Axes object to plot into. """ title = "Estimated state" nqc = int(round(np.log2(self.rho_est.data.shape[0]))) labels = ut.basis_labels(nqc) return ut.state_histogram(self.rho_est, ax, title) def plot(self): """ Visualize the state. :return: The generated figure. :rtype: matplotlib.Figure """ width = 10 # The pleasing golden ratio. height = width / 1.618 f = plt.figure(figsize=(width, height)) ax = f.add_subplot(111, projection="3d") self.plot_state_histogram(ax) return f def state_tomography_programs(state_prep, qubits=None, rotation_generator=tomography.default_rotations): """ Yield tomographic sequences that prepare a state with Quil program `state_prep` and then append tomographic rotations on the specified `qubits`. If `qubits is None`, it assumes all qubits in the program should be tomographically rotated. :param Program state_prep: The program to prepare the state to be tomographed. :param list|NoneType qubits: A list of Qubits or Numbers, to perform the tomography on. If `None`, performs it on all in state_prep. :param generator rotation_generator: A generator that yields tomography rotations to perform. :return: Program for state tomography. :rtype: Program """ if qubits is None: qubits = state_prep.get_qubits() for tomography_program in rotation_generator(*qubits): state_tomography_program = Program(Pragma("PRESERVE_BLOCK")) state_tomography_program.inst(state_prep) state_tomography_program.inst(tomography_program) state_tomography_program.inst(Pragma("END_PRESERVE_BLOCK")) yield state_tomography_program def do_state_tomography(preparation_program, nsamples, cxn, qubits=None, use_run=False): """ Method to perform both a QPU and QVM state tomography, and use the latter as as reference to calculate the fidelity of the former. :param Program preparation_program: Program to execute. :param int nsamples: Number of samples to take for the program. :param QVMConnection|QPUConnection cxn: Connection on which to run the program. :param list qubits: List of qubits for the program. to use in the tomography analysis. :param bool use_run: If ``True``, use append measurements on all qubits and use ``cxn.run`` instead of ``cxn.run_and_measure``. :return: The state tomogram. 
:rtype: StateTomography """ return tomography._do_tomography(preparation_program, nsamples, cxn, qubits, tomography.MAX_QUBITS_STATE_TOMO, StateTomography, state_tomography_programs, DEFAULT_STATE_TOMO_SETTINGS, use_run=use_run)
apache-2.0
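The state tomography record above builds a sparse coefficient matrix c_jk_m, predicts outcome probabilities p_jk = c_jk_m * rho_m and minimizes the negative log-likelihood of the measured histograms with cvxpy. A toy numpy sketch of just that objective is given here; the 2x2 coefficient matrix, counts and Pauli coefficients are invented, and the real positivity and unit-trace constraints are left out.

import numpy as np

def neg_log_likelihood(n_jk, c_jk_m, rho_m, eps=1e-12):
    # n_jk: observed counts per (outcome j, channel k); rho_m: state coefficients in the Pauli basis
    p_jk = c_jk_m @ rho_m                 # predicted outcome probabilities
    return -np.sum(n_jk * np.log(p_jk + eps))

c_jk_m = np.array([[0.5,  0.5],
                   [0.5, -0.5]])          # made-up single-qubit coefficients
rho_m = np.array([1.0, 0.4])
n_jk = np.array([70, 30])
print(neg_log_likelihood(n_jk, c_jk_m, rho_m))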
giorgiop/scikit-learn
examples/linear_model/plot_sgd_loss_functions.py
73
1232
""" ========================== SGD: convex loss functions ========================== A plot that compares the various convex loss functions supported by :class:`sklearn.linear_model.SGDClassifier` . """ print(__doc__) import numpy as np import matplotlib.pyplot as plt def modified_huber_loss(y_true, y_pred): z = y_pred * y_true loss = -4 * z loss[z >= -1] = (1 - z[z >= -1]) ** 2 loss[z >= 1.] = 0 return loss xmin, xmax = -4, 4 xx = np.linspace(xmin, xmax, 100) lw = 2 plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw, label="Zero-one loss") plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw, label="Hinge loss") plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw, label="Perceptron loss") plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw, label="Log loss") plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw, label="Squared hinge loss") plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw, linestyle='--', label="Modified Huber loss") plt.ylim((0, 8)) plt.legend(loc="upper right") plt.xlabel(r"Decision function $f(x)$") plt.ylabel("$L(y, f(x))$") plt.show()
bsd-3-clause
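The scikit-learn record above defines the modified Huber loss by masking an array in place, which hides the piecewise structure a little. Below is an equivalent closed form as a function of the margin z = y * f(x), plus a few spot checks, assuming labels in {-1, +1}; the values agree with the record's modified_huber_loss.

import numpy as np

def modified_huber(z):
    # quadratic hinge max(0, 1 - z)^2 for z >= -1, linear -4z below that
    return np.where(z >= -1, np.maximum(0.0, 1.0 - z) ** 2, -4.0 * z)

for z in (-2.0, -1.0, 0.0, 0.5, 1.0, 2.0):
    print(z, float(modified_huber(np.array(z))))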
fmfn/UnbalancedDataset
examples/applications/plot_outlier_rejections.py
2
4354
""" =============================================================== Customized sampler to implement an outlier rejections estimator =============================================================== This example illustrates the use of a custom sampler to implement an outlier rejections estimator. It can be used easily within a pipeline in which the number of samples can vary during training, which usually is a limitation of the current scikit-learn pipeline. """ # Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com> # License: MIT import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_moons, make_blobs from sklearn.ensemble import IsolationForest from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report from imblearn import FunctionSampler from imblearn.pipeline import make_pipeline print(__doc__) rng = np.random.RandomState(42) def plot_scatter(X, y, title): """Function to plot some data as a scatter plot.""" plt.figure() plt.scatter(X[y == 1, 0], X[y == 1, 1], label="Class #1") plt.scatter(X[y == 0, 0], X[y == 0, 1], label="Class #0") plt.legend() plt.title(title) ############################################################################## # Toy data generation ############################################################################## ############################################################################## # We are generating some non Gaussian data set contaminated with some unform # noise. moons, _ = make_moons(n_samples=500, noise=0.05) blobs, _ = make_blobs( n_samples=500, centers=[(-0.75, 2.25), (1.0, 2.0)], cluster_std=0.25 ) outliers = rng.uniform(low=-3, high=3, size=(500, 2)) X_train = np.vstack([moons, blobs, outliers]) y_train = np.hstack( [ np.ones(moons.shape[0], dtype=np.int8), np.zeros(blobs.shape[0], dtype=np.int8), rng.randint(0, 2, size=outliers.shape[0], dtype=np.int8), ] ) plot_scatter(X_train, y_train, "Training dataset") ############################################################################## # We will generate some cleaned test data without outliers. moons, _ = make_moons(n_samples=50, noise=0.05) blobs, _ = make_blobs( n_samples=50, centers=[(-0.75, 2.25), (1.0, 2.0)], cluster_std=0.25 ) X_test = np.vstack([moons, blobs]) y_test = np.hstack( [np.ones(moons.shape[0], dtype=np.int8), np.zeros(blobs.shape[0], dtype=np.int8)] ) plot_scatter(X_test, y_test, "Testing dataset") ############################################################################## # How to use the :class:`~imblearn.FunctionSampler` ############################################################################## ############################################################################## # We first define a function which will use # :class:`~sklearn.ensemble.IsolationForest` to eliminate some outliers from # our dataset during training. The function passed to the # :class:`~imblearn.FunctionSampler` will be called when using the method # ``fit_resample``. 
def outlier_rejection(X, y): """This will be our function used to resample our dataset.""" model = IsolationForest(max_samples=100, contamination=0.4, random_state=rng) model.fit(X) y_pred = model.predict(X) return X[y_pred == 1], y[y_pred == 1] reject_sampler = FunctionSampler(func=outlier_rejection) X_inliers, y_inliers = reject_sampler.fit_resample(X_train, y_train) plot_scatter(X_inliers, y_inliers, "Training data without outliers") ############################################################################## # Integrate it within a pipeline ############################################################################## ############################################################################## # By eliminating outliers before the training, the classifier will be less # affected during the prediction. pipe = make_pipeline( FunctionSampler(func=outlier_rejection), LogisticRegression(solver="lbfgs", multi_class="auto", random_state=rng), ) y_pred = pipe.fit(X_train, y_train).predict(X_test) print(classification_report(y_test, y_pred)) clf = LogisticRegression(solver="lbfgs", multi_class="auto", random_state=rng) y_pred = clf.fit(X_train, y_train).predict(X_test) print(classification_report(y_test, y_pred)) plt.show()
mit
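The imbalanced-learn record above wraps IsolationForest in a FunctionSampler so outliers are dropped inside the pipeline's fit. For readers without imblearn, a rough standalone equivalent of that resampling step is sketched here using only scikit-learn; the contamination value and toy data are arbitrary.

import numpy as np
from sklearn.ensemble import IsolationForest

def reject_outliers(X, y, contamination=0.4, random_state=0):
    # keep only the samples IsolationForest labels as inliers (+1)
    keep = IsolationForest(contamination=contamination,
                           random_state=random_state).fit_predict(X) == 1
    return X[keep], y[keep]

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 1, (100, 2)), rng.uniform(-6, 6, (20, 2))])
y = np.hstack([np.ones(100, dtype=int), np.zeros(20, dtype=int)])
X_in, y_in = reject_outliers(X, y)
print(X.shape, X_in.shape)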
weidel-p/nest-simulator
pynest/examples/pulsepacket.py
12
11358
# -*- coding: utf-8 -*- # # pulsepacket.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. """ Pulse packet example -------------------- This script compares the average and individual membrane potential excursions in response to a single pulse packet with an analytically acquired voltage trace (see: Diesmann [1]_) A pulse packet is a transient spike volley with a Gaussian rate profile. The user can specify the neural parameters, the parameters of the pulse-packet and the number of trials. References ~~~~~~~~~~~~ .. [1] Diesmann M. 2002. Dissertation. Conditions for stable propagation of synchronous spiking in cortical neural networks: Single neuron dynamics and network properties. http://d-nb.info/968772781/34. """ ############################################################################### # First, we import all necessary modules for simulation, analysis and # plotting. import scipy.special as sp import nest import numpy import matplotlib.pyplot as plt # Properties of pulse packet: a = 100 # number of spikes in one pulse packet sdev = 10. # width of pulse packet (ms) weight = 0.1 # PSP amplitude (mV) pulsetime = 500. # occurrence time (center) of pulse-packet (ms) # Network and neuron characteristics: n_neurons = 100 # number of neurons cm = 200. # membrane capacitance (pF) tau_s = 0.5 # synaptic time constant (ms) tau_m = 20. # membrane time constant (ms) V0 = 0.0 # resting potential (mV) Vth = numpy.inf # firing threshold, high value to avoid spiking # Simulation and analysis parameters: simtime = 1000. # how long we simulate (ms) simulation_resolution = 0.1 # (ms) sampling_resolution = 1. # for voltmeter (ms) convolution_resolution = 1. # for the analytics (ms) # Some parameters in base units. Cm = cm * 1e-12 # convert to Farad Weight = weight * 1e-12 # convert to Ampere Tau_s = tau_s * 1e-3 # convert to sec Tau_m = tau_m * 1e-3 # convert to sec Sdev = sdev * 1e-3 # convert to sec Convolution_resolution = convolution_resolution * 1e-3 # convert to sec ############################################################################### # This function calculates the membrane potential excursion in response # to a single input spike (the equation is given for example in Diesmann [1]_, # eq.2.3). 
# It expects: # # * ``Time``: a time array or a single time point (in sec) # * ``Tau_s`` and ``Tau_m``: the synaptic and the membrane time constant (in sec) # * ``Cm``: the membrane capacity (in Farad) # * ``Weight``: the synaptic weight (in Ampere) # # It returns the provoked membrane potential (in mV) def make_psp(Time, Tau_s, Tau_m, Cm, Weight): term1 = (1 / Tau_s - 1 / Tau_m) term2 = numpy.exp(-Time / Tau_s) term3 = numpy.exp(-Time / Tau_m) PSP = (Weight / Cm * numpy.exp(1) / Tau_s * (((-Time * term2) / term1) + (term3 - term2) / term1 ** 2)) return PSP * 1e3 ############################################################################### # This function finds the exact location of the maximum of the PSP caused by a # single input spike. The location is obtained by setting the first derivative # of the equation for the PSP (see ``make_psp()``) to zero. The resulting # equation can be expressed in terms of a `LambertW function`. # This function expects: # # * ``Tau_s`` and ``Tau_m``: the synaptic and membrane time constant (in sec) # # It returns the location of the maximum (in sec) def LambertWm1(x): # Using scipy to mimic the gsl_sf_lambert_Wm1 function. return sp.lambertw(x, k=-1 if x < 0 else 0).real def find_loc_pspmax(tau_s, tau_m): var = tau_m / tau_s lam = LambertWm1(-numpy.exp(-1 / var) / var) t_maxpsp = (-var * lam - 1) / var / (1 / tau_s - 1 / tau_m) * 1e-3 return t_maxpsp ############################################################################### # First, we construct a Gaussian kernel for a given standard derivation # (``sig``) and mean value (``mu``). In this case the standard derivation is # the width of the pulse packet (see [1]_). sig = Sdev mu = 0.0 x = numpy.arange(-4 * sig, 4 * sig, Convolution_resolution) term1 = 1 / (sig * numpy.sqrt(2 * numpy.pi)) term2 = numpy.exp(-(x - mu) ** 2 / (sig ** 2 * 2)) gauss = term1 * term2 * Convolution_resolution ############################################################################### # Second, we calculate the PSP of a neuron due to a single spiking input. # (see Diesmann 2002, eq. 2.3). # Since we do that in discrete time steps, we first construct an array # (``t_psp``) that contains the time points we want to consider. Then, the # function ``make_psp()`` (that creates the PSP) takes the time array as its # first argument. t_psp = numpy.arange(0, 10 * (Tau_m + Tau_s), Convolution_resolution) psp = make_psp(t_psp, Tau_s, Tau_m, Cm, Weight) ############################################################################### # Now, we want to normalize the PSP amplitude to one. We therefore have to # divide the PSP by its maximum ([1]_ sec 6.1). The function # ``find_loc_pspmax()`` returns the exact time point (``t_pspmax``) when we # expect the maximum to occur. The function ``make_psp()`` calculates the # corresponding PSP value, which is our PSP amplitude (``psp_amp``). t_pspmax = find_loc_pspmax(Tau_s, Tau_m) psp_amp = make_psp(t_pspmax, Tau_s, Tau_m, Cm, Weight) psp_norm = psp / psp_amp ############################################################################### # Now we have all ingredients to compute the membrane potential excursion # (`U`). This calculation implies a convolution of the Gaussian with the # normalized PSP (see [1]_, eq. 6.9). In order to avoid an offset in the # convolution, we need to add a pad of zeros on the left side of the # normalized PSP. Later on we want to compare our analytical results with the # simulation outcome. 
Therefore we need a time vector (`t_U`) with the correct # temporal resolution, which places the excursion of the potential at the # correct time. psp_norm = numpy.pad(psp_norm, [len(psp_norm) - 1, 1]) U = a * psp_amp * numpy.convolve(gauss, psp_norm) ulen = len(U) t_U = (convolution_resolution * numpy.linspace(-ulen / 2., ulen / 2., ulen) + pulsetime + 1.) ############################################################################### # In this section we simulate a network of multiple neurons. # All these neurons receive an individual pulse packet that is drawn from a # Gaussian distribution. # # We reset the Kernel, define the simulation resolution and set the # verbosity using ``set_verbosity`` to suppress info messages. nest.ResetKernel() nest.SetKernelStatus({'resolution': simulation_resolution}) nest.set_verbosity("M_WARNING") ############################################################################### # Afterwards we create several neurons, the same number of # pulse-packet-generators and a voltmeter. All these nodes/devices # have specific properties that are specified in device specific # dictionaries (here: `neuron_pars` for the neurons, `ppg_pars` # for the pulse-packet-generators and `vm_pars` for the voltmeter). neuron_pars = { 'V_th': Vth, 'tau_m': tau_m, 'tau_syn_ex': tau_s, 'C_m': cm, 'E_L': V0, 'V_reset': V0, 'V_m': V0 } neurons = nest.Create('iaf_psc_alpha', n_neurons, neuron_pars) ppg_pars = { 'pulse_times': [pulsetime], 'activity': a, 'sdev': sdev } ppgs = nest.Create('pulsepacket_generator', n_neurons, ppg_pars) vm_pars = {'interval': sampling_resolution} vm = nest.Create('voltmeter', 1, vm_pars) ############################################################################### # Now, we connect each pulse generator to one neuron via static synapses. # We want to keep all properties of the static synapse constant except the # synaptic weight. Therefore we change the weight with the help of the command # ``SetDefaults``. # The command ``Connect`` connects all kinds of nodes/devices. Since multiple # nodes/devices can be connected in different ways e.g., each source connects # to all targets, each source connects to a subset of targets or each source # connects to exactly one target, we have to specify the connection. In our # case we use the ``one_to_one`` connection routine since we connect one pulse # generator (source) to one neuron (target). # In addition we also connect the `voltmeter` to the `neurons`. nest.SetDefaults('static_synapse', {'weight': weight}) nest.Connect(ppgs, neurons, 'one_to_one') nest.Connect(vm, neurons) ############################################################################### # In the next step we run the simulation for a given duration in ms. nest.Simulate(simtime) ############################################################################### # Finally, we record the membrane potential, when it occurred and to which # neuron it belongs. The sender and the time point of a voltage # data point at position x in the voltage array (``V_m``) can be found at the # same position x in the sender (`senders`) and the time array (`times`). Vm = vm.get('events', 'V_m') times = vm.get('events', 'times') senders = vm.get('events', 'senders') ############################################################################### # Here we plot the membrane potential derived from the theory and from the # simulation.
Since we simulate multiple neurons that received slightly # different pulse packets, we plot the individual and the averaged membrane # potentials. # # We plot the analytical solution U (the resting potential V0 shifts the # membrane potential up or downwards). plt.plot(t_U, U + V0, 'r', lw=2, zorder=3, label='analytical solution') ############################################################################### # Then we plot all individual membrane potentials. # The time axis is the range of the simulation time in steps of ms. Vm_single = [Vm[senders == n.global_id] for n in neurons] simtimes = numpy.arange(1, simtime) for idn in range(n_neurons): if idn == 0: plt.plot(simtimes, Vm_single[idn], 'gray', zorder=1, label='single potentials') else: plt.plot(simtimes, Vm_single[idn], 'gray', zorder=1) ############################################################################### # Finally, we plot the averaged membrane potential. Vm_average = numpy.mean(Vm_single, axis=0) plt.plot(simtimes, Vm_average, 'b', lw=4, zorder=2, label='averaged potential') plt.legend() plt.xlabel('time (ms)') plt.ylabel('membrane potential (mV)') plt.xlim((-5 * (tau_m + tau_s) + pulsetime, 10 * (tau_m + tau_s) + pulsetime)) plt.show()
gpl-2.0
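A quick sanity check of the LambertW expression in find_loc_pspmax() above is to compare it with a brute-force search for the maximum of the same alpha-shaped PSP. The sketch below is a standalone re-implementation under the example's parameter values; NEST is not required, the helper names are invented here, and everything is kept in seconds so no ms-to-s conversion factor appears.

import numpy as np
import scipy.special as sp

tau_s, tau_m = 0.5e-3, 20e-3   # synaptic and membrane time constants (s)

def psp_shape(t):
    # Alpha-shaped PSP from make_psp() above, up to a constant amplitude factor.
    k = 1.0 / tau_s - 1.0 / tau_m
    return (-t * np.exp(-t / tau_s)) / k + (np.exp(-t / tau_m) - np.exp(-t / tau_s)) / k ** 2

def t_max_analytic():
    # LambertW expression from find_loc_pspmax(), evaluated directly in seconds.
    var = tau_m / tau_s
    lam = sp.lambertw(-np.exp(-1.0 / var) / var, k=-1).real
    return (-var * lam - 1.0) / var / (1.0 / tau_s - 1.0 / tau_m)

t = np.linspace(0.0, 10 * (tau_m + tau_s), 200001)
t_numeric = t[np.argmax(psp_shape(t))]
print(t_max_analytic(), t_numeric)   # both should be close to 2.8e-3 s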
Lyleo/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_fltkagg.py
69
20839
""" A backend for FLTK Copyright: Gregory Lielens, Free Field Technologies SA and John D. Hunter 2004 This code is released under the matplotlib license """ from __future__ import division import os, sys, math import fltk as Fltk from backend_agg import FigureCanvasAgg import os.path import matplotlib from matplotlib import rcParams, verbose from matplotlib.cbook import is_string_like from matplotlib.backend_bases import \ RendererBase, GraphicsContextBase, FigureManagerBase, FigureCanvasBase,\ NavigationToolbar2, cursors from matplotlib.figure import Figure from matplotlib._pylab_helpers import Gcf import matplotlib.windowing as windowing from matplotlib.widgets import SubplotTool import thread,time Fl_running=thread.allocate_lock() def Fltk_run_interactive(): global Fl_running if Fl_running.acquire(0): while True: Fltk.Fl.check() time.sleep(0.005) else: print "fl loop already running" # the true dots per inch on the screen; should be display dependent # see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi PIXELS_PER_INCH = 75 cursord= { cursors.HAND: Fltk.FL_CURSOR_HAND, cursors.POINTER: Fltk.FL_CURSOR_ARROW, cursors.SELECT_REGION: Fltk.FL_CURSOR_CROSS, cursors.MOVE: Fltk.FL_CURSOR_MOVE } special_key={ Fltk.FL_Shift_R:'shift', Fltk.FL_Shift_L:'shift', Fltk.FL_Control_R:'control', Fltk.FL_Control_L:'control', Fltk.FL_Control_R:'control', Fltk.FL_Control_L:'control', 65515:'win', 65516:'win', } def error_msg_fltk(msg, parent=None): Fltk.fl_message(msg) def draw_if_interactive(): if matplotlib.is_interactive(): figManager = Gcf.get_active() if figManager is not None: figManager.canvas.draw() def ishow(): """ Show all the figures and enter the fltk mainloop in another thread This allows to keep hand in interractive python session Warning: does not work under windows This should be the last line of your script """ for manager in Gcf.get_all_fig_managers(): manager.show() if show._needmain: thread.start_new_thread(Fltk_run_interactive,()) show._needmain = False def show(): """ Show all the figures and enter the fltk mainloop This should be the last line of your script """ for manager in Gcf.get_all_fig_managers(): manager.show() #mainloop, if an fltk program exist no need to call that #threaded (and interractive) version if show._needmain: Fltk.Fl.run() show._needmain = False show._needmain = True def new_figure_manager(num, *args, **kwargs): """ Create a new figure manager instance """ FigureClass = kwargs.pop('FigureClass', Figure) figure = FigureClass(*args, **kwargs) window = Fltk.Fl_Double_Window(10,10,30,30) canvas = FigureCanvasFltkAgg(figure) window.end() window.show() window.make_current() figManager = FigureManagerFltkAgg(canvas, num, window) if matplotlib.is_interactive(): figManager.show() return figManager class FltkCanvas(Fltk.Fl_Widget): def __init__(self,x,y,w,h,l,source): Fltk.Fl_Widget.__init__(self, 0, 0, w, h, "canvas") self._source=source self._oldsize=(None,None) self._draw_overlay = False self._button = None self._key = None def draw(self): newsize=(self.w(),self.h()) if(self._oldsize !=newsize): self._oldsize =newsize self._source.resize(newsize) self._source.draw() t1,t2,w,h = self._source.figure.bbox.bounds Fltk.fl_draw_image(self._source.buffer_rgba(0,0),0,0,int(w),int(h),4,0) self.redraw() def blit(self,bbox=None): if bbox is None: t1,t2,w,h = self._source.figure.bbox.bounds else: t1o,t2o,wo,ho = self._source.figure.bbox.bounds t1,t2,w,h = bbox.bounds 
x,y=int(t1),int(t2) Fltk.fl_draw_image(self._source.buffer_rgba(x,y),x,y,int(w),int(h),4,int(wo)*4) #self.redraw() def handle(self, event): x=Fltk.Fl.event_x() y=Fltk.Fl.event_y() yf=self._source.figure.bbox.height() - y if event == Fltk.FL_FOCUS or event == Fltk.FL_UNFOCUS: return 1 elif event == Fltk.FL_KEYDOWN: ikey= Fltk.Fl.event_key() if(ikey<=255): self._key=chr(ikey) else: try: self._key=special_key[ikey] except: self._key=None FigureCanvasBase.key_press_event(self._source, self._key) return 1 elif event == Fltk.FL_KEYUP: FigureCanvasBase.key_release_event(self._source, self._key) self._key=None elif event == Fltk.FL_PUSH: self.window().make_current() if Fltk.Fl.event_button1(): self._button = 1 elif Fltk.Fl.event_button2(): self._button = 2 elif Fltk.Fl.event_button3(): self._button = 3 else: self._button = None if self._draw_overlay: self._oldx=x self._oldy=y if Fltk.Fl.event_clicks(): FigureCanvasBase.button_press_event(self._source, x, yf, self._button) return 1 else: FigureCanvasBase.button_press_event(self._source, x, yf, self._button) return 1 elif event == Fltk.FL_ENTER: self.take_focus() return 1 elif event == Fltk.FL_LEAVE: return 1 elif event == Fltk.FL_MOVE: FigureCanvasBase.motion_notify_event(self._source, x, yf) return 1 elif event == Fltk.FL_DRAG: self.window().make_current() if self._draw_overlay: self._dx=Fltk.Fl.event_x()-self._oldx self._dy=Fltk.Fl.event_y()-self._oldy Fltk.fl_overlay_rect(self._oldx,self._oldy,self._dx,self._dy) FigureCanvasBase.motion_notify_event(self._source, x, yf) return 1 elif event == Fltk.FL_RELEASE: self.window().make_current() if self._draw_overlay: Fltk.fl_overlay_clear() FigureCanvasBase.button_release_event(self._source, x, yf, self._button) self._button = None return 1 return 0 class FigureCanvasFltkAgg(FigureCanvasAgg): def __init__(self, figure): FigureCanvasAgg.__init__(self,figure) t1,t2,w,h = self.figure.bbox.bounds w, h = int(w), int(h) self.canvas=FltkCanvas(0, 0, w, h, "canvas",self) #self.draw() def resize(self,size): w, h = size # compute desired figure size in inches dpival = self.figure.dpi.get() winch = w/dpival hinch = h/dpival self.figure.set_size_inches(winch,hinch) def draw(self): FigureCanvasAgg.draw(self) self.canvas.redraw() def blit(self,bbox): self.canvas.blit(bbox) show = draw def widget(self): return self.canvas def start_event_loop(self,timeout): FigureCanvasBase.start_event_loop_default(self,timeout) start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__ def stop_event_loop(self): FigureCanvasBase.stop_event_loop_default(self) stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__ def destroy_figure(ptr,figman): figman.window.hide() Gcf.destroy(figman._num) class FigureManagerFltkAgg(FigureManagerBase): """ Public attributes canvas : The FigureCanvas instance num : The Figure number toolbar : The fltk.Toolbar window : The fltk.Window """ def __init__(self, canvas, num, window): FigureManagerBase.__init__(self, canvas, num) #Fltk container window t1,t2,w,h = canvas.figure.bbox.bounds w, h = int(w), int(h) self.window = window self.window.size(w,h+30) self.window_title="Figure %d" % num self.window.label(self.window_title) self.window.size_range(350,200) self.window.callback(destroy_figure,self) self.canvas = canvas self._num = num if matplotlib.rcParams['toolbar']=='classic': self.toolbar = NavigationToolbar( canvas, self ) elif matplotlib.rcParams['toolbar']=='toolbar2': self.toolbar = NavigationToolbar2FltkAgg( canvas, self ) else: self.toolbar = None 
self.window.add_resizable(canvas.widget()) if self.toolbar: self.window.add(self.toolbar.widget()) self.toolbar.update() self.window.show() def notify_axes_change(fig): 'this will be called whenever the current axes is changed' if self.toolbar != None: self.toolbar.update() self.canvas.figure.add_axobserver(notify_axes_change) def resize(self, event): width, height = event.width, event.height self.toolbar.configure(width=width) # , height=height) def show(self): _focus = windowing.FocusManager() self.canvas.draw() self.window.redraw() def set_window_title(self, title): self.window_title=title self.window.label(title) class AxisMenu: def __init__(self, toolbar): self.toolbar=toolbar self._naxes = toolbar.naxes self._mbutton = Fltk.Fl_Menu_Button(0,0,50,10,"Axes") self._mbutton.add("Select All",0,select_all,self,0) self._mbutton.add("Invert All",0,invert_all,self,Fltk.FL_MENU_DIVIDER) self._axis_txt=[] self._axis_var=[] for i in range(self._naxes): self._axis_txt.append("Axis %d" % (i+1)) self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE) for i in range(self._naxes): self._axis_var.append(self._mbutton.find_item(self._axis_txt[i])) self._axis_var[i].set() def adjust(self, naxes): if self._naxes < naxes: for i in range(self._naxes, naxes): self._axis_txt.append("Axis %d" % (i+1)) self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE) for i in range(self._naxes, naxes): self._axis_var.append(self._mbutton.find_item(self._axis_txt[i])) self._axis_var[i].set() elif self._naxes > naxes: for i in range(self._naxes-1, naxes-1, -1): self._mbutton.remove(i+2) if(naxes): self._axis_var=self._axis_var[:naxes-1] self._axis_txt=self._axis_txt[:naxes-1] else: self._axis_var=[] self._axis_txt=[] self._naxes = naxes set_active(0,self) def widget(self): return self._mbutton def get_indices(self): a = [i for i in range(len(self._axis_var)) if self._axis_var[i].value()] return a def set_active(ptr,amenu): amenu.toolbar.set_active(amenu.get_indices()) def invert_all(ptr,amenu): for a in amenu._axis_var: if not a.value(): a.set() set_active(ptr,amenu) def select_all(ptr,amenu): for a in amenu._axis_var: a.set() set_active(ptr,amenu) class FLTKButton: def __init__(self, text, file, command,argument,type="classic"): file = os.path.join(rcParams['datapath'], 'images', file) self.im = Fltk.Fl_PNM_Image(file) size=26 if type=="repeat": self.b = Fltk.Fl_Repeat_Button(0,0,size,10) self.b.box(Fltk.FL_THIN_UP_BOX) elif type=="classic": self.b = Fltk.Fl_Button(0,0,size,10) self.b.box(Fltk.FL_THIN_UP_BOX) elif type=="light": self.b = Fltk.Fl_Light_Button(0,0,size+20,10) self.b.box(Fltk.FL_THIN_UP_BOX) elif type=="pushed": self.b = Fltk.Fl_Button(0,0,size,10) self.b.box(Fltk.FL_UP_BOX) self.b.down_box(Fltk.FL_DOWN_BOX) self.b.type(Fltk.FL_TOGGLE_BUTTON) self.tooltiptext=text+" " self.b.tooltip(self.tooltiptext) self.b.callback(command,argument) self.b.image(self.im) self.b.deimage(self.im) self.type=type def widget(self): return self.b class NavigationToolbar: """ Public attriubutes canvas - the FigureCanvas (FigureCanvasFltkAgg = customised fltk.Widget) """ def __init__(self, canvas, figman): #xmin, xmax = canvas.figure.bbox.intervalx().get_bounds() #height, width = 50, xmax-xmin self.canvas = canvas self.figman = figman Fltk.Fl_File_Icon.load_system_icons() self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" ) self._fc.hide() t1,t2,w,h = canvas.figure.bbox.bounds w, h = int(w), int(h) self._group = Fltk.Fl_Pack(0,h+2,1000,26) 
self._group.type(Fltk.FL_HORIZONTAL) self._axes=self.canvas.figure.axes self.naxes = len(self._axes) self.omenu = AxisMenu( toolbar=self) self.bLeft = FLTKButton( text="Left", file="stock_left.ppm", command=pan,argument=(self,1,'x'),type="repeat") self.bRight = FLTKButton( text="Right", file="stock_right.ppm", command=pan,argument=(self,-1,'x'),type="repeat") self.bZoomInX = FLTKButton( text="ZoomInX",file="stock_zoom-in.ppm", command=zoom,argument=(self,1,'x'),type="repeat") self.bZoomOutX = FLTKButton( text="ZoomOutX", file="stock_zoom-out.ppm", command=zoom, argument=(self,-1,'x'),type="repeat") self.bUp = FLTKButton( text="Up", file="stock_up.ppm", command=pan,argument=(self,1,'y'),type="repeat") self.bDown = FLTKButton( text="Down", file="stock_down.ppm", command=pan,argument=(self,-1,'y'),type="repeat") self.bZoomInY = FLTKButton( text="ZoomInY", file="stock_zoom-in.ppm", command=zoom,argument=(self,1,'y'),type="repeat") self.bZoomOutY = FLTKButton( text="ZoomOutY",file="stock_zoom-out.ppm", command=zoom, argument=(self,-1,'y'),type="repeat") self.bSave = FLTKButton( text="Save", file="stock_save_as.ppm", command=save_figure, argument=self) self._group.end() def widget(self): return self._group def close(self): Gcf.destroy(self.figman._num) def set_active(self, ind): self._ind = ind self._active = [ self._axes[i] for i in self._ind ] def update(self): self._axes = self.canvas.figure.axes naxes = len(self._axes) self.omenu.adjust(naxes) def pan(ptr, arg): base,direction,axe=arg for a in base._active: if(axe=='x'): a.panx(direction) else: a.pany(direction) base.figman.show() def zoom(ptr, arg): base,direction,axe=arg for a in base._active: if(axe=='x'): a.zoomx(direction) else: a.zoomy(direction) base.figman.show() def save_figure(ptr,base): filetypes = base.canvas.get_supported_filetypes() default_filetype = base.canvas.get_default_filetype() sorted_filetypes = filetypes.items() sorted_filetypes.sort() selected_filter = 0 filters = [] for i, (ext, name) in enumerate(sorted_filetypes): filter = '%s (*.%s)' % (name, ext) filters.append(filter) if ext == default_filetype: selected_filter = i filters = '\t'.join(filters) file_chooser=base._fc file_chooser.filter(filters) file_chooser.filter_value(selected_filter) file_chooser.show() while file_chooser.visible() : Fltk.Fl.wait() fname=None if(file_chooser.count() and file_chooser.value(0) != None): fname="" (status,fname)=Fltk.fl_filename_absolute(fname, 1024, file_chooser.value(0)) if fname is None: # Cancel return #start from last directory lastDir = os.path.dirname(fname) file_chooser.directory(lastDir) format = sorted_filetypes[file_chooser.filter_value()][0] try: base.canvas.print_figure(fname, format=format) except IOError, msg: err = '\n'.join(map(str, msg)) msg = 'Failed to save %s: Error msg was\n\n%s' % ( fname, err) error_msg_fltk(msg) class NavigationToolbar2FltkAgg(NavigationToolbar2): """ Public attriubutes canvas - the FigureCanvas figman - the Figure manager """ def __init__(self, canvas, figman): self.canvas = canvas self.figman = figman NavigationToolbar2.__init__(self, canvas) self.pan_selected=False self.zoom_selected=False def set_cursor(self, cursor): Fltk.fl_cursor(cursord[cursor],Fltk.FL_BLACK,Fltk.FL_WHITE) def dynamic_update(self): self.canvas.draw() def pan(self,*args): self.pan_selected=not self.pan_selected self.zoom_selected = False self.canvas.canvas._draw_overlay= False if self.pan_selected: self.bPan.widget().value(1) else: self.bPan.widget().value(0) if self.zoom_selected: self.bZoom.widget().value(1) 
else: self.bZoom.widget().value(0) NavigationToolbar2.pan(self,args) def zoom(self,*args): self.zoom_selected=not self.zoom_selected self.canvas.canvas._draw_overlay=self.zoom_selected self.pan_selected = False if self.pan_selected: self.bPan.widget().value(1) else: self.bPan.widget().value(0) if self.zoom_selected: self.bZoom.widget().value(1) else: self.bZoom.widget().value(0) NavigationToolbar2.zoom(self,args) def configure_subplots(self,*args): window = Fltk.Fl_Double_Window(100,100,480,240) toolfig = Figure(figsize=(6,3)) canvas = FigureCanvasFltkAgg(toolfig) window.end() toolfig.subplots_adjust(top=0.9) tool = SubplotTool(self.canvas.figure, toolfig) window.show() canvas.show() def _init_toolbar(self): Fltk.Fl_File_Icon.load_system_icons() self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" ) self._fc.hide() t1,t2,w,h = self.canvas.figure.bbox.bounds w, h = int(w), int(h) self._group = Fltk.Fl_Pack(0,h+2,1000,26) self._group.type(Fltk.FL_HORIZONTAL) self._axes=self.canvas.figure.axes self.naxes = len(self._axes) self.omenu = AxisMenu( toolbar=self) self.bHome = FLTKButton( text="Home", file="home.ppm", command=self.home,argument=self) self.bBack = FLTKButton( text="Back", file="back.ppm", command=self.back,argument=self) self.bForward = FLTKButton( text="Forward", file="forward.ppm", command=self.forward,argument=self) self.bPan = FLTKButton( text="Pan/Zoom",file="move.ppm", command=self.pan,argument=self,type="pushed") self.bZoom = FLTKButton( text="Zoom to rectangle",file="zoom_to_rect.ppm", command=self.zoom,argument=self,type="pushed") self.bsubplot = FLTKButton( text="Configure Subplots", file="subplots.ppm", command = self.configure_subplots,argument=self,type="pushed") self.bSave = FLTKButton( text="Save", file="filesave.ppm", command=save_figure, argument=self) self._group.end() self.message = Fltk.Fl_Output(0,0,w,8) self._group.add_resizable(self.message) self.update() def widget(self): return self._group def close(self): Gcf.destroy(self.figman._num) def set_active(self, ind): self._ind = ind self._active = [ self._axes[i] for i in self._ind ] def update(self): self._axes = self.canvas.figure.axes naxes = len(self._axes) self.omenu.adjust(naxes) NavigationToolbar2.update(self) def set_message(self, s): self.message.value(s) FigureManager = FigureManagerFltkAgg
gpl-3.0
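For context, a backend file like the one above is not imported directly; it is selected through matplotlib's backend switch. The snippet below is a minimal usage sketch assuming an era-appropriate matplotlib (roughly 1.1 or older) with pyFLTK installed; current matplotlib has removed the FltkAgg backend, so this is historical illustration only.

import matplotlib
matplotlib.use('FltkAgg')   # must happen before pyplot is imported

import numpy as np
from matplotlib import pyplot as plt

x = np.linspace(0.0, 2.0 * np.pi, 200)
plt.plot(x, np.sin(x))
plt.show()                  # enters the FLTK mainloop provided by backend_fltkagg.show()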
BhallaLab/moose-full
moose-examples/snippets/switchKineticSolvers.py
2
5089
######################################################################### ## This program is part of 'MOOSE', the ## Messaging Object Oriented Simulation Environment. ## Copyright (C) 2014 Upinder S. Bhalla. and NCBS ## It is made available under the terms of the ## GNU Lesser General Public License version 2.1 ## See the file COPYING.LIB for the full notice. ######################################################################### import moose import pylab import numpy import matplotlib.pyplot as plt import sys def runAndSavePlots( name ): runtime = 20.0 moose.reinit() moose.start( runtime ) pa = moose.Neutral( '/model/graphs/' + name ) for x in moose.wildcardFind( '/model/#graphs/conc#/#' ): if ( x.tick != -1 ): tabname = '/model/graphs/' + name + '/' + x.name + '.' + name y = moose.Table( tabname ) y.vector = x.vector y.tick = -1 # Takes args ee, gsl, or gssa def switchSolvers( solver ): if ( moose.exists( 'model/kinetics/stoich' ) ): moose.delete( '/model/kinetics/stoich' ) moose.delete( '/model/kinetics/ksolve' ) compt = moose.element( '/model/kinetics' ) if ( solver == 'gsl' ): ksolve = moose.Ksolve( '/model/kinetics/ksolve' ) if ( solver == 'gssa' ): ksolve = moose.Gsolve( '/model/kinetics/ksolve' ) if ( solver != 'ee' ): stoich = moose.Stoich( '/model/kinetics/stoich' ) stoich.compartment = compt stoich.ksolve = ksolve stoich.path = "/model/kinetics/##" def main(): """ At zero order, you can select the solver you want to use within the function moose.loadModel( filename, modelpath, solver ). Having loaded in the model, you can change the solver to use on it. This example illustrates how to assign and change solvers for a kinetic model. This process is necessary in two situations: * If we want to change the numerical method employed, for example, from deterministic to stochastic. * If we are already using a solver, and we have changed the reaction network by adding or removing molecules or reactions. Note that we do not have to change the solvers if the volume or reaction rates change. In this example the model is loaded in with a gsl solver. The sequence of solver calculations is: #. gsl #. ee #. gsl #. gssa #. gsl If you're removing the solvers, you just delete the stoichiometry object and the associated ksolve/gsolve. Should there be diffusion (a dsolve)then you should delete that too. If you're building the solvers up again, then you must do the following steps in order: #. build up the ksolve/gsolve and stoich (any order) #. Assign stoich.ksolve #. Assign stoich.path. See the Reaction-diffusion section should you want to do diffusion as well. """ solver = "gsl" # Pick any of gsl, gssa, ee.. mfile = '../genesis/kkit_objects_example.g' modelId = moose.loadModel( mfile, 'model', solver ) # Increase volume so that the stochastic solver gssa # gives an interesting output compt = moose.element( '/model/kinetics' ) compt.volume = 1e-19 runAndSavePlots( 'gsl' ) ######################################################### switchSolvers( 'ee' ) runAndSavePlots( 'ee' ) ######################################################### switchSolvers( 'gsl' ) runAndSavePlots( 'gsl2' ) ######################################################### switchSolvers( 'gssa' ) runAndSavePlots( 'gssa' ) ######################################################### switchSolvers( 'gsl' ) runAndSavePlots( 'gsl3' ) ######################################################### # Display all plots. 
fig = plt.figure( figsize = (12, 10) ) orig = fig.add_subplot( 511 ) gsl = fig.add_subplot( 512 ) ee = fig.add_subplot( 513 ) gsl2 = fig.add_subplot( 514 ) gssa = fig.add_subplot( 515 ) plotdt = moose.element( '/clock' ).tickDt[18] for x in moose.wildcardFind( '/model/#graphs/conc#/#' ): t = numpy.arange( 0, x.vector.size, 1 ) * plotdt orig.plot( t, x.vector, label=x.name ) for x in moose.wildcardFind( '/model/graphs/gsl/#' ): t = numpy.arange( 0, x.vector.size, 1 ) * plotdt gsl.plot( t, x.vector, label=x.name ) for x in moose.wildcardFind( '/model/graphs/ee/#' ): t = numpy.arange( 0, x.vector.size, 1 ) * plotdt ee.plot( t, x.vector, label=x.name ) for x in moose.wildcardFind( '/model/graphs/gsl2/#' ): t = numpy.arange( 0, x.vector.size, 1 ) * plotdt gsl2.plot( t, x.vector, label=x.name ) for x in moose.wildcardFind( '/model/graphs/gssa/#' ): t = numpy.arange( 0, x.vector.size, 1 ) * plotdt gssa.plot( t, x.vector, label=x.name ) plt.legend() pylab.show() quit() # Run the 'main' if this script is executed standalone. if __name__ == '__main__': main()
gpl-2.0
emon10005/scikit-image
doc/examples/plot_medial_transform.py
14
2220
""" =========================== Medial axis skeletonization =========================== The medial axis of an object is the set of all points having more than one closest point on the object's boundary. It is often called the **topological skeleton**, because it is a 1-pixel wide skeleton of the object, with the same connectivity as the original object. Here, we use the medial axis transform to compute the width of the foreground objects. As the function ``medial_axis`` (``skimage.morphology.medial_axis``) returns the distance transform in addition to the medial axis (with the keyword argument ``return_distance=True``), it is possible to compute the distance to the background for all points of the medial axis with this function. This gives an estimate of the local width of the objects. For a skeleton with fewer branches, there exists another skeletonization algorithm in ``skimage``: ``skimage.morphology.skeletonize``, that computes a skeleton by iterative morphological thinnings. """ import numpy as np from scipy import ndimage as ndi from skimage.morphology import medial_axis import matplotlib.pyplot as plt def microstructure(l=256): """ Synthetic binary data: binary microstructure with blobs. Parameters ---------- l: int, optional linear size of the returned image """ n = 5 x, y = np.ogrid[0:l, 0:l] mask = np.zeros((l, l)) generator = np.random.RandomState(1) points = l * generator.rand(2, n**2) mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1 mask = ndi.gaussian_filter(mask, sigma=l/(4.*n)) return mask > mask.mean() data = microstructure(l=64) # Compute the medial axis (skeleton) and the distance transform skel, distance = medial_axis(data, return_distance=True) # Distance to the background for pixels of the skeleton dist_on_skel = distance * skel fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4)) ax1.imshow(data, cmap=plt.cm.gray, interpolation='nearest') ax1.axis('off') ax2.imshow(dist_on_skel, cmap=plt.cm.spectral, interpolation='nearest') ax2.contour(data, [0.5], colors='w') ax2.axis('off') fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1) plt.show()
bsd-3-clause
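The docstring above notes that the distance map returned by medial_axis gives an estimate of local object width. The sketch below makes that step explicit on a synthetic disk so it runs standalone; with the example's microstructure() data, the last two lines apply unchanged.

import numpy as np
from skimage.morphology import medial_axis

yy, xx = np.ogrid[:128, :128]
disk = (yy - 64) ** 2 + (xx - 64) ** 2 < 30 ** 2   # binary foreground: disk of radius 30

skel, distance = medial_axis(disk, return_distance=True)

# Distance to the background evaluated on skeleton pixels only;
# twice that distance estimates the local width of the object.
widths = 2 * distance[skel]
print(widths.max())   # roughly the disk diameter (about 60 pixels)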
amanzi/ats-dev
tools/meshing_ats/meshing_ats/meshing_ats.py
1
34933
"""Extrudes a 2D mesh to generate an ExodusII 3D mesh. Works with and assumes all polyhedra cells (and polygon faces). To see usage, run: ------------------------------------------------------------ python meshing_ats.py -h Example distributed with this source, to run: ------------------------------------------------------------ $> cd four-polygon-test $> python ../meshing_ats.py -n 10 -d 1 ./four_polygon.vtk $> mkdir run0 $> cd run0 $> ats --xml_file=../test1-fv-four-polygon.xml Requires building the latest version of Exodus ------------------------------------------------------------ Note that this is typically done in your standard ATS installation, assuming you have built your Amanzi TPLs with shared libraries (the default through bootstrap). In that case, simply ensure that ${AMANZI_TPLS_DIR}/SEACAS/lib is in your PYTHONPATH. """ from __future__ import print_function import sys,os import numpy as np import collections import argparse try: import exodus except ImportError: sys.path.append(os.path.join(os.environ["SEACAS_DIR"],"lib")) import exodus class SideSet(object): def __init__(self, name, setid, elem_list, side_list): assert(type(setid) == int) assert(type(elem_list) == list or type(elem_list) == np.ndarray) assert(type(side_list) == list or type(side_list) == np.ndarray) self.name = name self.setid = setid self.elem_list = elem_list self.side_list = side_list class LabeledSet(object): def __init__(self, name, setid, entity, ent_ids): assert entity in ['CELL', 'FACE', 'NODE'] assert(type(setid) == int) assert(type(ent_ids) == list or type(ent_ids) == np.ndarray) self.name = name self.setid = setid self.entity = entity self.ent_ids = np.array(ent_ids) class Mesh2D(object): def __init__(self, coords, connectivity, labeled_sets=None, check_handedness=True): """ Creates a 2D mesh from coordinates and a list cell-to-node connectivity lists. 
coords : numpy array of shape (NCOORDS, NDIMS) connectivity : list of lists of integer indices into coords specifying a (clockwise OR counterclockwise) ordering of the nodes around the 2D cell labeled_sets : list of LabeledSet objects """ assert type(coords) == np.ndarray assert len(coords.shape) == 2 self.dim = coords.shape[1] self.coords = coords self.conn = connectivity if labeled_sets is not None: self.labeled_sets = labeled_sets else: self.labeled_sets = [] self.validate() self.edge_counts() if check_handedness: self.check_handedness() def validate(self): assert self.coords.shape[1] == 2 or self.coords.shape[1] == 3 assert type(self.conn) is list for f in self.conn: assert type(f) is list assert len(set(f)) == len(f) for i in f: assert i < self.coords.shape[0] for ls in self.labeled_sets: if ls.entity == "NODE": size = len(self.coords) elif ls.entity == "CELL": size = len(self.conn) for i in ls.ent_ids: assert i < size return True def num_cells(self): return len(self.conn) def num_nodes(self): return self.coords.shape[0] def num_edges(self): return len(self.edges()) @staticmethod def edge_hash(i,j): return tuple(sorted((i,j))) def edges(self): return self.edge_counts().keys() def edge_counts(self): try: return self._edges except AttributeError: self._edges = collections.Counter(self.edge_hash(f[i], f[(i+1)%len(f)]) for f in self.conn for i in range(len(f))) return self._edges def check_handedness(self): for conn in self.conn: points = np.array([self.coords[c] for c in conn]) cross = 0 for i in range(len(points)): im = i - 1 ip = i + 1 if ip == len(points): ip = 0 p = points[ip] - points[i] m = points[i] - points[im] cross = cross + p[1] * m[0] - p[0] * m[1] if cross < 0: conn.reverse() def plot(self, color=None, ax=None): if color is None: import colors cm = colors.cm_mapper(0,self.num_cells()-1) colors = [cm(i) for i in range(self.num_cells())] else: colors = color verts = [[self.coords[i,0:2] for i in f] for f in self.conn] from matplotlib import collections gons = collections.PolyCollection(verts, facecolors=colors) from matplotlib import pyplot as plt if ax is None: fig,ax = plt.subplots(1,1) ax.add_collection(gons) ax.autoscale_view() @classmethod def read_VTK(cls, filename): try: return cls.read_VTK_Simplices(filename) except AssertionError: return cls.read_VTK_Unstructured(filename) @classmethod def read_VTK_Unstructured(cls, filename): with open(filename,'r') as fid: points_found = False polygons_found = False while True: line = fid.readline().decode('utf-8') if not line: # EOF break line = line.strip() if len(line) == 0: continue split = line.split() section = split[0] if section == 'POINTS': ncoords = int(split[1]) points = np.fromfile(fid, count=ncoords*3, sep=' ', dtype='d') points = points.reshape(ncoords, 3) points_found = True elif section == 'POLYGONS': ncells = int(split[1]) n_to_read = int(split[2]) gons = [] data = np.fromfile(fid, count=n_to_read, sep=' ', dtype='i') idx = 0 for i in range(ncells): n_in_gon = data[idx] gon = list(data[idx+1:idx+1+n_in_gon]) # check handedness -- need normals to point up! 
cross = [] for i in range(len(gon)): if i == len(gon)-1: ip = 0 ipp = 1 elif i == len(gon)-2: ip = i+1 ipp = 0 else: ip = i+1 ipp = i+2 d2 = points[gon[ipp]] - points[gon[ip]] d1 = points[gon[i]] - points[gon[ip]] cross.append(np.cross(d2, d1)) if (np.array([c[2] for c in cross]).mean() < 0): gon.reverse() gons.append(gon) idx += n_in_gon + 1 assert(idx == n_to_read) polygons_found = True if not points_found: raise RuntimeError("Unstructured VTK must contain sections 'POINTS'") if not polygons_found: raise RuntimeError("Unstructured VTK must contain sections 'POLYGONS'") return cls(points, gons) @classmethod def read_VTK_Simplices(cls, filename): """Stolen from meshio, https://github.com/nschloe/meshio/blob/master/meshio/vtk_io.py""" import vtk_io with open(filename,'r') as fid: data = vtk_io.read_buffer(fid) points = data[0] if len(data[1]) != 1: raise RuntimeError("Simplex VTK file is readable by vtk_io but not by meshing_ats. Includes: %r"%data[1].keys()) gons = [v for v in data[1].itervalues()][0] gons = gons.tolist() # check handedness for gon in gons: cross = [] for i in range(len(gon)): if i == len(gon)-1: ip = 0 ipp = 1 elif i == len(gon)-2: ip = i+1 ipp = 0 else: ip = i+1 ipp = i+2 d2 = points[gon[ipp]] - points[gon[ip]] d1 = points[gon[i]] - points[gon[ip]] cross.append(np.cross(d2, d1)) if (np.array([c[2] for c in cross]).mean() < 0): gon.reverse() return cls(points, gons) @classmethod def from_Transect(cls, x, z, width=1): """Creates a 2D surface strip mesh from transect data""" # coordinates if (type(width) is list or type(width) is np.ndarray): variable_width = True y = np.array([0,1]) else: variable_width = False y = np.array([0,width]) Xc, Yc = np.meshgrid(x, y) if variable_width: assert(Yc.shape[1] == 2) assert(len(width) == Yc.shape[0]) assert(min(width) > 0.) Yc[:,0] = -width/2. Yc[:,1] = width/2. 
Xc = Xc.flatten() Yc = Yc.flatten() Zc = np.concatenate([z,z]) # connectivity nsurf_cells = len(x)-1 conn = [] for i in range(nsurf_cells): conn.append([i, i+1, nsurf_cells + i + 2, nsurf_cells + i + 1]) coords = np.array([Xc, Yc, Zc]) return cls(coords.transpose(), conn) @classmethod def from_Transect_Guide(cls, x, z, guide): """Creates a 2D surface strip mesh from transect data""" assert type(guide) == np.ndarray assert guide.shape[1] == 3 # coordinates Xc = x Yc = np.zeros_like(x) Zc = z nsteps = guide.shape[0] xnew = Xc ynew = Yc znew = Zc for i in range(nsteps): xnew = xnew + guide[i][0] ynew = ynew + guide[i][1] znew = znew + guide[i][2] Xc = np.concatenate([Xc, xnew]) Yc = np.concatenate([Yc, ynew]) Zc = np.concatenate([Zc, znew]) # y = np.array([0,1,2]) # Xc, Yc = np.meshgrid(x, y) # Xc = Xc.flatten() # Yc = Yc.flatten() # Zc = np.concatenate([z,z,z]) # connectivity ns = len(x) conn = [] for j in range(nsteps): for i in range(ns-1): conn.append([j*ns + i, j*ns + i + 1, (j+1)*ns + i + 1, (j+1)*ns + i ]) coords = np.array([Xc, Yc, Zc]) return cls(coords.transpose(), conn) @classmethod def from_Transect_GuideX(cls, x, z, guide, nsteps): """Creates a 2D surface strip mesh from transect data""" assert type(guide) == np.ndarray assert guide.shape[1] == 3 # coordinates Xc = x Yc = np.zeros_like(x) Zc = z nsteps = guide.shape[0] xnew = np.zeros_like(x) ynew = np.zeros(len(x)) znew = np.zeros_like(x) xnew[:] = Xc[:] ynew[:] = Yc[:] znew[:] = Zc[:] for i in range(nsteps): print(Yc) for j in range(len(x)): xnew[j] = xnew[j] + guide[j][0] ynew[j] = ynew[j] + guide[j][1] znew[j] = znew[j] + guide[j][2] Xc = np.concatenate([Xc, xnew]) Yc = np.concatenate([Yc, ynew]) Zc = np.concatenate([Zc, znew]) # y = np.array([0,1,2]) # Xc, Yc = np.meshgrid(x, y) # Xc = Xc.flatten() # Yc = Yc.flatten() # Zc = np.concatenate([z,z,z]) # connectivity ns = len(x) conn = [] for j in range(nsteps): for i in range(ns-1): conn.append([j*ns + i, j*ns + i + 1, (j+1)*ns + i + 1, (j+1)*ns + i ]) coords = np.array([Xc, Yc, Zc]) return cls(coords.transpose(), conn) class Mesh3D(object): def __init__(self, coords, face_to_node_conn, elem_to_face_conn, side_sets=None, labeled_sets=None, material_ids=None): """ Creates a 3D mesh from coordinates and connectivity lists. 
coords : numpy array of shape (NCOORDS, 3) face_to_node_conn : list of lists of integer indices into coords specifying an (clockwise OR counterclockwise) ordering of the nodes around the face elem_to_face_conn : list of lists of integer indices into face_to_node_conn specifying a list of faces that make up the elem """ assert type(coords) == np.ndarray assert len(coords.shape) == 2 assert coords.shape[1] == 3 self.dim = coords.shape[1] self.coords = coords self.face_to_node_conn = face_to_node_conn self.elem_to_face_conn = elem_to_face_conn if labeled_sets is not None: self.labeled_sets = labeled_sets else: self.labeled_sets = [] if side_sets is not None: self.side_sets = side_sets else: self.side_sets = [] if material_ids is not None: self.material_id_list = collections.Counter(material_ids).keys() self.material_ids = material_ids else: self.material_id_list = [10000,] self.material_ids = [10000,]*len(self.elem_to_face_conn) self.validate() def validate(self): assert self.coords.shape[1] == 3 assert type(self.face_to_node_conn) is list for f in self.face_to_node_conn: assert type(f) is list assert len(set(f)) == len(f) for i in f: assert i < self.coords.shape[0] assert type(self.elem_to_face_conn) is list for e in self.elem_to_face_conn: assert type(e) is list assert len(set(e)) == len(e) for i in e: assert i < len(self.face_to_node_conn) for ls in self.labeled_sets: if ls.entity == "NODE": size = self.num_nodes() if ls.entity == "FACE": size = self.num_faces() elif ls.entity == "CELL": size = self.num_cells() for i in ls.ent_ids: assert i < size for ss in self.side_sets: for j,i in zip(ss.elem_list, ss.side_list): assert j < self.num_cells() assert i < len(self.elem_to_face_conn[j]) def num_cells(self): return len(self.elem_to_face_conn) def num_faces(self): return len(self.face_to_node_conn) def num_nodes(self): return self.coords.shape[0] def write_exodus(self, filename, face_block_mode="one block"): """Write the 3D mesh to ExodusII using arbitrary polyhedra spec""" # put cells in with blocks, which renumbers the cells, so we have to track sidesets. # Therefore we keep a map of old cell to new cell ordering # # also, though not required by the spec, paraview and visit # seem to crash if num_face_blocks != num_elem_blocks. So # make face blocks here too, which requires renumbering the faces. 
# -- first pass, form all elem blocks and make the map from old-to-new new_to_old_elems = [] elem_blks = [] for i_m,m_id in enumerate(self.material_id_list): # split out elems of this material, save new_to_old map elems_tuple = [(i,c) for (i,c) in enumerate(self.elem_to_face_conn) if self.material_ids[i] == m_id] new_to_old_elems.extend([i for (i,c) in elems_tuple]) elems = [c for (i,c) in elems_tuple] elem_blks.append(elems) old_to_new_elems = sorted([(old,i) for (i,old) in enumerate(new_to_old_elems)], lambda a,b: int.__cmp__(a[0],b[0])) # -- deal with faces, form all face blocks and make the map from old-to-new face_blks = [] if face_block_mode == "one block": # no reordering of faces needed face_blks.append(self.face_to_node_conn) elif face_block_mode == "n blocks, not duplicated": used_faces = np.zeros((len(self.face_to_node_conn),),'bool') new_to_old_faces = [] for i_m,m_id in enumerate(self.material_id_list): # split out faces of this material, save new_to_old map def used(f): result = used_faces[f] used_faces[f] = True return result elem_blk = elem_blks[i_m] faces_tuple = [(f,self.face_to_node_conn[f]) for c in elem_blk for (j,f) in enumerate(c) if not used(f)] new_to_old_faces.extend([j for (j,f) in faces_tuple]) faces = [f for (j,f) in faces_tuple] face_blks.append(faces) # get the renumbering in the elems old_to_new_faces = sorted([(old,j) for (j,old) in enumerate(new_to_old_faces)], lambda a,b: int.__cmp__(a[0],b[0])) elem_blks = [[[old_to_new_faces[f][1] for f in c] for c in elem_blk] for elem_blk in elem_blks] elif face_block_mode == "n blocks, duplicated": elem_blks_new = [] offset = 0 for i_m, m_id in enumerate(self.material_id_list): used_faces = np.zeros((len(self.face_to_node_conn),),'bool') def used(f): result = used_faces[f] used_faces[f] = True return result elem_blk = elem_blks[i_m] tuple_old_f = [(f,self.face_to_node_conn[f]) for c in elem_blk for f in c if not used(f)] tuple_new_old_f = [(new,old,f) for (new,(old,f)) in enumerate(tuple_old_f)] old_to_new_blk = np.zeros((len(self.face_to_node_conn),),'i')-1 for new,old,f in tuple_new_old_f: old_to_new_blk[old] = new + offset elem_blk_new = [[old_to_new_blk[f] for f in c] for c in elem_blk] #offset = offset + len(ftuple_new) elem_blks_new.append(elem_blk_new) face_blks.append([f for i,j,f in tuple_new_old_f]) elem_blks = elem_blks_new elif face_block_mode == "one block, repeated": # no reordering of faces needed, just repeat for eblock in elem_blks: face_blks.append(self.face_to_node_conn) else: raise RuntimeError("Invalid face_block_mode: '%s', valid='one block', 'n blocks, duplicated', 'n blocks, not duplicated'"%face_block_mode) # open the mesh file num_elems = sum(len(elem_blk) for elem_blk in elem_blks) num_faces = sum(len(face_blk) for face_blk in face_blks) ep = exodus.ex_init_params(title=filename, num_dim=3, num_nodes=self.num_nodes(), num_face=num_faces, num_face_blk=len(face_blks), num_elem=num_elems, num_elem_blk=len(elem_blks), num_side_sets=len(self.side_sets)) e = exodus.exodus(filename, mode='w', array_type='numpy', init_params=ep) # put the coordinates e.put_coord_names(['coordX', 'coordY', 'coordZ']) e.put_coords(self.coords[:,0], self.coords[:,1], self.coords[:,2]) # put the face blocks for i_blk, face_blk in enumerate(face_blks): face_raveled = [n for f in face_blk for n in f] e.put_polyhedra_face_blk(i_blk+1, len(face_blk), len(face_raveled), 0) e.put_node_count_per_face(i_blk+1, np.array([len(f) for f in face_blk])) e.put_face_node_conn(i_blk+1, np.array(face_raveled)+1) # put the elem blocks 
assert len(elem_blks) == len(self.material_id_list) for i_blk, (m_id, elem_blk) in enumerate(zip(self.material_id_list, elem_blks)): elems_raveled = [f for c in elem_blk for f in c] e.put_polyhedra_elem_blk(m_id, len(elem_blk), len(elems_raveled), 0) e.put_elem_blk_name(m_id, "MATERIAL_ID_%d"%m_id) e.put_face_count_per_polyhedra(m_id, np.array([len(c) for c in elem_blk])) e.put_elem_face_conn(m_id, np.array(elems_raveled)+1) # add sidesets e.put_side_set_names([ss.name for ss in self.side_sets]) for ss in self.side_sets: for elem in ss.elem_list: assert old_to_new_elems[elem][0] == elem new_elem_list = [old_to_new_elems[elem][1] for elem in ss.elem_list] e.put_side_set_params(ss.setid, len(ss.elem_list), 0) e.put_side_set(ss.setid, np.array(new_elem_list)+1, np.array(ss.side_list)+1) # finish and close e.close() @classmethod def extruded_Mesh2D(cls, mesh2D, layer_types, layer_data, ncells_per_layer, mat_ids): """ Regularly extrude a 2D mesh to make a 3D mesh. mesh2D : a Mesh2D object layer_types : either a string (type) or list of strings (types) layer_data : array of data needed (specific to the type) ncells_per_layer : either a single integer (same number of cells in all : layers) or a list of number of cells in the layer mat_ids : either a single integer (one mat_id for all layers) : or a list of integers (mat_id for each layer) : or a 2D numpy array of integers (mat_id for each layer and each column: [layer_id, surface_cell_id]) types: - 'constant' : (data=float thickness) uniform thickness - 'function' : (data=function or functor) thickness as a function : of (x,y) - 'snapped' : (data=float z coordinate) snap the layer to : provided z coordinate, telescoping as needed - 'node' : thickness provided on each node of the surface domain - 'cell' : thickness provided on each cell of the surface domain, : interpolate to nodes NOTE: dz is uniform through the layer in all but the 'snapped' case NOTE: 2D mesh is always labeled 'surface', extrusion is always downwards """ # make the data all lists # --------------------------------- def is_list(data): if type(data) is str: return False try: len(data) except TypeError: return False else: return True if is_list(layer_types): if not is_list(layer_data): layer_data = [layer_data,]*len(layer_types) else: assert len(layer_data) == len(layer_types) if not is_list(ncells_per_layer): ncells_per_layer = [ncells_per_layer,]*len(layer_types) else: assert len(ncells_per_layer) == len(layer_types) elif is_list(layer_data): layer_types = [layer_types,]*len(layer_data) if not is_list(ncells_per_layer): ncells_per_layer = [ncells_per_layer,]*len(layer_data) else: assert len(ncells_per_layer) == len(layer_data) elif is_list(ncells_per_layer): layer_type = [layer_type,]*len(ncells_per_layer) layer_data = [layer_data,]*len(ncells_per_layer) else: layer_type = [layer_type,] layer_data = [layer_data,] ncells_per_layer = [ncells_per_layer,] # helper data and functions for mapping indices from 2D to 3D # ------------------------------------------------------------------ if min(ncells_per_layer) < 0: raise RuntimeError("Invalid number of cells, negative value provided.") ncells_tall = sum(ncells_per_layer) ncells_total = ncells_tall * mesh2D.num_cells() nfaces_total = (ncells_tall+1) * mesh2D.num_cells() + ncells_tall * mesh2D.num_edges() nnodes_total = (ncells_tall+1) * mesh2D.num_nodes() np_mat_ids = np.array(mat_ids, dtype=int) if np_mat_ids.size == np.size(np_mat_ids, 0): if np_mat_ids.size == 1: np_mat_ids = np.full((len(ncells_per_layer), mesh2D.num_cells()), 
mat_ids[0], dtype=int) else: np_mat_ids = np.empty((len(ncells_per_layer), mesh2D.num_cells()), dtype=int) for ilay in range(len(ncells_per_layer)): np_mat_ids[ilay, :] = np.full(mesh2D.num_cells(), mat_ids[ilay], dtype=int) def col_to_id(column, z_cell): """Maps 2D cell ID and index in the vertical to a 3D cell ID""" return z_cell + column * ncells_tall def node_to_id(node, z_node): """Maps 2D node ID and index in the vertical to a 3D node ID""" return z_node + node * (ncells_tall+1) def edge_to_id(edge, z_cell): """Maps 2D edge hash and index in the vertical to a 3D face ID of a vertical face""" return (ncells_tall + 1) * mesh2D.num_cells() + z_cell + edge * ncells_tall # create coordinates # --------------------------------- coords = np.zeros((mesh2D.coords.shape[0],ncells_tall+1, 3),'d') coords[:,:,0:2] = np.expand_dims(mesh2D.coords[:,0:2],1) if mesh2D.dim == 3: coords[:,0,2] = mesh2D.coords[:,2] # else the surface is at 0 depth cell_layer_start = 0 for layer_type, layer_datum, ncells in zip(layer_types, layer_data, ncells_per_layer): if layer_type.lower() == 'constant': dz = float(layer_datum) / ncells for i in range(1,ncells+1): coords[:,cell_layer_start+i,2] = coords[:,cell_layer_start,2] - i * dz else: # allocate an array of coordinates for the bottom of the layer layer_bottom = np.zeros((mesh2D.coords.shape[0],),'d') if layer_type.lower() == 'snapped': # layer bottom is uniform layer_bottom[:] = layer_datum elif layer_type.lower() == 'function': # layer thickness is given by a function evaluation of x,y for node_col in range(mesh2D.coords.shape[0]): layer_bottom[node_col] = coords[node_col,cell_layer_start,2] - layer_datum(coords[node_col,0,0], coords[node_col,0,1]) elif layer_type.lower() == 'node': # layer bottom specifically provided through thickness layer_bottom[:] = coords[:,cell_layer_start,2] - layer_datum elif layer_type.lower() == 'cell': # interpolate cell thicknesses to node thicknesses import scipy.interpolate centroids = mesh2D.cell_centroids() interp = scipy.interpolate.interp2d(centroids[:,0], centroids[:,1], layer_datum, kind='linear') layer_bottom[:] = coords[:,cell_layer_start,2] - interp(mesh2D.coords[:,0], mesh2D.coords[:,1]) else: raise RuntimeError("Unrecognized layer_type '%s'"%layer_type) # linspace from bottom of previous layer to bottom of this layer for node_col in range(mesh2D.coords.shape[0]): coords[node_col,cell_layer_start:cell_layer_start+ncells+1,2] = np.linspace(coords[node_col,cell_layer_start,2], layer_bottom[node_col], ncells+1) cell_layer_start = cell_layer_start + ncells # create faces, face sets, cells bottom = [] surface = [] faces = [] cells = [list() for c in range(ncells_total)] # -- loop over the columns, adding the horizontal faces for col in range(mesh2D.num_cells()): nodes_2 = mesh2D.conn[col] surface.append(col_to_id(col,0)) for z_face in range(ncells_tall + 1): i_f = len(faces) f = [node_to_id(n, z_face) for n in nodes_2] if z_face != ncells_tall: cells[col_to_id(col, z_face)].append(i_f) if z_face != 0: cells[col_to_id(col, z_face-1)].append(i_f) faces.append(f) bottom.append(col_to_id(col,ncells_tall-1)) # -- loop over the columns, adding the vertical faces added = dict() vertical_side_cells = [] vertical_side_indices = [] for col in range(mesh2D.num_cells()): nodes_2 = mesh2D.conn[col] for i in range(len(nodes_2)): edge = mesh2D.edge_hash(nodes_2[i], nodes_2[(i+1)%len(nodes_2)]) try: i_e = added[edge] except KeyError: # faces not yet added to facelist i_e = len(added.keys()) added[edge] = i_e for z_face in range(ncells_tall): 
i_f = len(faces) assert i_f == edge_to_id(i_e, z_face) f = [node_to_id(edge[0], z_face), node_to_id(edge[1], z_face), node_to_id(edge[1], z_face+1), node_to_id(edge[0], z_face+1)] faces.append(f) face_cell = col_to_id(col, z_face) cells[face_cell].append(i_f) # check if this is an external if mesh2D._edges[edge] == 1: vertical_side_cells.append(face_cell) vertical_side_indices.append(len(cells[face_cell])-1) else: # faces already added from previous column for z_face in range(ncells_tall): i_f = edge_to_id(i_e, z_face) cells[col_to_id(col, z_face)].append(i_f) # Do some idiot checking # -- check we got the expected number of faces assert len(faces) == nfaces_total # -- check every cell is at least a tet for c in cells: assert len(c) > 4 # -- check surface sideset has the right number of entries assert len(surface) == mesh2D.num_cells() # -- check bottom sideset has the right number of entries assert len(bottom) == mesh2D.num_cells() # -- len of vertical sides sideset is number of external edges * number of cells, no pinchouts here num_sides = ncells_tall * sum(1 for e,c in mesh2D.edge_counts().iteritems() if c == 1) assert num_sides == len(vertical_side_cells) assert num_sides == len(vertical_side_indices) # make the material ids material_ids = np.zeros((len(cells),),'i') for col in range(mesh2D.num_cells()): z_cell = 0 for ilay in range(len(ncells_per_layer)): ncells = ncells_per_layer[ilay] for i in range(z_cell, z_cell+ncells): material_ids[col_to_id(col, i)] = np_mat_ids[ilay, col] z_cell = z_cell + ncells # make the side sets side_sets = [] side_sets.append(SideSet("bottom", 1, bottom, [1,]*len(bottom))) side_sets.append(SideSet("surface", 2, surface, [0,]*len(surface))) side_sets.append(SideSet("external_sides", 3, vertical_side_cells, vertical_side_indices)) # reshape coords coords = coords.reshape(nnodes_total, 3) for e,s in zip(side_sets[0].elem_list, side_sets[0].side_list): face = cells[e][s] fz_coords = np.array([coords[n] for n in faces[face]]) #print "bottom centroid = ", np.mean(fz_coords, axis=0) for e,s in zip(side_sets[1].elem_list, side_sets[1].side_list): face = cells[e][s] fz_coords = np.array([coords[n] for n in faces[face]]) #print "surface centroid = ", np.mean(fz_coords, axis=0) # instantiate the mesh return cls(coords, faces, cells, side_sets=side_sets, material_ids=material_ids) def commandline_options(): parser = argparse.ArgumentParser(description='Extrude a 2D mesh to make a 3D mesh') parser.add_argument("-n", "--num-cells", default=10, type=int, help="number of cells to extrude") parser.add_argument("-d", "--depth", default=40.0, type=float, help="depth to extrude") parser.add_argument("-o", "--outfile", default=None, type=str, help="output filename") parser.add_argument("-p", "--plot", default=False, action="store_true", help="plot the 2D mesh") parser.add_argument("infile",metavar="INFILE", type=str, help="input filename of surface mesh") options = parser.parse_args() if options.outfile is None: options.outfile = ".".join(options.infile.split(".")[:-1])+".exo" if os.path.isfile(options.outfile): print('Output file "%s" exists, cowardly not overwriting.'%options.outfile) sys.exit(1) if not os.path.isfile(options.infile): print('No input file provided') parser.print_usage() sys.exit(1) return options if __name__ == "__main__": options = commandline_options() m2 = Mesh2D.read_VTK(options.infile) if options.plot: m2.plot() m3 = Mesh3D.extruded_Mesh2D(m2, [options.depth,], [options.num_cells,], [10000,]) m3.write_exodus(options.outfile)
bsd-3-clause
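Besides the command-line path in __main__, the Mesh2D/Mesh3D classes above can be driven from Python directly. The following sketch assumes the same Python 2 environment the module targets and that the module is importable as meshing_ats; the transect values, layer thicknesses, material IDs and output file name are illustrative assumptions, while the keyword arguments follow the extruded_Mesh2D docstring.

import numpy as np
from meshing_ats import Mesh2D, Mesh3D

x = np.linspace(0.0, 100.0, 101)      # transect coordinate (m)
z = 0.1 * np.sin(x / 10.0)            # surface elevation (m)
m2 = Mesh2D.from_Transect(x, z, width=1.0)

# Two constant-thickness layers: a thin top layer over a thicker one,
# each extruded with its own vertical resolution and material ID.
m3 = Mesh3D.extruded_Mesh2D(m2,
                            layer_types=['constant', 'constant'],
                            layer_data=[0.5, 39.5],        # thicknesses (m)
                            ncells_per_layer=[5, 20],
                            mat_ids=[1001, 1002])
m3.write_exodus('transect_mesh.exo')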
qrqiuren/sms-tools
lectures/03-Fourier-properties/plots-code/fft-zero-phase.py
24
1140
import matplotlib.pyplot as plt import numpy as np from scipy.fftpack import fft, fftshift import sys sys.path.append('../../../software/models/') import utilFunctions as UF (fs, x) = UF.wavread('../../../sounds/oboe-A4.wav') N = 512 M = 401 hN = N/2 hM = (M+1)/2 start = .8*fs xw = x[start-hM:start+hM-1] * np.hamming(M) plt.figure(1, figsize=(9.5, 6.5)) plt.subplot(411) plt.plot(np.arange(-hM, hM-1), xw, lw=1.5) plt.axis([-hN, hN-1, min(xw), max(xw)]) plt.title('x (oboe-A4.wav), M = 401') fftbuffer = np.zeros(N) fftbuffer[:hM] = xw[hM-1:] fftbuffer[N-hM+1:] = xw[:hM-1] plt.subplot(412) plt.plot(np.arange(0, N), fftbuffer, lw=1.5) plt.axis([0, N, min(xw), max(xw)]) plt.title('fftbuffer: N = 512') X = fftshift(fft(fftbuffer)) mX = 20 * np.log10(abs(X)/N) pX = np.unwrap(np.angle(X)) plt.subplot(413) plt.plot(np.arange(-hN, hN), mX, 'r', lw=1.5) plt.axis([-hN,hN-1,-100,max(mX)]) plt.title('mX') plt.subplot(414) plt.plot(np.arange(-hN, hN), pX, 'c', lw=1.5) plt.axis([-hN,hN-1,min(pX),max(pX)]) plt.title('pX') plt.tight_layout() plt.savefig('fft-zero-phase.png') plt.show()
agpl-3.0
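The script above packs the windowed segment into the FFT buffer by hand, splitting the odd-length window around its centre sample. An equivalent and arguably clearer construction uses numpy's ifftshift; the sketch below assumes the script's xw, N and fftbuffer, and the helper name is made up.

import numpy as np

def zero_phase_buffer(xw, N):
    # Center the odd-length windowed segment in an N-sample buffer, then rotate
    # it so the window's middle sample lands at index 0 (zero-phase packing).
    M = len(xw)                                   # assumed odd, as in the script (M = 401)
    buf = np.zeros(N)
    mid = N // 2
    buf[mid - (M - 1) // 2: mid + (M + 1) // 2] = xw
    return np.fft.ifftshift(buf)

# np.allclose(zero_phase_buffer(xw, 512), fftbuffer) should hold for the
# buffer that the script assembles by hand.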
leotrs/decu
test/notsosimple_project/src/script.py
1
1196
""" testscript.py ------------- This is a test script for decu. """ from decu import Script, experiment, figure, run_parallel import numpy as np import matplotlib.pyplot as plt class TestScript(Script): @experiment(data_param='data') def exp(self, data, param, param2): """Compute x**param for each data point.""" self.log.info('Working hard for {}..'.format(TestScript.exp.run)) return np.power(data, param) + param2 @figure() def plot_result(self, data, result): """Plot results of experiment.""" plt.plot(data, result) @figure() def plot_many_results(self, data, results): """Plot results of experiment.""" plt.figure() for res in results: plt.plot(data, res) def main(self): """Run some experiments and make some figures.""" data = np.arange(5) result1 = self.exp(data, param=4, param2=10) self.plot_result(data, result1) param_list = [(data, x, y) for x, y in zip(np.arange(5), np.arange(5, 10))] result2 = run_parallel(self.exp, param_list) self.plot_many_results(data, result2, suffix='parallel')
mit
dhaitz/CalibFW
plotting/modules/plot_sandbox.py
1
75936
# -*- coding: utf-8 -*- """ plotting sanbox module for merlin. This module is to be used for testing or development work. """ import plotbase import copy import plot1d import getroot import math import plotresponse import plotfractions import plot2d import plot_tagging import fit import os def recogen_alpha_ptbins(files, opt): """ recogen vs alpha as well as Z pT vs alpha in pT bins. """ zptbins = [ "1", "zpt>30 && zpt<50", "zpt>50 && zpt<70", "zpt>70 && zpt<120", "zpt>120" ] texts = [ "$\mathrm{inclusive}$", "$30 < \mathrm{Z} p_\mathrm{T} < 50\ \mathrm{GeV}$", "$50 < \mathrm{Z} p_\mathrm{T} < 70\ \mathrm{GeV}$", "$70 < \mathrm{Z} p_\mathrm{T} < 120\ \mathrm{GeV}$", "$\mathrm{Z}\ p_\mathrm{T} > 120\ \mathrm{GeV}$", ] fig, axes = plotbase.newPlot(subplots = len(zptbins * 2), subplots_X = len(zptbins)) settings = plotbase.getSettings(opt, quantity='recogen_alpha') for ax1, ax2, selection, text in zip(axes[:(len(axes)/2)], axes[(len(axes)/2):], zptbins, texts): plot1d.datamcplot("recogen_alpha", files, opt, fig_axes = [fig, ax1], changes={ 'allalpha': True, 'y': [0.99, 1.1], 'subplot': True, 'nbins': 6, 'fit': 'slope', 'x': [0, 0.3], 'text': text, 'selection': [selection], } ) plot1d.datamcplot("zpt_alpha", files, opt, fig_axes = [fig, ax2], changes={ 'allalpha': True, 'y': [0, 300], 'subplot': True, 'nbins': 6, 'x': [0, 0.3], 'text': text, 'selection': [selection], } ) plotbase.Save(fig, settings) def corrs(files, opt): fig, ax = plotbase.newPlot() settings = plotbase.getSettings(opt, quantity='recogen_genpt') for quantity, marker, color, label in zip( ['raw/recogen_genpt', 'l1/recogen_genpt', 'l1l2l3/recogen_genpt'], ['o', 'D', '-'], ['black', '#7293cb', '#e1974c'], ['raw', 'L1', 'L1L2L3'] ): plot1d.datamcplot(quantity, files, opt, fig_axes = [fig, ax], changes={ 'algorithm': "", 'markers':[marker], 'colors':[color], 'labels':[label, ""], 'correction':"", 'subplot':True, 'grid': True, 'y': [0.9, 1.5], 'legloc': 'upper right', 'x': [20, 100], 'yname': 'recogen', 'xname':'genpt' }) settings['filename'] = plotbase.getDefaultFilename('recogen', opt, settings) plotbase.Save(fig, settings) def corrbins(files, opt): fig, ax = plotbase.newPlot() settings = plotbase.getSettings(opt, quantity='recogen') for quantity, marker, color, label, n in zip( ['l1l2l3/recogen3040', 'l1l2l3/recogen5080', 'l1l2l3/recogen100'], ['o', 'f', '-'], ['black', '#7293cb', '#e1974c'], ['pT 20-40', 'pT 50-80', 'pT >100'], range(10) ): plot1d.datamcplot(quantity, files, opt, fig_axes = [fig, ax], changes={ 'algorithm': "", 'markers':[marker], 'colors':[color], 'labels':[label, ""], 'correction':"", 'subplot':True, 'grid': True, 'fitlabel_offset':-0.07*n, 'legloc': 'upper right', 'x': [0, 2], 'xname':'recogen' }) settings['filename'] = plotbase.getDefaultFilename('recogen-bins', opt, settings) plotbase.Save(fig, settings) def zmassFitted(files, opt, changes=None, settings=None): """ Plots the FITTED Z mass peak position depending on pT, NPV, y.""" quantity = "zmass" # iterate over raw vs corr electrons for mode in ['raw', 'corr']: filenames = ['work/data_ee_%s.root' % mode, 'work/mc_ee_powheg_%s.root' % mode] files, opt = plotbase.openRootFiles(filenames, opt) # iterate over quantities for xq, xbins in zip( ['npv', 'zpt', 'zy'], [ [a - 0.5 for a, b in opt.npv] + [opt.npv[-1][1] - 0.5], opt.zbins, [(i/2.)-2. 
for i in range(0, 9)], ] ): # iterate over Z pt (inclusive/low,medium,high) for ptregion, ptselection, ptstring in zip(["_inclusivept", "_lowpt", "_mediumpt", "_highpt"], [ "1", "zpt<60", "zpt>60 && zpt < 120", "zpt>120", ], [ "", "Z $p_\mathrm{T}$ < 60 GeV", "60 < Z $p_\mathrm{T}$ < 120 GeV", "Z $p_\mathrm{T}$ > 120 GeV", ]): # iterate over electron eta regions for etaregion, etaselection, etastring in zip( ["_all", "_EBEB", "_EBEE", "_EEEE"], [ "1", "abs(eminuseta) < 1.5 && abs(epluseta) < 1.5", "((abs(eminuseta) < 1.5 && abs(epluseta) > 1.6) || (abs(epluseta) < 1.5 && abs(eminuseta) > 1.6))", "abs(eminuseta) > 1.6 && abs(epluseta) > 1.6", ], [ "", "EB-EB", "EB-EE & EE-EB", "EE-EE", ]): # we dont need pt-binned Z pT plots: if xq == 'zpt' and ptselection is not "1": continue rootobjects, rootobjects2 = [], [] fig = plotbase.plt.figure(figsize=[7, 10]) ax = plotbase.plt.subplot2grid((3, 1), (0, 0), rowspan=2) ax.number = 1 ax2 = plotbase.plt.subplot2grid((3, 1), (2, 0)) ax2.number = 2 fig.add_axes(ax) fig.add_axes(ax2) # print the Z pt and electron eta region on the plot ax.text(0.98, 0.98, ptstring, va='top', ha='right', transform=ax.transAxes) ax.text(0.98, 0.9, etastring, va='top', ha='right', transform=ax.transAxes) changes = { 'y': [90.8, 94.8], 'yname': r'$m^{\mathrm{Z}}$ (peak position from Breit-Wigner fit) / GeV', 'legloc': 'upper left', 'title': mode + " electrons", 'labels': ['Data', 'Powheg'], } settings = plotbase.getSettings(opt, changes=changes, quantity=quantity + "_" + xq) # iterate over files markers = ['o', 'D'] ys, yerrs, xs = [], [], [] for i, f in enumerate(files): bins = xbins y, yerr, x = [], [], [] # iterate over bins for lower, upper in zip(bins[:-1], bins[1:]): changes = { 'selection': ['(%s > %s && %s < %s) && (%s) && (%s)' % (xq, lower, xq, upper, ptselection, etaselection)], 'nbins': 40, 'folder': 'zcuts', 'x': [71, 101], } local_settings = plotbase.getSettings(opt, changes, None, quantity) # get the zmass, fit, get the xq distribution; append to lists rootobjects += [getroot.histofromfile(quantity, f, local_settings, index=i)] p0, p0err, p1, p1err, p2, p2err, chi2, ndf, conf_intervals = fit.fitline2(rootobjects[-1], breitwigner=True, limits=local_settings['x']) y += [p1] yerr += [p1err] changes['x'] = [lower, upper] local_settings = plotbase.getSettings(opt, changes, None, quantity) rootobjects2 += [getroot.histofromfile(xq, f, local_settings, index=i)] x += [rootobjects2[-1].GetMean()] # fine line to indicate bin borders ax.add_line(plotbase.matplotlib.lines.Line2D((lower, upper), (y[-1],y[-1]), color='black', alpha=0.05)) ys.append(y) yerrs.append(yerr) xs.append(x) #plot ax.errorbar(x, y, yerr, drawstyle='steps-mid', color=settings['colors'][i], fmt=markers[i], capsize=0, label=settings['labels'][i]) # format and save if xq == 'zpt': settings['xlog'] = True settings['x'] = [30, 1000] settings['xticks'] = [30, 50, 70, 100, 200, 400, 1000] plot1d.formatting(ax, settings, opt, [], []) # calculate ratio values ratio_y = [d/m for d, m in zip(ys[0], ys[1])] ratio_yerrs = [math.sqrt((derr/d)**2 + (merr/m)**2)for d, derr, m, merr in zip(ys[0], yerrs[0], ys[1], yerrs[1])] ratio_x = [0.5 * (d + m) for d, m in zip(xs[0], xs[1])] #format ratio plot ax2.errorbar(ratio_x, ratio_y, ratio_yerrs, drawstyle='steps-mid', color='black', fmt='o', capsize=0, label='ratio') ax.axhline(1.0) fig.subplots_adjust(hspace=0.1) ax.set_xticklabels([]) ax.set_xlabel("") settings['ratio'] = True settings['legloc'] = None settings['xynames'][1] = 'ratio' plot1d.formatting(ax2, settings, 
opt, [], []) ax2.set_ylim(0.99, 1.01) settings['filename'] = plotbase.getDefaultFilename(quantity + "_" + xq + "_" + mode + ptregion + etaregion, opt, settings) plotbase.Save(fig, settings) def zmassEBEE(files, opt): """ Plot the Z mass depending on where the electrons are reconstructed. 3 bins: EB-EB, EB-EE, EE-EE """ selections = [ 'abs(eminuseta)<1.5 && abs(epluseta)<1.5', '(abs(eminuseta)>1.5 && abs(epluseta)<1.5) || abs(eminuseta)<1.5 && abs(epluseta)>1.5', 'abs(eminuseta)>1.5 && abs(epluseta)>1.5', ] filenames = ['zmass_ebeb', 'zmass_ebee', 'zmass_eeee'] titles = ['Barrel electrons only', 'One electron barrel, one endcap', 'Endcap electrons only'] for selection, filename, title in zip(selections, filenames, titles): plot1d.plot1dratiosubplot("zmass", files, opt, changes = { 'x': [81, 101], 'selection': [selection, "hlt * (%s)" % selection], 'fit': 'bw', 'nbins': 40, 'filename': filename, 'title': title, 'folder': 'zcuts', }) def eid(files, opt): quantity = 'mvaid' """changes = { 'x': [0, 1.0001], #'log': True, 'folder': 'electron_all', 'nbins':50, 'subplot':True, 'markers': ['f'], } settings = plotbase.getSettings(opt, quantity=quantity) fig, ax = plotbase.newPlot() for c, l, s in zip(['#236BB2', '#E5AD3D'], ['fake', 'true'], ['1', 'deltar < 0.3 && deltar>0']): changes.update({ 'labels': [l], 'colors': [c], 'selection': s, }) plot1d.datamcplot(quantity, files, opt, fig_axes = [fig, ax], changes=changes) settings['filename'] = plotbase.getDefaultFilename(quantity, opt, settings) plotbase.Save(fig, settings)""" ## id vs deltar for quantity in ["mvaid", "mvatrigid", "looseid", "mediumid", "tightid"]: plot1d.datamcplot("%s_deltar" % quantity, files, opt, changes = { 'folder': 'electron_all', 'nbins': 50, 'xynames': ['$\Delta$R(reco, gen)', quantity], 'x': [0, 0.5], 'legloc': None, }) def plots_2014_07_03(files, opt): """ Plots for JEC presentation 03.07. 
""" #### 2D histograms for obj, x, nbins in zip(['muon', 'jet', 'electron'], [[-2.5, 2.5], [-5.3, 5.3]]*2, [400, 1000, 300]): changes = { 'out': 'out/2014_07_03', 'y': [-3.2, 3.2], } changes.update({ 'folder': obj + "_all", 'nbins': nbins, 'x':x, 'filename': obj + '_phi_eta', 'xynames': ['%s eta' % obj, '%s phi' % obj, obj + 's'], }) if obj is 'electron': filenames = ["data_ee_noc", "mc_ee_corr_test"] else: filenames = ["data_noc", "mc_rundep_noc"] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] plot2d.twoD("phi_eta", files, opt, changes = changes) if obj is not 'electron': changes.update({ 'year': 2011, 'filename': obj + '_phi_eta_2011', 'lumi': 5.1, 'energy': 7, }) filenames = ["data11_noc"] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] plot2d.twoD("phi_eta", files, opt, changes = changes) ##### PU Jet ID filenames = ["dataPUJETID", "data"] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'normalize': False, 'ratiosubplot': 'True', 'ratiosubploty': [0.8, 1.2], 'out': 'out/2014_07_03', 'x': [30, 250], 'title': 'Data', 'labels': ['PUJetID applied', 'default'], } plot1d.datamcplot('zpt', files, opt, changes=changes) for typ in ['mpf', 'ptbalance']: plotresponse.responseratio(files, opt, over='zpt', types=[typ], changes={ 'labels': ['PUJetID applied', 'default'], 'out': 'out/2014_07_03', 'x': [30, 1000], 'xlog': True, }) ##### timedep filenames = ["data", "mc_rundep"] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'out': 'out/2014_07_03', 'filename': "timedep", } timedep(files, opt, changes=changes) ###### MPF fix filenames = [ "/storage/a/dhaitz/excalibur/artus/mc_rundep_2014-06-18_10-41/out.root", "/storage/a/dhaitz/excalibur/artus/mc_rundep_2014-06-06_14-26/out.root" ] files = [getroot.openfile(f) for f in filenames] plotresponse.responseratio(files, opt, over='zpt', types=['mpf'], changes={ 'labels': ['MCRD-fixed', 'MCRD'], 'xlog': True, 'filename': "mpf_zpt-fixed", 'out': 'out/2014_07_03', 'x': [30, 1000], 'xticks': [30, 50, 70, 100, 200, 400, 1000], }) # mpf slopes filenames = ["data", "mc_rundep"] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'filename': "mpfslopes-fixed", 'labels': ['data', 'MCRD'], 'out': 'out/2014_07_03', 'allalpha': True, 'selection': 'alpha<0.3', } mpfslopes(files, opt, changes) changes.update({ 'filename': "mpfslopes", 'labels': ['data', 'MCRD'], }) filenames = [ '/storage/a/dhaitz/excalibur/artus/data_2014-04-10_21-21/out.root', '/storage/a/dhaitz/excalibur/artus/mc_rundep_2014-06-06_14-26/out.root' ] files = [getroot.openfile(f) for f in filenames] mpfslopes(files, opt, changes) # SYNC os.system("rsync ${EXCALIBUR_BASE}/out/2014_07_03 ekplx26:plots -r") def timedep(files, opt, changes = None): """ Plots for the time dependence, requested by Mikko 2014-06-25.""" settings = plotbase.getSettings(opt, quantity="response_run", changes=changes) fig, ax = plotbase.newPlot() factor = 2e4 methods = ['mpf', 'ptbalance'] labels = ['MPF', '$p_T$ balance'] for q, c, l, m, in zip(methods, settings['colors'], labels, settings['markers']): slopes, serrs, x = [], [], [] for eta1, eta2 in zip(opt.eta[:-1], opt.eta[1:]): changes = { 'alleta': True, 'allalpha': True, 
'selection': 'alpha<0.3 && abs(jet1eta) > %s && abs(jet1eta) < %s' % (eta1, eta2), 'fit': 'slope', } rootobject = getroot.histofromfile("%s_run" % q, files[0], settings, changes=changes) # get fit parameters slope, serr = fit.fitline2(rootobject)[2:4] slopes += [slope*factor] serrs += [serr*factor] changes['x'] = [0, 6] x += [getroot.histofromfile("abs(jet1eta)", files[0], settings, changes=changes).GetMean()] ax.errorbar(x, slopes, serrs, drawstyle='steps-mid', color=c, fmt='o', capsize=0, label=l) #formatting stuff settings['x'] = [0, 5] plotbase.setAxisLimits(ax, settings) plotbase.labels(ax, opt, settings) plotbase.axislabels(ax, 'Leading jet $\eta$', 'Response vs run: linear fit slope (muliplied with 20 000)', settings=settings) ax.set_ylim(-0.1, 0.05) ax.set_xlim(0, 5.25) ax.grid(True) ax.set_xticks([float("%1.2f" % eta) for eta in opt.eta]) for label in ax.get_xticklabels(): label.set_rotation(45) ax.axhline(0.0, color='black', linestyle='--') settings['filename'] = quantity="response_run" plotbase.Save(fig, settings) def npuplot(files, opt): """ Plots for the JEC paper that Mikko requested 24.4.: npv and rho in bins of npu.""" settings = plotbase.getSettings(opt, quantity='npv') settings['x'] = [-0.5, 99.5] settings['nbins'] = 100 tgraphs = [] for f in files: if files.index(f) == 0: # flag bad runs in data runs = "run!=191411 && run!=198049 && run!=198050 && run!=198063 && run!=201727 && run!=203830 && run!=203832 && run!=203833 && run!=203834 && run!=203835 && run!=203987 && run!=203992 && run!=203994 && run!=204100 && run!=204101 && run!=208509" else: runs = 1 npuhisto = getroot.histofromfile('nputruth', f, settings) for i in range(100): if npuhisto.GetBinContent(i) > 0: npu = i tgraph = ROOT.TGraphErrors() for n in range(npu): changes = {'selection': 'nputruth>%s && nputruth<%s && %s' % (n-0.5, n+0.5, runs)} npv = getroot.histofromfile('npv', f, settings, changes=changes).GetMean() npverr = getroot.histofromfile('npv', f, settings, changes=changes).GetMeanError() rho = getroot.histofromfile('rho', f, settings, changes=changes).GetMean() rhoerr = getroot.histofromfile('rho', f, settings, changes=changes).GetMeanError() tgraph.SetPoint(n, npv, rho) tgraph.SetPointError(n, npverr, rhoerr) tgraphs.append(tgraph) settings['root'] = settings['root'] or settings['filename'] getroot.saveasroot(tgraphs, opt, settings) def electronupdate(files, opt): """Plots for the Zee update 26.06.2014.""" # Reco/gen electron pt vs eta filenames = ['mc_ee_raw', 'mc_ee_corr'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes={ 'x': [0, 2.5], 'y': [0.9, 1.1], 'nbins': 25, 'labels': ['raw', 'corrected'], 'markers': ['o', '-'], 'colors': ['maroon', 'blue'], 'folder':'zcuts', 'y': [0.94, 1.06], 'title': 'Madgraph', 'xynames': [ r"$|\eta_{e^{-}} \| $", r'$\mathrm{e}^{-} p_\mathrm{T}$ Reco/Gen' ] } plot1d.datamcplot('eminuspt/geneminuspt_abs(eminuseta)', files, opt, changes=changes) changes={ 'ratiosubplot': True, 'title': 'Madgraph', 'x': [0, 1000], 'log': True, 'labels': ['raw', 'corrected'], 'folder': 'all', 'ratiosubplotfit': 'chi2', } plot1d.datamcplot('zpt', files, opt, changes=changes) #LHE information fig, ax = plotbase.newPlot() fig2, ax2 = plotbase.newPlot() changes ={ 'folder':'all', 'x': [-4, 4], 'y': [0, 200000], 'subplot': True, 'nbins':50, 'normalize': False, 'xynames': ['Z rapidity', 'Events'], 'log':True, } for q, c, m, l in zip( ['zy', 'genzy', 'lhezy'], ['black', 'lightskyblue', 'FireBrick'], ['o', 'f', '-'], 
['RecoZ', 'GenZ', 'LHE-Z'], ): changes['labels'] = [l] changes['markers'] = [m] changes['colors'] = [c] plot1d.datamcplot(q, files[1:], opt, changes=changes, fig_axes=[fig, ax]) settings = plotbase.getSettings(opt, None, None, 'rapidity') settings['filename'] = 'rapidity' plotbase.Save(fig, settings) # Data-MC comparisons ###################################################### # basic quantities filenames = ['data_ee_corr', 'mc_ee_corr'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'x': [-3, 3], 'y': [-3.2, 3.2], 'folder': 'all', 'nbins': 200, } plot2d.twoD('eminusphi_eminuseta', files, opt, changes=changes) for q, c in zip(['eminuspt', 'eminuseta', 'zy', 'zpt', 'zmass'], [ {}, {'x': [-2.5, 2.5]}, {}, {'x': [0, 500], 'log':True}, {'x': [80, 102], 'ratiosubploty':[0.9, 1.1]}, ]): changes = { 'labels': ['Data', 'Madgraph'], 'ratiosubplot': True, 'folder':'zcuts', 'nbins': 50, } changes.update(c) plot1d.datamcplot(q, files, opt, changes=changes) # scale factors changes = { 'x': [0, 100], 'y': [0, 3], 'z': [0.8, 1.2], 'folder': 'all', 'nbins': 100, 'selection': 'sfminus>0', 'colormap': 'bwr', } plot2d.twoD('sfminus_abs(eminuseta)_eminuspt', files[1:], opt, changes=changes) # zpt in rapidities for ybin in [[i/2., (i+1)/2.] for i in range(5)]: changes = { 'x': [0, 600], 'nbins': 30, 'folder':'zcuts', 'title': "%s < $y_Z$ < %s" % tuple(ybin), 'log': 'True', 'ratiosubplot': True, 'selection': 'abs(zy)>%s && abs(zy)<%s' % (ybin[0], ybin[1]), 'filename': ('zpt_rap-%s-%s' % (ybin[0], ybin[1])).replace('.', '_'), } plot1d.datamcplot('zpt', files, opt, changes=changes) # scale factor changes = { 'labels': ['Madgraph'], 'ratiosubplot': True, 'xynames':['eminuspt', r"$|\eta_{e^{-}} \| $"], 'folder':'all', 'x': [0, 60], 'y': [0, 3], 'colormap': 'bwr', 'z': [0.5, 1], } q = 'sfminus_abs(eminuseta)_eminuspt' plot2d.twoD(q, files[1:], opt, changes=changes) ############## # Plot for ID acceptance fig, ax = plotbase.newPlot() changes ={ 'folder':'all', 'x': [0, 150], 'y': [0, 1], 'subplot': True, 'normalize': False, 'legloc': 'lower right', 'xynames': ['eminuspt', 'Acceptance'] } filenames = ['mc_ee_corr_noid'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] for q, c, m, l in zip( ['eminusidtight', 'eminusidmedium', 'eminusidloose', 'eminusidveto', 'eminusid'], ['lightskyblue', 'FireBrick', 'green', 'black', 'blue'], ['f', '_', '-', "o", "*"], ['Tight ID', 'Medium ID', 'Loose ID', "Veto ID", "MVA ID"], ): changes['labels'] = [l] changes['markers'] = [m] changes['colors'] = [c] plot1d.datamcplot("%s_eminuspt" % q, files, opt, changes=changes, fig_axes=[fig, ax]) settings = plotbase.getSettings(opt, None, None, 'id') settings['filename'] = 'id' settings['title'] = 'MC' plotbase.Save(fig, settings) def mpfslopes(files, opt, changes=None): """ Plot the slope of a linear fit on MPF vs NPV, in Z pT bins.""" quantity="mpf_npv" settings = plotbase.getSettings(opt, quantity=quantity, changes=changes) settings['special_binning'] = True print opt.zbins fig, ax = plotbase.newPlot() for f, c, l, m, in zip(files, settings['colors'], settings['labels'], settings['markers']): slopes, serrs, x = [], [], [] # iterate over Z pT bins for ptlow, pthigh in zip(opt.zbins[:-1], opt.zbins[1:]): changes = {'selection':'zpt>%s && zpt<%s' % (ptlow, pthigh)} rootobject = getroot.histofromfile(quantity, f, settings, changes=changes) # get fit parameters and mean Z pT; append to 
lists slope, serr = fit.fitline2(rootobject)[2:4] slopes += [slope] serrs += [serr] x += [getroot.histofromfile("zpt", f, settings, changes=changes).GetMean()] ax.errorbar(x, slopes, serrs, drawstyle='steps-mid', color=c, fmt='o', capsize=0, label=l) #formatting stuff settings['x'] = [30, 100] plotbase.setAxisLimits(ax, settings) plotbase.labels(ax, opt, settings) ax.set_xscale('log') settings['xticks'] = opt.zbins plotbase.axislabels(ax, 'zpt', 'slope from fit on MPF vs NPV', settings=settings) ax.set_ylim(-0.002, 0.002) ax.grid(True) ax.axhline(0.0, color='black', linestyle='--') plotbase.Save(fig, settings) def pileup(files, opt): for ptlow, pthigh in zip(opt.zbins[:-1], opt.zbins[1:]): plotresponse.responseratio(files, opt, over='npv', types=['mpf'], changes={ 'allalpha':True, 'selection':'alpha<0.3 && zpt>%s && zpt<%s' % (ptlow, pthigh), 'filename': "mpf_npv_%s-%s" % (ptlow, pthigh) } ) def emucomparison(files, opt): values = [] valueerrs = [] for filenames in [['data', 'mc'], ['data_ee', 'mc_ee']]: files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] for quantity in ['mpf', 'ptbalance']: settings = plotbase.getSettings(opt, None, None, quantity) settings['nbins'] = 40 settings['correction'] = 'L1L2L3' if 'ee' in filenames[0]: if settings['selection']: settings['selection'] = 'abs(epluseta<1.0) && abs(eminuseta)<1.0 && %s' % settings['selection'] else: settings['selection'] = 'abs(epluseta<1.0) && abs(eminuseta)<1.0' datamc = [] rootobjects = [] fitvalues = [] for f in files: rootobjects += [getroot.histofromfile(quantity, f, settings)] p0, p0err, p1, p1err, p2, p2err, chi2, ndf, conf_intervals = fit.fitline2(rootobjects[-1], gauss=True, limits=[0, 2]) fitvalues += [p1, p1err] ratio = fitvalues[0] / fitvalues[2] ratioerr = math.sqrt(fitvalues[1] ** 2 + fitvalues[3] ** 2) values.append(ratio) valueerrs.append(ratioerr) fig, ax = plotbase.newPlot() ax.errorbar(range(4), values, valueerrs, drawstyle='steps-mid', color='black', fmt='o', capsize=0,) ax.set_xticks([0, 1, 2, 3]) ax.set_xticklabels(['Zmm\nMPF', 'Zmm\npT balance', 'Zee\nMPF', 'Zee\npT balance']) ax.set_xlim(-0.5, 3.5) ax.set_ylim(0.96, 1.001) ax.axhline(1.0, color='black', linestyle=':') ax.set_ylabel('Jet response Data/MC ratio', ha="right", x=1) plotbase.Save(fig, settings) def electrons(files, opt): """ Standard set of plots for the dielectron analysis. 
""" filenames = ['data_ee', 'mc_ee'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] base_changes = { 'out': 'out/ee2014', 'folder': 'zcuts', # no additional restrictions on jets 'normalize': False, # no normalizing to check if the lumi reweighting works 'factor': 1., # on the fly lumi reweighting 'efficiency': 1., # no trigger reweighting for electrons 'ratiosubplot': True, } # zmass with fit changes = { 'legloc': 'center right', 'nbins': 50, 'fit': 'gauss' } changes.update(base_changes) plot1d.datamcplot('zmass', files, opt, changes=changes) #electron quantities for charge in ['plus', 'minus']: changes = { 'x': [0, 150], 'nbins': 40, } changes.update(base_changes) plot1d.datamcplot('e%spt' % charge, files, opt, changes=changes) changes['x'] = [-2.5, 2.5] plot1d.datamcplot('e%seta' % charge, files, opt, changes=changes) changes['x'] = None plot1d.datamcplot('e%sphi' % charge, files, opt, changes=changes) changes['legloc'] = 'center right' changes['filename'] = 'zmass_barrel' changes['selection'] = 'abs(epluseta)<1.0 && abs(eminuseta)<1.0' changes['title'] = '|eta(e)| < 1.0' changes['fit'] = 'gauss' plot1d.datamcplot('zmass', files, opt, changes=changes) changes['filename'] = 'zmass_endcap' changes['selection'] = 'abs(epluseta)>1.0 && abs(eminuseta)>1.0' changes['title'] = '|eta(e)| > 1.0' changes['fit'] = 'gauss' plot1d.datamcplot('zmass', files, opt, changes=changes) #electron quantities for charge in ['plus', 'minus']: changes = { 'x': [0, 150], 'nbins': 40, } changes.update(base_changes) plot1d.datamcplot('e%spt' % charge, files, opt, changes=changes) changes['x'] = [-2.5, 2.5] plot1d.datamcplot('e%seta' % charge, files, opt, changes=changes) changes['x'] = None plot1d.datamcplot('e%sphi' % charge, files, opt, changes=changes) # Z pT in rapidity bins rapbins = ['abs(zy)<1', 'abs(zy)>1 && abs(zy)<2', 'abs(zy)>2 && abs(zy)<3'] raplabels = ['|Y(Z)|<1', '1<|Y(Z)|<2', '2<|Y(Z)|<3'] rapname = ['0zy1', '1zy2', '2zy3'] for rbin, rlabel, rname in zip(rapbins, raplabels, rapname): changes = { 'selection': rbin, 'filename': 'zpt-%s' % rname, 'x': [30, 750], 'log': True, 'title': rlabel, 'nbins': 40, } changes.update(base_changes) plot1d.datamcplot('zpt', files, opt, changes=changes) #electron quantities for charge in ['plus', 'minus']: changes = { 'x': [0, 150], 'nbins': 40, } changes.update(base_changes) plot1d.datamcplot('e%spt' % charge, files, opt, changes=changes) changes['x'] = [-2.5, 2.5] plot1d.datamcplot('e%seta' % charge, files, opt, changes=changes) changes['x'] = None plot1d.datamcplot('e%sphi' % charge, files, opt, changes=changes) # npv changes = { 'folder': 'all', } changes.update(base_changes) changes['folder'] = 'all' plot1d.datamcplot('npv', files, opt, changes=changes) changes['noweighting'] = True changes['factor'] = 3503.71 / 30459503 * 1000 changes['filename'] = 'npv_noweights' plot1d.datamcplot('npv', files, opt, changes=changes) changes['noweighting'] = True changes['factor'] = 3503.71 / 30459503 * 1000 changes['filename'] = 'npv_noweights' plot1d.datamcplot('npv', files, opt, changes=changes) # z pt and rapidity changes = { 'nbins': 40, } changes.update(base_changes) plot1d.datamcplot('zy', files, opt, changes=changes) plot1d.datamcplot('zeta', files, opt, changes=changes) changes['x'] = [30, 750] changes['log'] = True plot1d.datamcplot('zpt', files, opt, changes=changes) #powheg comparison filenames = ['data_ee', 'mc_ee', 'mc_ee_powheg'] files = [getroot.openfile("%s/work/%s.root" % 
(plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'log': True, 'x': [30, 750], 'nbins': 40, 'filename': 'zpt_mad-pow', 'labels': ['Data', 'Madgraph', 'Powheg'], } changes.update(base_changes) plot1d.datamcplot('zpt', files, opt, changes=changes) changes = { 'nbins': 40, 'filename': 'zmass_mad-pow', 'labels': ['Data', 'Madgraph', 'Powheg'], } changes.update(base_changes) plot1d.datamcplot('zmass', files, opt, changes=changes) files = files[::2] filenames = filenames[::2] changes = { 'log':True, 'x': [30, 750], 'nbins': 40, 'filename': 'zpt_pow', 'labels':['Data', 'Powheg'], } changes.update(base_changes) plot1d.Datamcplot('zpt', files, opt, changes=changes) #backgrounds filenames = ['Data_ee', 'mc_ee', 'background_ee'] files = [getroot.openfile("%s/work/%s.root" % (plotbase.os.environ['EXCALIBUR_BASE'], f), opt.verbose) for f in filenames] changes = { 'log': True, 'x': [30, 750], 'filename': 'zpt_backgrounds', 'labels': ['Data', 'MC', 'Backgrounds'], 'markers': ['o', 'f', 'f'], 'stacked': True, 'ratiosubplot': False, } changes.update(base_changes) changes['ratiosubplot'] = False plot1d.datamcplot('zpt', files, opt, changes=changes) changes.pop('x', None) changes['filename'] = 'zmass_backgrounds' changes['log'] = False changes['ratiosubplot'] = False plot1d.datamcplot('zmass', files, opt, changes=changes) # sync the plots import subprocess subprocess.call(['rsync out/ee2014 dhaitz@ekplx26:plots/ -u -r --progress'], shell=True) """ merlin 2D_zmass_zpt --files $DATAEE $ARGS -x 0 50 --nbins 100 -y 80 100 -o $OUT merlin eemass -o $OUT --files $DATAEE $ARGS --nbins 100 -x 0 120 -C lightskyblue -m f --folder all merlin eemass -o $OUT --files $DATAEE $ARGS --nbins 100 -x 0 15 --filename eemass_low -C lightskyblue -m f --folder all merlin 2D_zpt_zy -o $OUT --files $DATAEE $ARGS -y 0 100 --nbins 100 """ def an(files, opt): """ Plots for the 2014 Z->mumu JEC AN.""" """ #MET for quantity in ['METpt', 'METphi']: plot1d.datamcplot(quantity, files, opt, changes = {'title': 'CMS preliminary'}) plot1d.datamcplot("npv", files, opt, changes = {'folder': 'all', 'title': 'CMS preliminary'}) for n in ['1', '2']: for quantity in ['pt', 'eta', 'phi']: plot1d.datamcplot('mu%s%s' % (n, quantity), files, opt, changes = {'title': 'CMS preliminary'}) if n is '2' and quantity is 'eta': plot1d.datamcplot('jet%s%s' % (n, quantity), files, opt, changes = {'nbins': 10, 'correction': 'L1L2L3', 'title': 'CMS preliminary'}) else: plot1d.datamcplot('jet%s%s' % (n, quantity), files, opt, changes = {'correction': 'L1L2L3', 'title': 'CMS preliminary'}) for quantity in ['zpt', 'zeta', 'zy', 'zphi', 'zmass']: plot1d.datamcplot(quantity, files, opt, changes = {'title': 'CMS preliminary'}) #response stuff plotresponse.responseratio(files, opt, over='zpt', types=['mpf'], changes={'y': [0.98, 1.03, 0.96, 1.03], 'x': [0, 400, 0, 400]}) plotresponse.responseratio(files, opt, over='jet1abseta', types=['mpf'], changes={'y': [0.95, 1.1, 0.93, 1.1]}) plotresponse.responseratio(files, opt, over='npv', types=['mpf'], changes={'y': [0.95, 1.05, 0.92, 1.03], 'x': [0, 35, 0, 35]}) plotresponse.responseratio(files, opt, over='zpt', types=['ptbalance'], changes={'y': [0.93, 1.01, 0.96, 1.03], 'x': [0, 400, 0, 400]}) plotresponse.responseratio(files, opt, over='jet1abseta', types=['ptbalance'], changes={'y': [0.91, 1.01, 0.93, 1.1]}) plotresponse.responseratio(files, opt, over='npv', types=['ptbalance'], changes={'y': [0.91, 1.01, 0.92, 1.03], 'x': [0, 35, 0, 35]}) """ for q in ['mpf', 'ptbalance']: 
plot1d.datamcplot(q, files, opt, changes={'correction': 'L1L2L3', 'legloc': 'center right', 'nbins': 100, 'fit': 'gauss'}) plotresponse.extrapol(files, opt, changes={'save_individually': True, 'correction': 'L1L2L3'}) """ plotfractions.fractions(files, opt, over='zpt', changes={'x': [0, 400], 'title': 'CMS preliminary'}) plotfractions.fractions(files, opt, over='jet1abseta', changes = {'title': 'CMS preliminary'}) plotfractions.fractions(files, opt, over='npv', changes = {'title': 'CMS preliminary'}) for changes in [{'rebin':10, 'title':'|$\eta^{\mathrm{jet}}$|<1.3'}, {'alleta':True, 'rebin':10, 'selection':'jet1abseta>2.5 && jet1abseta<2.964', 'title':'2.5<|$\eta^{\mathrm{jet}}$|<2.964'}]: if 'alleta' in changes: opt.out += '/ECOT' opt.user_options['out'] += '/ECOT' plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes, nbr=6) plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes, nbr=6) plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes, nbr=6) plotresponse.response_run(files, opt, changes=changes) opt.out = opt.out[:-5] opt.user_options['out'] = opt.user_options['out'][:-5] else: plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes) plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes) plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes) plotresponse.response_run(files, opt, changes=changes) changes['y'] = [0.84, 1.2] plot2d.twoD("qgtag_btag", files, opt, changes = {'title': 'CMS Preliminary', 'nbins':50} ) plot_tagging.tagging_response(files, opt) plot_tagging.tagging_response_corrected(files, opt) """ ## MCONLY if len(files) > 1: files = files[1:] """ # PF composition as function of mc flavour flavour_comp(files, opt, changes={'title': 'CMS Simulation','mconly':True}) # response vs flavour for var in [True, False]: plotresponse.response_physflavour(files, opt, changes={'title': 'CMS Simulation','mconly':True}, add_neutrinopt=var, restrict_neutrals=var, extrapolation=var) plotfractions.flavour_composition(files, opt, changes={'title': 'CMS Simulation','mconly':True}) plotfractions.flavour_composition_eta(files, opt, changes={'title': 'CMS Simulation','mconly':True, 'selection': 'zpt>95 && zpt<110'}) changes = {'cutlabel' : 'ptetaalpha', 'labels' : ['Pythia 6 Tune Z2*', 'Herwig++ Tune EE3C'], 'y' : [0.98, 1.05], 'markers' : ['o', 'd'], 'colors' : ['red', 'blue'], 'title' : 'CMS Simulation', 'mconly' : True, 'legloc' : 'lower left', 'filename': 'recogen_physflavour_pythia-herwig'} files += [getroot.openfile("/storage/a/dhaitz/excalibur/work/mc_herwig/out/closure.root")] plot1d.datamcplot("recogen_physflavour", files, opt, changes=changes) """ def eleven(files, opt): """ Summary of the plots for the response studies with 2011 rereco. 
""" runrange = [160000, 183000] plot1d.datamcplot('npv', files, opt, changes={'rebin': 1}) plot1d.datamcplot('zmass', files, opt, changes={'fit': 'vertical', 'legloc': 'center right'}) plotresponse.extrapol(files, opt) plotresponse.responseratio(files, opt, over='zpt', types=['mpf'], changes={'y': [0.98, 1.03, 0.96, 1.03], 'uncertaintyband': True, 'x': [0, 400, 0, 400]}) plotresponse.responseratio(files, opt, over='jet1abseta', types=['mpf'], changes={'y': [0.95, 1.1, 0.93, 1.1], 'uncertaintyband': True}) plotresponse.responseratio(files, opt, over='npv', types=['mpf'], changes={'y': [0.95, 1.05, 0.92, 1.03], 'uncertaintyband': True, 'x': [0, 18, 0, 18]}) plotresponse.responseratio(files, opt, over='zpt', types=['ptbalance'], changes={'y': [0.93, 1.01, 0.96, 1.03], 'x': [0, 400, 0, 400], 'uncertaintyband': True}) plotresponse.responseratio(files, opt, over='jet1abseta', types=['ptbalance'], changes={'y': [0.91, 1.01, 0.93, 1.1], 'uncertaintyband': True}) plotresponse.responseratio(files, opt, over='npv', types=['ptbalance'], changes={'y': [0.91, 1.01, 0.92, 1.03], 'x': [0, 18, 0, 18], 'uncertaintyband': True}) plot1d.datamcplot('npv_run', files, opt, changes={'x': runrange, 'y': [0, 15], 'run': True, 'fit': True}) plotfractions.fractions(files, opt, over='zpt', changes={'x': [0, 400]}) plotfractions.fractions(files, opt, over='jet1abseta') plotfractions.fractions(files, opt, over='npv', changes={'x': [-0.5, 24.5]}) for changes in [{'x': runrange, 'rebin':10, 'title':'|$\eta^{\mathrm{jet}}$|<1.3'}, {'x': runrange, 'alleta':True, 'rebin':10, 'selection':'jet1abseta>2.5 && jet1abseta<2.964', 'title':'2.5<|$\eta^{\mathrm{jet}}$|<2.964'}]: if 'alleta' in changes: opt.out += '/ECOT' opt.user_options['out'] += '/ECOT' plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes, nbr=6) plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes, nbr=6) plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes, nbr=6) else: plotfractions.fractions_run(files, opt, diff=True, response=True, changes=changes) plotfractions.fractions_run(files, opt, diff=False, response=True, changes=changes) plotfractions.fractions_run(files, opt, diff=True, response=False, changes=changes) changes['y'] = [0.84, 1.2] plotresponse.response_run(files, opt, changes=changes) def rootfile(files, opt): """Function for the rootfile sent to the JEC group in early August 2013.""" list_of_quantities = ['ptbalance_alpha', 'mpf_alpha', 'ptbalance', 'mpf', 'zpt', 'npv', 'zmass', 'zpt_alpha', 'npv_alpha', 'ptbalance_zpt', 'mpf_zpt', 'ptbalance_npv', 'mpf_npv', ] for muon in [["zmumu", "1"], ["zmumu_muoncuts", "(mupluspt>25 && muminuspt>25 && abs(mupluseta)<1.0 && abs(muminuseta)<1.0)"]]: for alpha in [[0, "alpha<0.2", "alpha0_2"], [1, "alpha<0.3", "alpha0_3"], [1, "alpha<0.4", "alpha0_4"]]: for quantity in list_of_quantities: changes = {'rebin': 1, 'out': 'out/root/', 'allalpha': True, 'root': "__".join([quantity, alpha[2]]), 'filename': muon[0], 'selection': "&&".join([alpha[1], muon[1]]), } if ("_zpt" in quantity) or ("_npv" in quantity): changes['special_binning'] = True if "alpha" in quantity: changes['rebin'] = 10 plot1d.datamcplot(quantity, files, opt, changes=changes) changes['ratio'] = True changes['labels'] = ['ratio'] plot1d.datamcplot(quantity, files, opt, changes=changes) def ineff(files, opt): settings = plotbase.getSettings(opt, changes=None, settings=None, quantity="flavour_zpt") fig, ax = plotbase.newPlot() labels = ["no matching partons", "two 
matching partons"] colors = ['red', 'blue'] markers = ['o', 'd'] changes = {'subplot': True, 'lumi': 0, 'xynames': ['zpt', 'physflavourfrac'], 'legloc': 'upper left', } for n, l, c, m in zip([0, 2], labels, colors, markers): quantity = "(nmatchingpartons3==%s)_zpt" % n changes['labels'] = [l] changes['colors'] = c changes['markers'] = m plot1d.datamcplot(quantity, files, opt, fig_axes=(fig, ax), changes=changes, settings=settings) settings['filename'] = plotbase.getDefaultFilename("physflavourfrac_zpt", opt, settings) plotbase.Save(fig, settings['filename'], opt) def flav(files, opt): etabins = [0, 1.3, 2.5, 3, 3.2, 5.2] etastrings = ['0-1_3', '1_3-2_5', '2_5-3', '3-3_2', '3_2-5_2'] flavourdefs = ["algoflavour", "physflavour"] flavourdefinitions = ["algorithmic", "physics"] flist = ["(flavour>0&&flavour<4)", "(flavour==1)", "(flavour==2)", "(flavour==3)", "(flavour==4)", "(flavour==5)", "(flavour==21)", "(flavour==0)"] q_names = ['uds', 'u', 'd', 's', 'c', 'b', 'gluon', 'unmatched'] changes = {} ############### FLAVOUR NOT 0!!!!! # barrel: """changes['rebin'] = 1 changes['filename']="flavour" changes['filename']="flavour" for f_id, quantity in zip(['uds','c','b','gluon'], flist): changes['root']=f_id plot1d.datamcplot("%s_zpt" % quantity, files, opt, changes=changes) """ for flavourdef, flavourdefinition in zip(flavourdefs, flavourdefinitions): # iterate over eta bins: for filename, selection in zip(etastrings, getroot.etacuts(etabins)): changes['filename'] = "_".join([filename, flavourdefinition]) changes['alleta'] = True changes['selection'] = "%s && %s" % (selection, "alpha<0.2") changes['rebin'] = 1 for f_id, quantity in zip(q_names, flist): changes['root'] = f_id plot1d.datamcplot("%s_zpt" % quantity.replace("flavour", flavourdef), files, opt, changes=changes) def gif(files, opt): local_opt = copy.deepcopy(opt) runlist = listofruns.runlist[::10] for run, number in zip(runlist, range(len(runlist))): local_opt.lumi = (run - 190456) * 19500 / (209465 - 190456) print plotbase.plot1d.datamcplot('balresp', files, local_opt, changes={'var': 'var_RunRange_0to%s' % run}, filename="%03d" % number) def closure(files, opt): def divide((a, a_err), (b, b_err)): if (b != 0.0): R = a / b else: R = 0 Rerr = R * math.sqrt((a_err / a) ** 2 + (b_err / b) ** 2) return R, Rerr def multiply((a, a_err), (b, b_err)): R = a * b Rerr = R * math.sqrt((a_err / a) ** 2 + (b_err / b) ** 2) return R, Rerr changes = {} changes = plotbase.getchanges(opt, changes) #get extrapol factors with alpha 035 #changes['var']='var_CutSecondLeadingToZPt_0_4' #changes['correction']='L1L2L3' balresp = (getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMeanError()) mpfresp = (getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMeanError()) genbal = (getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMeanError()) intercept, ierr, slope, serr, chi2, ndf, conf_intervals = getroot.fitline2(getroot.getobjectfromnick('ptbalance_alpha', files[0], changes, rebin=1)) balresp_extrapol = (intercept, conf_intervals[0]) extrapol_reco_factor = divide(balresp_extrapol, balresp) intercept2, ierr2, slope2, serr2, chi22, ndf2, conf_intervals2 = getroot.fitline2(getroot.getobjectfromnick('genbalance_genalpha', files[0], changes, rebin=1)) genbal_extrapol = (intercept2, 
conf_intervals2[0]) extrapol_gen_factor = divide(genbal_extrapol, genbal) intercept3, ierr3, slope3, serr3, chi23, ndf3, conf_intervals3 = getroot.fitline2(getroot.getobjectfromnick('mpf_alpha', files[0], changes, rebin=1)) mpf_extrapol = (intercept3, conf_intervals3[0]) extrapol_mpf_factor = divide(mpf_extrapol, mpfresp) #del changes['var'] #del changes['correction'] #other quantities with alpha 02 recogen = (getroot.getobjectfromnick('recogen', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('recogen', files[0], changes, rebin=1).GetMeanError()) zresp = (getroot.getobjectfromnick('zresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('zresp', files[0], changes, rebin=1).GetMeanError()) balresp = (getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('balresp', files[0], changes, rebin=1).GetMeanError()) mpfresp = (getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('mpfresp', files[0], changes, rebin=1).GetMeanError()) mpfresp_raw = (getroot.getobjectfromnick('mpfresp-raw', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('mpfresp-raw', files[0], changes, rebin=1).GetMeanError()) genbal = (getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('genbal', files[0], changes, rebin=1).GetMeanError()) balparton = (getroot.getobjectfromnick('balparton', files[0], changes, rebin=1).GetMean(), getroot.getobjectfromnick('balparton', files[0], changes, rebin=1).GetMeanError()) partoncorr = divide(balparton, genbal) format = "%1.4f" print changes print "" print (r"balresp reco %s +- %s" % (format, format)) % balresp print (r"mpf %s +- %s" % (format, format)) % mpfresp print (r"balparton %s +- %s" % (format, format)) % balparton print (r"zresp %s +- %s" % (format, format)) % zresp print (r"recogen %s +- %s" % (format, format)) % recogen print (r"extrapolReco_factor %s +- %s" % (format, format)) % extrapol_reco_factor print (r"extrapolGen_factor %s +- %s" % (format, format)) % extrapol_gen_factor print (r"extrapolMPF_factor %s +- %s" % (format, format)) % extrapol_mpf_factor print (r"parton/genjet %s +- %s" % (format, format)) % divide(balparton, genbal) print "" print (r"pTgenjet / pTgenZ %s +- %s" % (format, format)) % genbal genbal = multiply(genbal, extrapol_gen_factor) print (r"* gen Level extrapolation %s +- %s" % (format, format)) % genbal #genbal = multiply(genbal, partoncorr) #print (r"* pTparton/pTgenjet correction %s +- %s" % (format, format) ) % genbal #genbal = divide(genbal, balparton) #print (r"* pTparton/pTZ correction %s +- %s" % (format, format) ) % genbal reco_bal = divide(multiply(genbal, recogen), zresp) print (r"* GenToReco for Jet and Z %s +- %s" % (format, format)) % reco_bal print "" print (r"pTrecojet / pTrecoZ %s +- %s" % (format, format)) % balresp balresp = multiply(balresp, extrapol_reco_factor) print (r"* reco Level extrapolation %s +- %s" % (format, format)) % balresp print "" print (r"MPF (typeI) %s +- %s" % (format, format)) % mpfresp #mpfresp = divide(mpfresp, zresp) #print (r"MPF (GenZ) %s +- %s" % (format, format) ) % mpfresp mpfresp = multiply(mpfresp, extrapol_mpf_factor) print (r"MPF (extrapol) %s +- %s" % (format, format)) % mpfresp print (r"MPF (Raw) %s +- %s" % (format, format)) % mpfresp_raw def extrapola(files, opt): fig, ax = plotbase.newPlot() changes = {} changes['var'] = "_var_CutSecondLeadingToZPt_0_3" local_opt = copy.deepcopy(opt) rebin = 5 if opt.rebin is 
not None: rebin = opt.rebin plot1d.datamcplot('ptbalance_alpha', files, local_opt, legloc='upper center', changes=changes, rebin=rebin, subplot=True, subtext="", fig_axes=(fig, ax), fit='intercept', ratio=False) local_opt.colors = ['red', 'maroon'] plot1d.datamcplot('mpf_alpha', files, local_opt, legloc='upper center', changes=changes, rebin=rebin, subplot=True, xy_names=['alpha', 'response'], subtext="", fig_axes=(fig, ax), fit='intercept', ratio=False, fit_offset=-0.1) file_name = plotbase.getDefaultFilename("extrapolation_", opt, changes) plotbase.Save(fig, file_name, opt) # function for comparing old and new corrections def comparison(datamc, opt): """file_names = [ '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root' ]""" colors = ['red', 'blue', 'blue', 'red'] markers = ['*', 'o', 'o', '*'] #labels = [['MC_52xFast', 'data_52xFast'], ['MC_52xOff', 'data_52xOff'], ['MC_53xFast', 'data_53xFast'], ['MC_53xOff', 'data_53xOff']] rebin = 1 import copy file_names = [ '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root', ] labels = [['MC_52xFast', 'data_52xFast'], ['MC_53xFast', 'data_53xFast'], ['MC_52xOff', 'data_52xOff'], ['MC_53xOff', 'data_53xOff']] files = [] for f in file_names: files += [getroot.openfile(f, opt.verbose)] local_opt = copy.deepcopy(opt) local_opt.style = markers local_opt.colors = colors quantity = 'L1abs_npv' # ALL fig, axes = plotbase.newPlot(subplots=4) for a, f1, f2, l in zip(axes, files[::2], files[1::2], labels): local_opt.labels = l datamcplot(quantity, (f1, f2), local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, a), rebin=rebin, subplot=True, subtext="") filename = "L1_all__" + opt.algorithm plotbase.Save(fig, filename, opt) """ #Fastjet vs Offset fig = plotbase.plt.figure(figsize=(14,7)) axes = [fig.add_subplot(1,2,n) for n in [1,2]] local_opt.labels = labels[0] local_opt.colors = ['blue', 'blue'] datamcplot(quantity, (files[0], files[1]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]), rebin=rebin, subplot=True, subtext="") local_opt.labels = labels[1] local_opt.colors = ['red', 'red'] datamcplot(quantity, (files[2], files[3]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]), rebin=rebin, subplot=True, subtext="") #53 local_opt.labels = labels[2] local_opt.colors = ['blue', 'blue'] datamcplot(quantity, (files[4], files[5]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]), 
rebin=rebin, subplot=True, subtext="") local_opt.labels = labels[3] local_opt.colors = ['red', 'red'] datamcplot(quantity, (files[6], files[7]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]), rebin=rebin, subplot=True, subtext="") filename = "L1_Fastjet_vs_Offset__"+opt.algorithm plotbase.Save(fig, filename, opt) #52X vs 53X fig = plotbase.plt.figure(figsize=(14,7)) axes = [fig.add_subplot(1,2,n) for n in [1,2]] local_opt.labels = labels[0] local_opt.colors = ['blue', 'blue'] datamcplot(quantity, (files[0], files[1]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]), rebin=rebin, subplot=True, subtext="") local_opt.labels = labels[2] local_opt.colors = ['red', 'red'] datamcplot(quantity, (files[4], files[5]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[0]), rebin=rebin, subplot=True, subtext="") local_opt.labels = labels[1] local_opt.colors = ['blue', 'blue'] datamcplot(quantity, (files[2], files[3]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]), rebin=rebin, subplot=True, subtext="") # local_opt.labels = labels[3] local_opt.colors = ['red', 'red'] datamcplot(quantity, (files[6], files[7]), local_opt, 'upper center', changes={'correction':''}, fig_axes=(fig,axes[1]), rebin=rebin, subplot=True, subtext="") filename = "L1_52X_vs_53X__"+opt.algorithm plotbase.Save(fig, filename, opt) import plotresponse file_names = [ '/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root', ] labels = [['data_52xFast', 'MC_52xFast'], [ 'data_53xFast', 'MC_53xFast'], [ 'data_52xOff', 'MC_52xOff'], ['data_53xOff', 'MC_53xOff']] files=[] for f in file_names: files += [getroot.openfile(f, opt.verbose)] for over, fit in zip(['zpt', 'jet1eta', 'npv'], [True, False, True]): fig, axes= plotbase.newPlot(subplots=4) fig2, axes2= plotbase.newPlot(subplots=4) for a1, a2, f1, f2, l in zip(axes, axes2, files[::2], files[1::2], labels): local_opt.labels = l changes ={}# {'correction':'L1L2L3'} plotresponse.responseplot((f1, f2), local_opt, ['bal', 'mpf'], over=over, changes=changes, figaxes=(fig,a1), subplot=True, subtext="") plotresponse.ratioplot((f1, f2), local_opt, ['bal', 'mpf'], over=over, changes=changes, figaxes=(fig2 ,a2), fit=fit, subplot=True, subtext="") filename = "Response_"+over+"_all__"+opt.algorithm plotbase.Save(fig, filename, opt) filename = "Ratio_"+over+"_all__"+opt.algorithm plotbase.Save(fig2, filename, opt)""" # function for 2d grid plots """def twoD_all_grid(quantity, datamc, opt): pt_thresholds = [12, 16, 20, 24, 28, 32, 36] var_list = ['var_JetPt_%1.fto%1.f' % (s1, s2) for (s1, s2) in zip(pt_thresholds, [1000, 1000, 1000, 1000, 1000, 1000, 1000])] var_list_2 = getroot.npvstrings(opt.npv) fig = plt.figure(figsize=(10.*len(var_list), 7.*len(var_list_2))) grid = AxesGrid(fig, 111, nrows_ncols = (len(var_list), len(var_list_2)), axes_pad = 0.4, share_all=True, label_mode = "L", #aspect = True, #cbar_pad = 0, #cbar_location = "right", 
#cbar_mode='single', ) for n1, var1 in enumerate(var_list): for n2, var2 in enumerate(var_list_2): change = {'var':var1+"_"+var2} index = len(var_list_2)*n1 + n2 change['incut']='allevents' twoD(quantity, datamc, opt, changes=change, fig_axes = [fig, grid[index]], subplot = True, axtitle = change['var'].replace('var_', '')) for grid_element, var_strings in zip(grid, opt.npv): text = r"$%s\leq\mathrm{NPV}\leq%s$" % var_strings grid_element.text(0.5, 5.5, text, ha='center', va='center', size ='40') for grid_element, pt_threshold in zip(grid[::len(var_list_2)], pt_thresholds): text = r"$p_\mathrm{T}^\mathrm{Jet1}$"+"\n"+r"$\geq%s\mathrm{GeV}$" % pt_threshold grid_element.text(-8.7, 0, text, ha='left', va='center', size ='30') #fig.suptitle("%s leading jet $\eta-\phi$ distribution ($before$ cuts) for %s %s" % (opt.labels[0], opt.algorithm, opt.correction), size='50') fig.suptitle("%s %s $\eta-\phi$ distribution ($before$ cuts) for %s %s" % (opt.labels[0], quantity[7:-16], opt.algorithm, opt.correction), size='30') file_name = "grid_"+opt.labels[0]+"_"+quantity +"_"+opt.algorithm + opt.correction fig.set_figwidth(fig.get_figwidth() * 1.2) plotbase.Save(fig, file_name, opt, crop=False, pad=1.5)""" def Fall12(files, opt): local_opt = copy.deepcopy(opt) filelist = [ ['/storage/8/dhaitz/CalibFW/work/data_2012/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_V4/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root'] ] labellist = [['data_Summer12', 'MC_Summer12'], ['data_Fall12V1', 'MC_Fall12V1'], ['data_Fall12V4', 'MC_Fall12V4']] over = 'zpt' for over in ['zpt', 'npv', 'jet1eta']: fig = plotbase.plt.figure(figsize=[21, 14]) fig.suptitle(opt.title, size='xx-large') for typ, row in zip(['bal', 'mpf'], [0, 4]): for filenames, labels, col in zip(filelist, labellist, [0, 1, 2]): ax1 = plotbase.plt.subplot2grid((7, 3), (row, col), rowspan=2) ax2 = plotbase.plt.subplot2grid((7, 3), (row + 2, col)) fig.add_axes(ax1) fig.add_axes(ax2) if over == 'jet1eta' and typ == 'bal': legloc = 'upper right' else: legloc = 'lower left' local_opt.labels = labels files = [] for f in filenames: files += [getroot.openfile(f, opt.verbose)] plotresponse.responseplot(files, local_opt, [typ], over=over, figaxes=(fig, ax1), legloc=legloc, subplot=True) plotresponse.ratioplot(files, local_opt, [typ], binborders=True, fit=True, over=over, subplot=True, figaxes=(fig, ax2), ratiosubplot=True) fig.subplots_adjust(hspace=0.05) ax1.set_xticks([]) ax1.set_xlabel("") ax2.set_yticks([1.00, 0.95, 0.90]) if col > 0: ax1.set_ylabel("") ax2.set_ylabel("") title = "" # " Jet Response ($p_T$ balance / MPF) vs. 
Z $p_T$, $N_{vtx}$ , Jet $\eta$ (" +opt.algorithm+" "+opt.correction+")" fig.suptitle(title, size='x-large') file_name = "comparison_ALL_" + over + opt.algorithm + opt.correction plotbase.Save(fig, file_name, opt) def factors(files, opt): local_opt = copy.deepcopy(opt) filelist = [ ['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root', '/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4_L1Offset/out/closure.root'] ] labellist = [ ['Data FastJet V1', 'MC FastJet V1', 'Data Offset V1', 'MC Offset V1'], ['Data FastJet V4', 'MC FastJet V4', 'Data Offset V4', 'MC Offset V4']] """filelistt = [ ['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/Data_2012_Fall12JEC_V4_L1Offset/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4_L1Offset/out/closure.root'] ] labellistt = ['Data FastJet V1', 'Data FastJet V4'], ['MC FastJet V1', 'MC FastJet V4'], ['Data Offset V1', 'Data Offset V4'], ['MC Offset V1','MC Offset V4' ]] names = ['DataV1', 'MCV1', 'DataV4', 'MCV4' ]""" files = [] #for sublist in filelist: # rootfiles = [getroot.openfile(f, opt.verbose) for f in sublist] # files.append( rootfiles) for sublist in filelist: files.append([getroot.openfile(f, opt.verbose) for f in sublist]) fit = None rebin = 1 # for files, labellist, name in zip(files, labellist, names) fig, axes = plotbase.newPlot(subplots=2) quantity = 'L1abs_npv' local_opt.style = ['o', '*', 'o', '*'] local_opt.labels = labellist[0] local_opt.colors = ['blue', 'blue', 'red', 'red'] plot1d.datamcplot(quantity, files[0], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[0]), fit=fit, rebin=rebin, subplot=True, subtext="") local_opt.labels = labellist[1] plot1d.datamcplot(quantity, files[1], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[1]), fit=fit, rebin=rebin, subplot=True, subtext="") file_name = "L1_comparison_" # +name plotbase.Save(fig, file_name, opt) def factors2(files, opt): local_opt = copy.deepcopy(opt) filelist = [ ['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_V4/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4/out/closure.root'], ['/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/data_2012_Fall12JEC_V4_L1Offset/out/closure.root'], 
['/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_L1Offset/out/closure.root', '/storage/8/dhaitz/CalibFW/work/mc_madgraphSummer12_Fall12JEC_V4_L1Offset/out/closure.root'] ] labellistt = [['data FastJet V1', 'data FastJet V4'], ['MC FastJet V1', 'MC FastJet V4'], ['data Offset V1', 'data Offset V4'], ['MC Offset V1', 'MC Offset V4'] ] names = ['dataV1', 'MCV1', 'dataV4', 'MCV4'] files = [] for sublist in filelist: rootfiles = [getroot.openfile(f, opt.verbose) for f in sublist] files.append(rootfiles) #print files fit = 'chi2_linear' rebin = 1 fit_offset = -0.1 for files, labellist, name in zip(files, labellistt, names): print labellist fig, axes = plotbase.newPlot(subplots=2) quantity = 'L1abs_npv' local_opt.style = ['o', '*', 'o', '*'] local_opt.labels = [labellist[0]] local_opt.colors = ['blue', 'blue', 'red', 'red'] plot1d.datamcplot(quantity, [files[0]], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[0]), fit=fit, rebin=rebin, fit_offset=fit_offset, subplot=True, subtext="") local_opt.labels = [labellist[1]] plot1d.datamcplot(quantity, [files[1]], local_opt, 'upper center', changes={'correction': ''}, fig_axes=(fig, axes[1]), fit=fit, rebin=rebin, fit_offset=fit_offset, subplot=True, subtext="") file_name = "L1_comparison_" + name plotbase.Save(fig, file_name, opt) import ROOT def allpu(files, opt, truth=True): print files settings = plotbase.getSettings(opt, quantity='npu') #print settings print settings['folder'] name = "_".join([settings['folder'], settings['algorithm'] + settings['correction']]) print name, files[1] name = name.replace("Res", "") t = files[1].Get(name) if not t: print "no tree", name, t.GetName() exit(1) # raw wei data weight if truth: histos = [getroot.getobject("pileup", files[2])] else: histos = [getroot.getobject("pileup;2", files[2])] histos[-1].Rebin(10) print histos[-1].GetNbinsX(), "pu2" histos[0].SetTitle("Data") histos += [ROOT.TH1D("mcraw", "MC", 1600, 0, 80)] if truth: histos += [ROOT.TH1D("mcraw", "MC", 1600, 0, 80)] t.Project("mcraw", "nputruth") else: histos += [ROOT.TH1D("mcraw", "MC", 80, 0, 80)] t.Project("mcraw", "npu") if truth: histos += [ROOT.TH1D("mcwei", "MC'", 1600, 0, 80)] t.Project("mcwei", "nputruth", "weight") else: histos += [ROOT.TH1D("mcwei", "MC'", 80, 0, 80)] t.Project("mcwei", "npu") binning = [[0, 1, 2, 3.5, 5], range(45, 80)] for h in histos: if h.GetNbinsX() > 1000: h.Rebin() if h.GetNbinsX() > 82: print h.GetNbinsX(), ">82! 
in", h.GetTitle() if not truth: break print "rebin:", binning b = binning if histos.index(h) == 1: b = binning + [range(5, 46)] print b for l in b: for a, b in zip(l[:-1], l[1:]): x1 = h.FindBin(a) x2 = h.FindBin(b) sumh = sum([h.GetBinContent(i) for i in range(x1, x2)]) / (x2 - x1) for i in range(x1, x2): h.SetBinContent(i, sumh) if truth: f = histos[1].Integral() / histos[1].Integral(histos[1].FindBin(8), histos[1].FindBin(40)) for i in range(3 + 0 * len(histos)): #histos[i].Rebin(4) print i ff = f / histos[i].Integral(histos[i].FindBin(8), histos[i].FindBin(40)) ff = 1.0 / histos[i].Integral() histos[i].Scale(ff) histos += [histos[0].Clone("dataraw")] histos[-1].SetTitle("Data/MC") histos[-1].Divide(histos[1]) if len(files) > 3: histos += [getroot.getobject("pileup", files[3])] histos[-1].SetTitle("weight") histos += [histos[2].Clone("rawmc")] histos[-1].Divide(histos[1]) histos[-1].SetTitle("MC'/MC") histos += [histos[0].Clone("datamc")] histos[-1].Divide(histos[2]) histos[-1].SetTitle("Data/MC'") plots = [getroot.root2histo(h) for h in histos] fig, ax, ratio = plotbase.newPlot(ratio=True) fig = plotbase.plt.figure(figsize=[7, 10]) ax = plotbase.plt.subplot2grid((3, 1), (0, 0), rowspan=2) ax.number = 1 ratio = plotbase.plt.subplot2grid((3, 1), (2, 0)) ratio.number = 2 fig.add_axes(ax) fig.add_axes(ratio) fig.subplots_adjust(hspace=0.05) colors = ['black', 'navy', 'red', 'green'] for p, c in zip(plots[:3], colors): ax.errorbar(p.x, p.y, label=p.title, drawstyle='steps-post', color=c, lw=1.6) colors[1] = 'gray' for p, c in zip(plots[3:], colors): r = ratio.errorbar(p.x, p.y, label=p.title, drawstyle='steps-post', color=c, lw=1.6) plotbase.labels(ax, opt, settings, settings['subplot']) plotbase.axislabels(ax, r"$n_\mathrm{PU}", settings['xynames'][1], settings=settings) xaxistext = r"observed number of pile-up interactions $n_\mathrm{PU}$" if truth: xaxistext = xaxistext.replace("observed", "true") plotbase.axislabels(ratio, xaxistext, "ratio", settings=settings) print ratio.number, r plotbase.setAxisLimits(ax, settings) plotbase.labels(ratio, opt, settings, settings['subplot']) plotbase.setAxisLimits(ratio, settings) #handles, labels = ratio.get_legend_handles_labels() ratio.legend(bbox_to_anchor=[0.8, 1], loc='upper center') ax.set_xticklabels([]) ax.set_xlabel("") settings['filename'] = plotbase.getDefaultFilename("npus", opt, settings) plotbase.Save(fig, settings) def pu(files, opt): allpu(files, opt) def puobserved(files, opt): allpu(files, opt, False)
gpl-2.0
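Editor's note: the bin-averaging loop in allpu above smooths the pile-up histograms by replacing every bin inside each configured range with the average content of that range. A minimal NumPy sketch of the same step, using plain array indices in place of ROOT's FindBin (the function name and example values are illustrative only, not part of the original script):

import numpy as np

def smooth_bin_ranges(contents, edges):
    # Replace the bins in each [a, b) range with the range average, as in allpu above.
    contents = np.asarray(contents, dtype=float)
    for a, b in zip(edges[:-1], edges[1:]):
        contents[a:b] = contents[a:b].mean()
    return contents

# smooth_bin_ranges(range(10), [0, 3, 6, 10]) averages bins 0-2, 3-5 and 6-9 separately.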
steven-murray/pydftools
setup.py
1
2408
#!/usr/bin/env python # -*- coding: utf-8 -*- """The setup script.""" from setuptools import setup, find_packages import io import os import re with open("README.rst") as readme_file: readme = readme_file.read() with open("HISTORY.rst") as history_file: history = history_file.read() requirements = [ "scipy", "numpy>=1.6.2", "Click>=6.0", "attrs>=17.0", "cached_property", "chainconsumer", "matplotlib" # TODO: put package requirements here ] def read(*names, **kwargs): with io.open( os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8"), ) as fp: return fp.read() def find_version(*file_paths): version_file = read(*file_paths) version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M) if version_match: return version_match.group(1) raise RuntimeError("Unable to find version string.") setup_requirements = [ "pytest-runner", # TODO(steven-murray): put setup requirements (distutils extensions, etc.) here ] test_requirements = [ "pytest", # TODO: put package test requirements here ] setup( name="pydftools", version=find_version("pydftools", "__init__.py"), description="A pure-python port of the dftools R package.", long_description=readme + "\n\n" + history, author="Steven Murray", author_email="steven.murray@curtin.edu.au", url="https://github.com/steven-murray/pydftools", packages=find_packages(include=["pydftools"]), entry_points={"console_scripts": ["pydftools=pydftools.cli:main"]}, include_package_data=True, install_requires=requirements, license="MIT license", zip_safe=False, keywords="pydftools", classifiers=[ "Development Status :: 2 - Pre-Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Natural Language :: English", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", ], test_suite="tests", tests_require=test_requirements, setup_requires=setup_requirements, )
mit
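Editor's note: the find_version helper in this setup.py single-sources the package version by scanning pydftools/__init__.py for a __version__ assignment. A minimal sketch of the same regex applied to an in-memory string (the module text is a made-up example):

import re

module_text = '__version__ = "0.1.0"\n__author__ = "someone"'
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", module_text, re.M)
print(match.group(1))  # -> 0.1.0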
wenhuchen/ETHZ-Bootstrapped-Captioning
visual-concepts/coco/PythonAPI/pycocotools/coco.py
1
16953
__author__ = 'tylin' __version__ = '2.0' # Interface for accessing the Microsoft COCO dataset. # Microsoft COCO is a large image dataset designed for object detection, # segmentation, and caption generation. pycocotools is a Python API that # assists in loading, parsing and visualizing the annotations in COCO. # Please visit http://mscoco.org/ for more information on COCO, including # for the data, paper, and tutorials. The exact format of the annotations # is also described on the COCO website. For example usage of the pycocotools # please see pycocotools_demo.ipynb. In addition to this API, please download both # the COCO images and annotations in order to run the demo. # An alternative to using the API is to load the annotations directly # into Python dictionary # Using the API provides additional utility functions. Note that this API # supports both *instance* and *caption* annotations. In the case of # captions not all functions are defined (e.g. categories are undefined). # The following API functions are defined: # COCO - COCO api class that loads COCO annotation file and prepare data structures. # decodeMask - Decode binary mask M encoded via run-length encoding. # encodeMask - Encode binary mask M using run-length encoding. # getAnnIds - Get ann ids that satisfy given filter conditions. # getCatIds - Get cat ids that satisfy given filter conditions. # getImgIds - Get img ids that satisfy given filter conditions. # loadAnns - Load anns with the specified ids. # loadCats - Load cats with the specified ids. # loadImgs - Load imgs with the specified ids. # segToMask - Convert polygon segmentation to binary mask. # showAnns - Display the specified annotations. # loadRes - Load algorithm results and create API for accessing them. # download - Download COCO images from mscoco.org server. # Throughout the API "ann"=annotation, "cat"=category, and "img"=image. # Help on each functions can be accessed by: "help COCO>function". # See also COCO>decodeMask, # COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds, # COCO>getImgIds, COCO>loadAnns, COCO>loadCats, # COCO>loadImgs, COCO>segToMask, COCO>showAnns # Microsoft COCO Toolbox. version 2.0 # Data, paper, and tutorials available at: http://mscoco.org/ # Code written by Piotr Dollar and Tsung-Yi Lin, 2014. # Licensed under the Simplified BSD License [see bsd.txt] import json import time import matplotlib.pyplot as plt from matplotlib.collections import PatchCollection from matplotlib.patches import Polygon import numpy as np import urllib import copy import itertools import mask import os from collections import defaultdict class COCO: def __init__(self, annotation_file=None): """ Constructor of Microsoft COCO helper class for reading and visualizing annotations. :param annotation_file (str): location of annotation file :param image_folder (str): location to the folder that hosts images. :return: """ # load dataset self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict() self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list) if not annotation_file == None: print 'loading annotations into memory...' tic = time.time() dataset = json.load(open(annotation_file, 'r')) assert type(dataset)==dict, "annotation file format %s not supported"%(type(dataset)) print 'Done (t=%0.2fs)'%(time.time()- tic) self.dataset = dataset self.createIndex() def createIndex(self): # create index print 'creating index...' 
anns,cats,imgs = dict(),dict(),dict() imgToAnns,catToImgs = defaultdict(list),defaultdict(list) if 'annotations' in self.dataset: for ann in self.dataset['annotations']: imgToAnns[ann['image_id']].append(ann) anns[ann['id']] = ann if 'images' in self.dataset: for img in self.dataset['images']: imgs[img['id']] = img if 'categories' in self.dataset: for cat in self.dataset['categories']: cats[cat['id']] = cat for ann in self.dataset['annotations']: catToImgs[ann['category_id']].append(ann['image_id']) print 'index created!' # create class members self.anns = anns self.imgToAnns = imgToAnns self.catToImgs = catToImgs self.imgs = imgs self.cats = cats def info(self): """ Print information about the annotation file. :return: """ for key, value in self.dataset['info'].items(): print '%s: %s'%(key, value) def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None): """ Get ann ids that satisfy given filter conditions. default skips that filter :param imgIds (int array) : get anns for given imgs catIds (int array) : get anns for given cats areaRng (float array) : get anns for given area range (e.g. [0 inf]) iscrowd (boolean) : get anns for given crowd label (False or True) :return: ids (int array) : integer array of ann ids """ imgIds = imgIds if type(imgIds) == list else [imgIds] catIds = catIds if type(catIds) == list else [catIds] if len(imgIds) == len(catIds) == len(areaRng) == 0: anns = self.dataset['annotations'] else: if not len(imgIds) == 0: lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns] anns = list(itertools.chain.from_iterable(lists)) else: anns = self.dataset['annotations'] anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds] anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]] if not iscrowd == None: ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd] else: ids = [ann['id'] for ann in anns] return ids def getCatIds(self, catNms=[], supNms=[], catIds=[]): """ filtering parameters. default skips that filter. :param catNms (str array) : get cats for given cat names :param supNms (str array) : get cats for given supercategory names :param catIds (int array) : get cats for given cat ids :return: ids (int array) : integer array of cat ids """ catNms = catNms if type(catNms) == list else [catNms] supNms = supNms if type(supNms) == list else [supNms] catIds = catIds if type(catIds) == list else [catIds] if len(catNms) == len(supNms) == len(catIds) == 0: cats = self.dataset['categories'] else: cats = self.dataset['categories'] cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms] cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms] cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds] ids = [cat['id'] for cat in cats] return ids def getImgIds(self, imgIds=[], catIds=[]): ''' Get img ids that satisfy given filter conditions. 
:param imgIds (int array) : get imgs for given ids :param catIds (int array) : get imgs with all given cats :return: ids (int array) : integer array of img ids ''' imgIds = imgIds if type(imgIds) == list else [imgIds] catIds = catIds if type(catIds) == list else [catIds] if len(imgIds) == len(catIds) == 0: ids = self.imgs.keys() else: ids = set(imgIds) for i, catId in enumerate(catIds): if i == 0 and len(ids) == 0: ids = set(self.catToImgs[catId]) else: ids &= set(self.catToImgs[catId]) return list(ids) def loadAnns(self, ids=[]): """ Load anns with the specified ids. :param ids (int array) : integer ids specifying anns :return: anns (object array) : loaded ann objects """ if type(ids) == list: return [self.anns[id] for id in ids] elif type(ids) == int: return [self.anns[ids]] def loadCats(self, ids=[]): """ Load cats with the specified ids. :param ids (int array) : integer ids specifying cats :return: cats (object array) : loaded cat objects """ if type(ids) == list: return [self.cats[id] for id in ids] elif type(ids) == int: return [self.cats[ids]] def loadImgs(self, ids=[]): """ Load anns with the specified ids. :param ids (int array) : integer ids specifying img :return: imgs (object array) : loaded img objects """ if type(ids) == list: return [self.imgs[id] for id in ids] elif type(ids) == int: return [self.imgs[ids]] def showAnns(self, anns): """ Display the specified annotations. :param anns (array of object): annotations to display :return: None """ if len(anns) == 0: return 0 if 'segmentation' in anns[0] or 'keypoints' in anns[0]: datasetType = 'instances' elif 'caption' in anns[0]: datasetType = 'captions' else: raise Exception("datasetType not supported") if datasetType == 'instances': ax = plt.gca() ax.set_autoscale_on(False) polygons = [] color = [] for ann in anns: c = (np.random.random((1, 3))*0.6+0.4).tolist()[0] if 'segmentation' in ann: if type(ann['segmentation']) == list: # polygon for seg in ann['segmentation']: poly = np.array(seg).reshape((len(seg)/2, 2)) polygons.append(Polygon(poly)) color.append(c) else: # mask t = self.imgs[ann['image_id']] if type(ann['segmentation']['counts']) == list: rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width']) else: rle = [ann['segmentation']] m = mask.decode(rle) img = np.ones( (m.shape[0], m.shape[1], 3) ) if ann['iscrowd'] == 1: color_mask = np.array([2.0,166.0,101.0])/255 if ann['iscrowd'] == 0: color_mask = np.random.random((1, 3)).tolist()[0] for i in range(3): img[:,:,i] = color_mask[i] ax.imshow(np.dstack( (img, m*0.5) )) if 'keypoints' in ann and type(ann['keypoints']) == list: # turn skeleton into zero-based index sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1 kp = np.array(ann['keypoints']) x = kp[0::3] y = kp[1::3] v = kp[2::3] for sk in sks: if np.all(v[sk]>0): plt.plot(x[sk],y[sk], linewidth=3, color=c) plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2) plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2) p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4) ax.add_collection(p) p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2) ax.add_collection(p) elif datasetType == 'captions': for ann in anns: print ann['caption'] def loadRes(self, resFile): """ Load result file and return a result api object. 
:param resFile (str) : file name of result file :return: res (obj) : result api object """ res = COCO() res.dataset['images'] = [img for img in self.dataset['images']] print 'Loading and preparing results... ' tic = time.time() if type(resFile) == str or type(resFile) == unicode: anns = json.load(open(resFile)) elif type(resFile) == np.ndarray: anns = self.loadNumpyAnnotations(resFile) else: anns = resFile assert type(anns) == list, 'results in not an array of objects' annsImgIds = [ann['image_id'] for ann in anns] assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \ 'Results do not correspond to current coco set' if 'caption' in anns[0]: imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns]) res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds] for id, ann in enumerate(anns): ann['id'] = id+1 elif 'bbox' in anns[0] and not anns[0]['bbox'] == []: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): bb = ann['bbox'] x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]] if not 'segmentation' in ann: ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]] ann['area'] = bb[2]*bb[3] ann['id'] = id+1 ann['iscrowd'] = 0 elif 'segmentation' in anns[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): # now only support compressed RLE format as segmentation results ann['area'] = mask.area([ann['segmentation']])[0] if not 'bbox' in ann: ann['bbox'] = mask.toBbox([ann['segmentation']])[0] ann['id'] = id+1 ann['iscrowd'] = 0 elif 'keypoints' in anns[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for id, ann in enumerate(anns): s = ann['keypoints'] x = s[0::3] y = s[1::3] x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y) ann['area'] = (x1-x0)*(y1-y0) ann['id'] = id + 1 ann['bbox'] = [x0,y0,x1-x0,y1-y0] print 'DONE (t=%0.2fs)'%(time.time()- tic) res.dataset['annotations'] = anns res.createIndex() return res def download( self, tarDir = None, imgIds = [] ): ''' Download COCO images from mscoco.org server. :param tarDir (str): COCO results directory name imgIds (list): images to be downloaded :return: ''' if tarDir is None: print 'Please specify target directory' return -1 if len(imgIds) == 0: imgs = self.imgs.values() else: imgs = self.loadImgs(imgIds) N = len(imgs) if not os.path.exists(tarDir): os.makedirs(tarDir) for i, img in enumerate(imgs): tic = time.time() fname = os.path.join(tarDir, img['file_name']) if not os.path.exists(fname): urllib.urlretrieve(img['coco_url'], fname) print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic) def loadNumpyAnnotations(self, data): """ Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class} :param data (numpy.ndarray) :return: annotations (python nested list) """ print("Converting ndarray to lists...") assert(type(data) == np.ndarray) print(data.shape) assert(data.shape[1] == 7) N = data.shape[0] ann = [] for i in range(N): if i % 1000000 == 0: print("%d/%d" % (i,N)) ann += [{ 'image_id' : int(data[i, 0]), 'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ], 'score' : data[i, 5], 'category_id': int(data[i, 6]), }] return ann
bsd-3-clause
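Editor's note: the COCO class above is normally driven through the get*/load* helpers listed in its header comment. A short usage sketch (the annotation file path is hypothetical):

from pycocotools.coco import COCO

coco = COCO('annotations/instances_val2014.json')    # hypothetical path
cat_ids = coco.getCatIds(catNms=['person'])           # ids of the 'person' category
img_ids = coco.getImgIds(catIds=cat_ids)              # images containing that category
ann_ids = coco.getAnnIds(imgIds=img_ids[0], catIds=cat_ids, iscrowd=None)
anns = coco.loadAnns(ann_ids)                         # full annotation dicts for one image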
wanggang3333/scikit-learn
examples/svm/plot_svm_anova.py
250
2000
""" ================================================= SVM-Anova: SVM with univariate feature selection ================================================= This example shows how to perform univariate feature before running a SVC (support vector classifier) to improve the classification scores. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets, feature_selection, cross_validation from sklearn.pipeline import Pipeline ############################################################################### # Import some data to play with digits = datasets.load_digits() y = digits.target # Throw away data, to be in the curse of dimension settings y = y[:200] X = digits.data[:200] n_samples = len(y) X = X.reshape((n_samples, -1)) # add 200 non-informative features X = np.hstack((X, 2 * np.random.random((n_samples, 200)))) ############################################################################### # Create a feature-selection transform and an instance of SVM that we # combine together to have an full-blown estimator transform = feature_selection.SelectPercentile(feature_selection.f_classif) clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))]) ############################################################################### # Plot the cross-validation score as a function of percentile of features score_means = list() score_stds = list() percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100) for percentile in percentiles: clf.set_params(anova__percentile=percentile) # Compute cross-validation score using all CPUs this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1) score_means.append(this_scores.mean()) score_stds.append(this_scores.std()) plt.errorbar(percentiles, score_means, np.array(score_stds)) plt.title( 'Performance of the SVM-Anova varying the percentile of features selected') plt.xlabel('Percentile') plt.ylabel('Prediction rate') plt.axis('tight') plt.show()
bsd-3-clause
sammcveety/incubator-beam
sdks/python/apache_beam/examples/complete/juliaset/juliaset/juliaset.py
8
4457
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """A Julia set computing workflow: https://en.wikipedia.org/wiki/Julia_set. We use the quadratic polinomial f(z) = z*z + c, with c = -.62772 +.42193i """ from __future__ import absolute_import import argparse import apache_beam as beam from apache_beam.io import WriteToText def from_pixel(x, y, n): """Converts a NxN pixel position to a (-1..1, -1..1) complex number.""" return complex(2.0 * x / n - 1.0, 2.0 * y / n - 1.0) def get_julia_set_point_color(element, c, n, max_iterations): """Given an pixel, convert it into a point in our julia set.""" x, y = element z = from_pixel(x, y, n) for i in xrange(max_iterations): if z.real * z.real + z.imag * z.imag > 2.0: break z = z * z + c return x, y, i # pylint: disable=undefined-loop-variable def generate_julia_set_colors(pipeline, c, n, max_iterations): """Compute julia set coordinates for each point in our set.""" def point_set(n): for x in range(n): for y in range(n): yield (x, y) julia_set_colors = (pipeline | 'add points' >> beam.Create(point_set(n)) | beam.Map( get_julia_set_point_color, c, n, max_iterations)) return julia_set_colors def generate_julia_set_visualization(data, n, max_iterations): """Generate the pixel matrix for rendering the julia set as an image.""" import numpy as np # pylint: disable=wrong-import-order, wrong-import-position colors = [] for r in range(0, 256, 16): for g in range(0, 256, 16): for b in range(0, 256, 16): colors.append((r, g, b)) xy = np.zeros((n, n, 3), dtype=np.uint8) for x, y, iteration in data: xy[x, y] = colors[iteration * len(colors) / max_iterations] return xy def save_julia_set_visualization(out_file, image_array): """Save the fractal image of our julia set as a png.""" from matplotlib import pyplot as plt # pylint: disable=wrong-import-order, wrong-import-position plt.imsave(out_file, image_array, format='png') def run(argv=None): # pylint: disable=missing-docstring parser = argparse.ArgumentParser() parser.add_argument('--grid_size', dest='grid_size', default=1000, help='Size of the NxN matrix') parser.add_argument( '--coordinate_output', dest='coordinate_output', required=True, help='Output file to write the color coordinates of the image to.') parser.add_argument('--image_output', dest='image_output', default=None, help='Output file to write the resulting image to.') known_args, pipeline_args = parser.parse_known_args(argv) with beam.Pipeline(argv=pipeline_args) as p: n = int(known_args.grid_size) coordinates = generate_julia_set_colors(p, complex(-.62772, .42193), n, 100) # Group each coordinate triplet by its x value, then write the coordinates # to the output file with an x-coordinate grouping per line. 
# pylint: disable=expression-not-assigned (coordinates | 'x coord key' >> beam.Map(lambda (x, y, i): (x, (x, y, i))) | 'x coord' >> beam.GroupByKey() | 'format' >> beam.Map( lambda (k, coords): ' '.join('(%s, %s, %s)' % c for c in coords)) | WriteToText(known_args.coordinate_output)) # Optionally render the image and save it to a file. # TODO(silviuc): Add this functionality. # if p.options.image_output is not None: # julia_set_image = generate_julia_set_visualization( # file_with_coordinates, n, 100) # save_julia_set_visualization(p.options.image_output, julia_set_image)
apache-2.0
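Editor's note: get_julia_set_point_color above iterates f(z) = z*z + c until the squared magnitude of z exceeds the escape threshold or max_iterations is reached. The same escape-time computation for a single complex point, outside the Beam pipeline (a standalone sketch, not part of the original workflow):

def escape_iteration(z, c=complex(-.62772, .42193), max_iterations=100):
    # Count iterations of z <- z*z + c before the squared magnitude exceeds 2.0.
    i = 0
    for i in range(max_iterations):
        if z.real * z.real + z.imag * z.imag > 2.0:
            break
        z = z * z + c
    return i

# escape_iteration(complex(2.0, 2.0)) -> 0 (escapes immediately)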
equialgo/scikit-learn
examples/linear_model/plot_lasso_and_elasticnet.py
73
2074
""" ======================================== Lasso and Elastic Net for Sparse Signals ======================================== Estimates Lasso and Elastic-Net regression models on a manually generated sparse signal corrupted with an additive noise. Estimated coefficients are compared with the ground-truth. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.metrics import r2_score ############################################################################### # generate some sparse data to play with np.random.seed(42) n_samples, n_features = 50, 200 X = np.random.randn(n_samples, n_features) coef = 3 * np.random.randn(n_features) inds = np.arange(n_features) np.random.shuffle(inds) coef[inds[10:]] = 0 # sparsify coef y = np.dot(X, coef) # add noise y += 0.01 * np.random.normal((n_samples,)) # Split data in train set and test set n_samples = X.shape[0] X_train, y_train = X[:n_samples / 2], y[:n_samples / 2] X_test, y_test = X[n_samples / 2:], y[n_samples / 2:] ############################################################################### # Lasso from sklearn.linear_model import Lasso alpha = 0.1 lasso = Lasso(alpha=alpha) y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test) r2_score_lasso = r2_score(y_test, y_pred_lasso) print(lasso) print("r^2 on test data : %f" % r2_score_lasso) ############################################################################### # ElasticNet from sklearn.linear_model import ElasticNet enet = ElasticNet(alpha=alpha, l1_ratio=0.7) y_pred_enet = enet.fit(X_train, y_train).predict(X_test) r2_score_enet = r2_score(y_test, y_pred_enet) print(enet) print("r^2 on test data : %f" % r2_score_enet) plt.plot(enet.coef_, color='lightgreen', linewidth=2, label='Elastic net coefficients') plt.plot(lasso.coef_, color='gold', linewidth=2, label='Lasso coefficients') plt.plot(coef, '--', color='navy', label='original coefficients') plt.legend(loc='best') plt.title("Lasso R^2: %f, Elastic Net R^2: %f" % (r2_score_lasso, r2_score_enet)) plt.show()
bsd-3-clause
massmutual/scikit-learn
examples/plot_kernel_approximation.py
262
8004
""" ================================================== Explicit feature map approximation for RBF kernels ================================================== An example illustrating the approximation of the feature map of an RBF kernel. .. currentmodule:: sklearn.kernel_approximation It shows how to use :class:`RBFSampler` and :class:`Nystroem` to approximate the feature map of an RBF kernel for classification with an SVM on the digits dataset. Results using a linear SVM in the original space, a linear SVM using the approximate mappings and using a kernelized SVM are compared. Timings and accuracy for varying amounts of Monte Carlo samplings (in the case of :class:`RBFSampler`, which uses random Fourier features) and different sized subsets of the training set (for :class:`Nystroem`) for the approximate mapping are shown. Please note that the dataset here is not large enough to show the benefits of kernel approximation, as the exact SVM is still reasonably fast. Sampling more dimensions clearly leads to better classification results, but comes at a greater cost. This means there is a tradeoff between runtime and accuracy, given by the parameter n_components. Note that solving the Linear SVM and also the approximate kernel SVM could be greatly accelerated by using stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`. This is not easily possible for the case of the kernelized SVM. The second plot visualized the decision surfaces of the RBF kernel SVM and the linear SVM with approximate kernel maps. The plot shows decision surfaces of the classifiers projected onto the first two principal components of the data. This visualization should be taken with a grain of salt since it is just an interesting slice through the decision surface in 64 dimensions. In particular note that a datapoint (represented as a dot) does not necessarily be classified into the region it is lying in, since it will not lie on the plane that the first two principal components span. The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail in :ref:`kernel_approximation`. """ print(__doc__) # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org> # Andreas Mueller <amueller@ais.uni-bonn.de> # License: BSD 3 clause # Standard scientific Python imports import matplotlib.pyplot as plt import numpy as np from time import time # Import datasets, classifiers and performance metrics from sklearn import datasets, svm, pipeline from sklearn.kernel_approximation import (RBFSampler, Nystroem) from sklearn.decomposition import PCA # The digits dataset digits = datasets.load_digits(n_class=9) # To apply an classifier on this data, we need to flatten the image, to # turn the data in a (samples, feature) matrix: n_samples = len(digits.data) data = digits.data / 16. 
data -= data.mean(axis=0) # We learn the digits on the first half of the digits data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2] # Now predict the value of the digit on the second half: data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:] #data_test = scaler.transform(data_test) # Create a classifier: a support vector classifier kernel_svm = svm.SVC(gamma=.2) linear_svm = svm.LinearSVC() # create pipeline from kernel approximation # and linear svm feature_map_fourier = RBFSampler(gamma=.2, random_state=1) feature_map_nystroem = Nystroem(gamma=.2, random_state=1) fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier), ("svm", svm.LinearSVC())]) nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem), ("svm", svm.LinearSVC())]) # fit and predict using linear and kernel svm: kernel_svm_time = time() kernel_svm.fit(data_train, targets_train) kernel_svm_score = kernel_svm.score(data_test, targets_test) kernel_svm_time = time() - kernel_svm_time linear_svm_time = time() linear_svm.fit(data_train, targets_train) linear_svm_score = linear_svm.score(data_test, targets_test) linear_svm_time = time() - linear_svm_time sample_sizes = 30 * np.arange(1, 10) fourier_scores = [] nystroem_scores = [] fourier_times = [] nystroem_times = [] for D in sample_sizes: fourier_approx_svm.set_params(feature_map__n_components=D) nystroem_approx_svm.set_params(feature_map__n_components=D) start = time() nystroem_approx_svm.fit(data_train, targets_train) nystroem_times.append(time() - start) start = time() fourier_approx_svm.fit(data_train, targets_train) fourier_times.append(time() - start) fourier_score = fourier_approx_svm.score(data_test, targets_test) nystroem_score = nystroem_approx_svm.score(data_test, targets_test) nystroem_scores.append(nystroem_score) fourier_scores.append(fourier_score) # plot the results: plt.figure(figsize=(8, 8)) accuracy = plt.subplot(211) # second y axis for timeings timescale = plt.subplot(212) accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel") timescale.plot(sample_sizes, nystroem_times, '--', label='Nystroem approx. kernel') accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel") timescale.plot(sample_sizes, fourier_times, '--', label='Fourier approx. 
kernel') # horizontal lines for exact rbf and linear kernels: accuracy.plot([sample_sizes[0], sample_sizes[-1]], [linear_svm_score, linear_svm_score], label="linear svm") timescale.plot([sample_sizes[0], sample_sizes[-1]], [linear_svm_time, linear_svm_time], '--', label='linear svm') accuracy.plot([sample_sizes[0], sample_sizes[-1]], [kernel_svm_score, kernel_svm_score], label="rbf svm") timescale.plot([sample_sizes[0], sample_sizes[-1]], [kernel_svm_time, kernel_svm_time], '--', label='rbf svm') # vertical line for dataset dimensionality = 64 accuracy.plot([64, 64], [0.7, 1], label="n_features") # legends and labels accuracy.set_title("Classification accuracy") timescale.set_title("Training times") accuracy.set_xlim(sample_sizes[0], sample_sizes[-1]) accuracy.set_xticks(()) accuracy.set_ylim(np.min(fourier_scores), 1) timescale.set_xlabel("Sampling steps = transformed feature dimension") accuracy.set_ylabel("Classification accuracy") timescale.set_ylabel("Training time in seconds") accuracy.legend(loc='best') timescale.legend(loc='best') # visualize the decision surface, projected down to the first # two principal components of the dataset pca = PCA(n_components=8).fit(data_train) X = pca.transform(data_train) # Gemerate grid along first two principal components multiples = np.arange(-2, 2, 0.1) # steps along first component first = multiples[:, np.newaxis] * pca.components_[0, :] # steps along second component second = multiples[:, np.newaxis] * pca.components_[1, :] # combine grid = first[np.newaxis, :, :] + second[:, np.newaxis, :] flat_grid = grid.reshape(-1, data.shape[1]) # title for the plots titles = ['SVC with rbf kernel', 'SVC (linear kernel)\n with Fourier rbf feature map\n' 'n_components=100', 'SVC (linear kernel)\n with Nystroem rbf feature map\n' 'n_components=100'] plt.tight_layout() plt.figure(figsize=(12, 5)) # predict and plot for i, clf in enumerate((kernel_svm, nystroem_approx_svm, fourier_approx_svm)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(1, 3, i + 1) Z = clf.predict(flat_grid) # Put the result into a color plot Z = Z.reshape(grid.shape[:-1]) plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired) plt.axis('off') # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired) plt.title(titles[i]) plt.tight_layout() plt.show()
bsd-3-clause
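Editor's note: the pipelines above wrap RBFSampler and Nystroem in front of a linear SVM. Both approximators can also be used on their own as transformers; a minimal sketch:

import numpy as np
from sklearn.kernel_approximation import RBFSampler

X = np.random.RandomState(0).rand(20, 5)
rbf_feature = RBFSampler(gamma=0.2, n_components=100, random_state=1)
X_features = rbf_feature.fit_transform(X)   # shape (20, 100): random Fourier features
print(X_features.shape)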
barak/autograd
examples/fluidsim/wing.py
1
6136
from __future__ import absolute_import from __future__ import print_function import autograd.numpy as np from autograd import value_and_grad from scipy.optimize import minimize import matplotlib.pyplot as plt import os from builtins import range rows, cols = 40, 60 # Fluid simulation code based on # "Real-Time Fluid Dynamics for Games" by Jos Stam # http://www.intpowertechcorp.com/GDC03.pdf def occlude(f, occlusion): return f * (1 - occlusion) def project(vx, vy, occlusion): """Project the velocity field to be approximately mass-conserving, using a few iterations of Gauss-Seidel.""" p = np.zeros(vx.shape) div = -0.5 * (np.roll(vx, -1, axis=1) - np.roll(vx, 1, axis=1) + np.roll(vy, -1, axis=0) - np.roll(vy, 1, axis=0)) div = make_continuous(div, occlusion) for k in range(50): p = (div + np.roll(p, 1, axis=1) + np.roll(p, -1, axis=1) + np.roll(p, 1, axis=0) + np.roll(p, -1, axis=0))/4.0 p = make_continuous(p, occlusion) vx = vx - 0.5*(np.roll(p, -1, axis=1) - np.roll(p, 1, axis=1)) vy = vy - 0.5*(np.roll(p, -1, axis=0) - np.roll(p, 1, axis=0)) vx = occlude(vx, occlusion) vy = occlude(vy, occlusion) return vx, vy def advect(f, vx, vy): """Move field f according to x and y velocities (u and v) using an implicit Euler integrator.""" rows, cols = f.shape cell_xs, cell_ys = np.meshgrid(np.arange(cols), np.arange(rows)) center_xs = (cell_xs - vx).ravel() center_ys = (cell_ys - vy).ravel() # Compute indices of source cells. left_ix = np.floor(center_ys).astype(np.int) top_ix = np.floor(center_xs).astype(np.int) rw = center_ys - left_ix # Relative weight of right-hand cells. bw = center_xs - top_ix # Relative weight of bottom cells. left_ix = np.mod(left_ix, rows) # Wrap around edges of simulation. right_ix = np.mod(left_ix + 1, rows) top_ix = np.mod(top_ix, cols) bot_ix = np.mod(top_ix + 1, cols) # A linearly-weighted sum of the 4 surrounding cells. flat_f = (1 - rw) * ((1 - bw)*f[left_ix, top_ix] + bw*f[left_ix, bot_ix]) \ + rw * ((1 - bw)*f[right_ix, top_ix] + bw*f[right_ix, bot_ix]) return np.reshape(flat_f, (rows, cols)) def make_continuous(f, occlusion): non_occluded = 1 - occlusion num = np.roll(f, 1, axis=0) * np.roll(non_occluded, 1, axis=0)\ + np.roll(f, -1, axis=0) * np.roll(non_occluded, -1, axis=0)\ + np.roll(f, 1, axis=1) * np.roll(non_occluded, 1, axis=1)\ + np.roll(f, -1, axis=1) * np.roll(non_occluded, -1, axis=1) den = np.roll(non_occluded, 1, axis=0)\ + np.roll(non_occluded, -1, axis=0)\ + np.roll(non_occluded, 1, axis=1)\ + np.roll(non_occluded, -1, axis=1) return f * non_occluded + (1 - non_occluded) * num / ( den + 0.001) def sigmoid(x): return 0.5*(np.tanh(x) + 1.0) # Output ranges from 0 to 1. def simulate(vx, vy, num_time_steps, occlusion, ax=None, render=False): occlusion = sigmoid(occlusion) # Disallow occlusion outside a certain area. mask = np.zeros((rows, cols)) mask[10:30, 10:30] = 1.0 occlusion = occlusion * mask # Initialize smoke bands. 
red_smoke = np.zeros((rows, cols)) red_smoke[rows/4:rows/2] = 1 blue_smoke = np.zeros((rows, cols)) blue_smoke[rows/2:3*rows/4] = 1 print("Running simulation...") vx, vy = project(vx, vy, occlusion) for t in range(num_time_steps): plot_matrix(ax, red_smoke, occlusion, blue_smoke, t, render) vx_updated = advect(vx, vx, vy) vy_updated = advect(vy, vx, vy) vx, vy = project(vx_updated, vy_updated, occlusion) red_smoke = advect(red_smoke, vx, vy) red_smoke = occlude(red_smoke, occlusion) blue_smoke = advect(blue_smoke, vx, vy) blue_smoke = occlude(blue_smoke, occlusion) plot_matrix(ax, red_smoke, occlusion, blue_smoke, num_time_steps, render) return vx, vy def plot_matrix(ax, r, g, b, t, render=False): if ax: plt.cla() ax.imshow(np.concatenate((r[...,np.newaxis], g[...,np.newaxis], b[...,np.newaxis]), axis=2)) ax.set_xticks([]) ax.set_yticks([]) plt.draw() if render: plt.savefig('step{0:03d}.png'.format(t), bbox_inches='tight') plt.pause(0.001) if __name__ == '__main__': simulation_timesteps = 20 print("Loading initial and target states...") init_vx = np.ones((rows, cols)) init_vy = np.zeros((rows, cols)) # Initialize the occlusion to be a block. init_occlusion = -np.ones((rows, cols)) init_occlusion[15:25, 15:25] = 0.0 init_occlusion = init_occlusion.ravel() def drag(vx): return np.mean(init_vx - vx) def lift(vy): return np.mean(vy - init_vy) def objective(params): cur_occlusion = np.reshape(params, (rows, cols)) final_vx, final_vy = simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion) return -lift(final_vy) / drag(final_vx) # Specify gradient of objective function using autograd. objective_with_grad = value_and_grad(objective) fig = plt.figure(figsize=(8,8)) ax = fig.add_subplot(111, frameon=False) def callback(weights): cur_occlusion = np.reshape(weights, (rows, cols)) simulate(init_vx, init_vy, simulation_timesteps, cur_occlusion, ax) print("Rendering initial flow...") callback(init_occlusion) print("Optimizing initial conditions...") result = minimize(objective_with_grad, init_occlusion, jac=True, method='CG', options={'maxiter':50, 'disp':True}, callback=callback) print("Rendering optimized flow...") final_occlusion = np.reshape(result.x, (rows, cols)) simulate(init_vx, init_vy, simulation_timesteps, final_occlusion, ax, render=True) print("Converting frames to an animated GIF...") # Using imagemagick. os.system("convert -delay 5 -loop 0 step*.png " "-delay 250 step{0:03d}.png wing.gif".format(simulation_timesteps)) os.system("rm step*.png")
mit
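Editor's note: the wing example builds its optimizer input with autograd's value_and_grad, which wraps a scalar function so that a single call returns both the objective value and its gradient. A toy sketch:

import autograd.numpy as np
from autograd import value_and_grad

def objective(x):
    return np.sum(x ** 2)

val, grad = value_and_grad(objective)(np.array([1.0, 2.0, 3.0]))
# val -> 14.0, grad -> array([2., 4., 6.])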
lthurlow/Network-Grapher
proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/table.py
2
17111
""" Place a table below the x-axis at location loc. The table consists of a grid of cells. The grid need not be rectangular and can have holes. Cells are added by specifying their row and column. For the purposes of positioning the cell at (0, 0) is assumed to be at the top left and the cell at (max_row, max_col) is assumed to be at bottom right. You can add additional cells outside this range to have convenient ways of positioning more interesting grids. Author : John Gill <jng@europe.renre.com> Copyright : 2004 John Gill and John Hunter License : matplotlib license """ from __future__ import division, print_function import warnings import artist from artist import Artist, allow_rasterization from patches import Rectangle from cbook import is_string_like from matplotlib import docstring from text import Text from transforms import Bbox class Cell(Rectangle): """ A cell is a Rectangle with some associated text. """ PAD = 0.1 # padding between text and rectangle def __init__(self, xy, width, height, edgecolor='k', facecolor='w', fill=True, text='', loc=None, fontproperties=None ): # Call base Rectangle.__init__(self, xy, width=width, height=height, edgecolor=edgecolor, facecolor=facecolor) self.set_clip_on(False) # Create text object if loc is None: loc = 'right' self._loc = loc self._text = Text(x=xy[0], y=xy[1], text=text, fontproperties=fontproperties) self._text.set_clip_on(False) def set_transform(self, trans): Rectangle.set_transform(self, trans) # the text does not get the transform! def set_figure(self, fig): Rectangle.set_figure(self, fig) self._text.set_figure(fig) def get_text(self): 'Return the cell Text intance' return self._text def set_fontsize(self, size): self._text.set_fontsize(size) def get_fontsize(self): 'Return the cell fontsize' return self._text.get_fontsize() def auto_set_font_size(self, renderer): """ Shrink font size until text fits. """ fontsize = self.get_fontsize() required = self.get_required_width(renderer) while fontsize > 1 and required > self.get_width(): fontsize -= 1 self.set_fontsize(fontsize) required = self.get_required_width(renderer) return fontsize @allow_rasterization def draw(self, renderer): if not self.get_visible(): return # draw the rectangle Rectangle.draw(self, renderer) # position the text self._set_text_position(renderer) self._text.draw(renderer) def _set_text_position(self, renderer): """ Set text up so it draws in the right place. Currently support 'left', 'center' and 'right' """ bbox = self.get_window_extent(renderer) l, b, w, h = bbox.bounds # draw in center vertically self._text.set_verticalalignment('center') y = b + (h / 2.0) # now position horizontally if self._loc == 'center': self._text.set_horizontalalignment('center') x = l + (w / 2.0) elif self._loc == 'left': self._text.set_horizontalalignment('left') x = l + (w * self.PAD) else: self._text.set_horizontalalignment('right') x = l + (w * (1.0 - self.PAD)) self._text.set_position((x, y)) def get_text_bounds(self, renderer): """ Get text bounds in axes co-ordinates. """ bbox = self._text.get_window_extent(renderer) bboxa = bbox.inverse_transformed(self.get_data_transform()) return bboxa.bounds def get_required_width(self, renderer): """ Get width required for this cell. """ l, b, w, h = self.get_text_bounds(renderer) return w * (1.0 + (2.0 * self.PAD)) def set_text_props(self, **kwargs): 'update the text properties with kwargs' self._text.update(kwargs) class Table(Artist): """ Create a table of cells. Table can have (optional) row and column headers. 
Each entry in the table can be either text or patches. Column widths and row heights for the table can be specifified. Return value is a sequence of text, line and patch instances that make up the table """ codes = {'best': 0, 'upper right': 1, # default 'upper left': 2, 'lower left': 3, 'lower right': 4, 'center left': 5, 'center right': 6, 'lower center': 7, 'upper center': 8, 'center': 9, 'top right': 10, 'top left': 11, 'bottom left': 12, 'bottom right': 13, 'right': 14, 'left': 15, 'top': 16, 'bottom': 17, } FONTSIZE = 10 AXESPAD = 0.02 # the border between the axes and table edge def __init__(self, ax, loc=None, bbox=None): Artist.__init__(self) if is_string_like(loc) and loc not in self.codes: warnings.warn('Unrecognized location %s. Falling back on ' 'bottom; valid locations are\n%s\t' % (loc, '\n\t'.join(self.codes.iterkeys()))) loc = 'bottom' if is_string_like(loc): loc = self.codes.get(loc, 1) self.set_figure(ax.figure) self._axes = ax self._loc = loc self._bbox = bbox # use axes coords self.set_transform(ax.transAxes) self._texts = [] self._cells = {} self._autoRows = [] self._autoColumns = [] self._autoFontsize = True self._cachedRenderer = None def add_cell(self, row, col, *args, **kwargs): """ Add a cell to the table. """ xy = (0, 0) cell = Cell(xy, *args, **kwargs) cell.set_figure(self.figure) cell.set_transform(self.get_transform()) cell.set_clip_on(False) self._cells[(row, col)] = cell def _approx_text_height(self): return (self.FONTSIZE / 72.0 * self.figure.dpi / self._axes.bbox.height * 1.2) @allow_rasterization def draw(self, renderer): # Need a renderer to do hit tests on mouseevent; assume the last one # will do if renderer is None: renderer = self._cachedRenderer if renderer is None: raise RuntimeError('No renderer defined') self._cachedRenderer = renderer if not self.get_visible(): return renderer.open_group('table') self._update_positions(renderer) keys = self._cells.keys() keys.sort() for key in keys: self._cells[key].draw(renderer) #for c in self._cells.itervalues(): # c.draw(renderer) renderer.close_group('table') def _get_grid_bbox(self, renderer): """Get a bbox, in axes co-ordinates for the cells. Only include those in the range (0,0) to (maxRow, maxCol)""" boxes = [self._cells[pos].get_window_extent(renderer) for pos in self._cells.iterkeys() if pos[0] >= 0 and pos[1] >= 0] bbox = Bbox.union(boxes) return bbox.inverse_transformed(self.get_transform()) def contains(self, mouseevent): """Test whether the mouse event occurred in the table. Returns T/F, {} """ if callable(self._contains): return self._contains(self, mouseevent) # TODO: Return index of the cell containing the cursor so that the user # doesn't have to bind to each one individually. if self._cachedRenderer is not None: boxes = [self._cells[pos].get_window_extent(self._cachedRenderer) for pos in self._cells.iterkeys() if pos[0] >= 0 and pos[1] >= 0] bbox = Bbox.union(boxes) return bbox.contains(mouseevent.x, mouseevent.y), {} else: return False, {} def get_children(self): 'Return the Artists contained by the table' return self._cells.values() get_child_artists = get_children # backward compatibility def get_window_extent(self, renderer): 'Return the bounding box of the table in window coords' boxes = [cell.get_window_extent(renderer) for cell in self._cells.values()] return Bbox.union(boxes) def _do_cell_alignment(self): """ Calculate row heights and column widths. Position cells accordingly. 
""" # Calculate row/column widths widths = {} heights = {} for (row, col), cell in self._cells.iteritems(): height = heights.setdefault(row, 0.0) heights[row] = max(height, cell.get_height()) width = widths.setdefault(col, 0.0) widths[col] = max(width, cell.get_width()) # work out left position for each column xpos = 0 lefts = {} cols = widths.keys() cols.sort() for col in cols: lefts[col] = xpos xpos += widths[col] ypos = 0 bottoms = {} rows = heights.keys() rows.sort() rows.reverse() for row in rows: bottoms[row] = ypos ypos += heights[row] # set cell positions for (row, col), cell in self._cells.iteritems(): cell.set_x(lefts[col]) cell.set_y(bottoms[row]) def auto_set_column_width(self, col): self._autoColumns.append(col) def _auto_set_column_width(self, col, renderer): """ Automagically set width for column. """ cells = [key for key in self._cells if key[1] == col] # find max width width = 0 for cell in cells: c = self._cells[cell] width = max(c.get_required_width(renderer), width) # Now set the widths for cell in cells: self._cells[cell].set_width(width) def auto_set_font_size(self, value=True): """ Automatically set font size. """ self._autoFontsize = value def _auto_set_font_size(self, renderer): if len(self._cells) == 0: return fontsize = self._cells.values()[0].get_fontsize() cells = [] for key, cell in self._cells.iteritems(): # ignore auto-sized columns if key[1] in self._autoColumns: continue size = cell.auto_set_font_size(renderer) fontsize = min(fontsize, size) cells.append(cell) # now set all fontsizes equal for cell in self._cells.itervalues(): cell.set_fontsize(fontsize) def scale(self, xscale, yscale): """ Scale column widths by xscale and row heights by yscale. """ for c in self._cells.itervalues(): c.set_width(c.get_width() * xscale) c.set_height(c.get_height() * yscale) def set_fontsize(self, size): """ Set the fontsize of the cell text ACCEPTS: a float in points """ for cell in self._cells.itervalues(): cell.set_fontsize(size) def _offset(self, ox, oy): 'Move all the artists by ox,oy (axes coords)' for c in self._cells.itervalues(): x, y = c.get_x(), c.get_y() c.set_x(x + ox) c.set_y(y + oy) def _update_positions(self, renderer): # called from renderer to allow more precise estimates of # widths and heights with get_window_extent # Do any auto width setting for col in self._autoColumns: self._auto_set_column_width(col, renderer) if self._autoFontsize: self._auto_set_font_size(renderer) # Align all the cells self._do_cell_alignment() bbox = self._get_grid_bbox(renderer) l, b, w, h = bbox.bounds if self._bbox is not None: # Position according to bbox rl, rb, rw, rh = self._bbox self.scale(rw / w, rh / h) ox = rl - l oy = rb - b self._do_cell_alignment() else: # Position using loc (BEST, UR, UL, LL, LR, CL, CR, LC, UC, C, TR, TL, BL, BR, R, L, T, B) = range(len(self.codes)) # defaults for center ox = (0.5 - w / 2) - l oy = (0.5 - h / 2) - b if self._loc in (UL, LL, CL): # left ox = self.AXESPAD - l if self._loc in (BEST, UR, LR, R, CR): # right ox = 1 - (l + w + self.AXESPAD) if self._loc in (BEST, UR, UL, UC): # upper oy = 1 - (b + h + self.AXESPAD) if self._loc in (LL, LR, LC): # lower oy = self.AXESPAD - b if self._loc in (LC, UC, C): # center x ox = (0.5 - w / 2) - l if self._loc in (CL, CR, C): # center y oy = (0.5 - h / 2) - b if self._loc in (TL, BL, L): # out left ox = - (l + w) if self._loc in (TR, BR, R): # out right ox = 1.0 - l if self._loc in (TR, TL, T): # out top oy = 1.0 - b if self._loc in (BL, BR, B): # out bottom oy = - (b + h) self._offset(ox, oy) def 
get_celld(self): 'return a dict of cells in the table' return self._cells def table(ax, cellText=None, cellColours=None, cellLoc='right', colWidths=None, rowLabels=None, rowColours=None, rowLoc='left', colLabels=None, colColours=None, colLoc='center', loc='bottom', bbox=None): """ TABLE(cellText=None, cellColours=None, cellLoc='right', colWidths=None, rowLabels=None, rowColours=None, rowLoc='left', colLabels=None, colColours=None, colLoc='center', loc='bottom', bbox=None) Factory function to generate a Table instance. Thanks to John Gill for providing the class and table. """ # Check we have some cellText if cellText is None: # assume just colours are needed rows = len(cellColours) cols = len(cellColours[0]) cellText = [[''] * rows] * cols rows = len(cellText) cols = len(cellText[0]) for row in cellText: assert len(row) == cols if cellColours is not None: assert len(cellColours) == rows for row in cellColours: assert len(row) == cols else: cellColours = ['w' * cols] * rows # Set colwidths if not given if colWidths is None: colWidths = [1.0 / cols] * cols # Check row and column labels rowLabelWidth = 0 if rowLabels is None: if rowColours is not None: rowLabels = [''] * cols rowLabelWidth = colWidths[0] elif rowColours is None: rowColours = 'w' * rows if rowLabels is not None: assert len(rowLabels) == rows offset = 0 if colLabels is None: if colColours is not None: colLabels = [''] * rows offset = 1 elif colColours is None: colColours = 'w' * cols offset = 1 if rowLabels is not None: assert len(rowLabels) == rows # Set up cell colours if not given if cellColours is None: cellColours = ['w' * cols] * rows # Now create the table table = Table(ax, loc, bbox) height = table._approx_text_height() # Add the cells for row in xrange(rows): for col in xrange(cols): table.add_cell(row + offset, col, width=colWidths[col], height=height, text=cellText[row][col], facecolor=cellColours[row][col], loc=cellLoc) # Do column labels if colLabels is not None: for col in xrange(cols): table.add_cell(0, col, width=colWidths[col], height=height, text=colLabels[col], facecolor=colColours[col], loc=colLoc) # Do row labels if rowLabels is not None: for row in xrange(rows): table.add_cell(row + offset, -1, width=rowLabelWidth or 1e-15, height=height, text=rowLabels[row], facecolor=rowColours[row], loc=rowLoc) if rowLabelWidth == 0: table.auto_set_column_width(-1) ax.add_table(table) return table docstring.interpd.update(Table=artist.kwdoc(Table))
mit
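Editor's note: the table() factory at the bottom of this module is what the Axes.table and pyplot.table wrappers call. A minimal sketch of attaching a small table below an axes (the cell values are arbitrary):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([1, 2, 3], [2, 4, 1])
ax.table(cellText=[['1.0', '2.0'], ['3.0', '4.0']],
         rowLabels=['row A', 'row B'],
         colLabels=['col 1', 'col 2'],
         loc='bottom')
plt.show()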
tienjunhsu/trading-with-python
sandbox/spreadCalculations.py
78
1496
''' Created on 28 okt 2011 @author: jev ''' from tradingWithPython import estimateBeta, Spread, returns, Portfolio, readBiggerScreener from tradingWithPython.lib import yahooFinance from pandas import DataFrame, Series import numpy as np import matplotlib.pyplot as plt import os symbols = ['SPY','IWM'] y = yahooFinance.HistData('temp.csv') y.startDate = (2007,1,1) df = y.loadSymbols(symbols,forceDownload=False) #df = y.downloadData(symbols) res = readBiggerScreener('CointPairs.csv') #---check with spread scanner #sp = DataFrame(index=symbols) # #sp['last'] = df.ix[-1,:] #sp['targetCapital'] = Series({'SPY':100,'IWM':-100}) #sp['targetShares'] = sp['targetCapital']/sp['last'] #print sp #The dollar-neutral ratio is about 1 * SPY - 1.7 * IWM. You will get the spread = zero (or probably very near zero) #s = Spread(symbols, histClose = df) #print s #s.value.plot() #print 'beta (returns)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='returns') #print 'beta (log)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='log') #print 'beta (standard)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='standard') #p = Portfolio(df) #p.setShares([1, -1.7]) #p.value.plot() quote = yahooFinance.getQuote(symbols) print quote s = Spread(symbols,histClose=df, estimateBeta = False) s.setLast(quote['last']) s.setShares(Series({'SPY':1,'IWM':-1.7})) print s #s.value.plot() #s.plot() fig = plt.figure(2) s.plot()
bsd-3-clause
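Editor's note: the script above leans on tradingWithPython's estimateBeta and Spread helpers, whose internals are not shown here. As a generic, library-independent illustration only (this sketch is not the package's implementation), a hedge ratio between two price series can be estimated from a least-squares fit of their returns:

import numpy as np

def hedge_ratio(prices_a, prices_b):
    # Slope of a least-squares fit of A's simple returns on B's simple returns.
    ra = np.diff(prices_a) / prices_a[:-1]
    rb = np.diff(prices_b) / prices_b[:-1]
    return np.polyfit(rb, ra, 1)[0]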
DTOcean/dtocean-core
tests/test_data_definitions_timetable.py
1
7507
import pytest from datetime import datetime, timedelta import numpy as np import pandas as pd import matplotlib.pyplot as plt from aneris.control.factory import InterfaceFactory from dtocean_core.core import (AutoFileInput, AutoFileOutput, AutoPlot, AutoQuery, Core) from dtocean_core.data import CoreMetaData from dtocean_core.data.definitions import TimeTable, TimeTableColumn def test_TimeTable_available(): new_core = Core() all_objs = new_core.control._store._structures assert "TimeTable" in all_objs.keys() def test_TimeTable(): dates = [] dt = datetime(2010, 12, 01) end = datetime(2010, 12, 02, 23, 59, 59) step = timedelta(seconds=3600) while dt < end: dates.append(dt) dt += step values = np.random.rand(len(dates)) raw = {"DateTime": dates, "a": values, "b": values} meta = CoreMetaData({"identifier": "test", "structure": "test", "title": "test", "labels": ["a", "b"], "units": ["kg", None]}) test = TimeTable() a = test.get_data(raw, meta) b = test.get_value(a) assert "a" in b assert len(b) == len(dates) assert len(b.resample('D').mean()) == 2 def test_get_None(): test = TimeTable() result = test.get_value(None) assert result is None @pytest.mark.parametrize("fext", [".csv", ".xls", ".xlsx"]) def test_TimeTable_auto_file(tmpdir, fext): test_path = tmpdir.mkdir("sub").join("test{}".format(fext)) test_path_str = str(test_path) dates = [] dt = datetime(2010, 12, 01) end = datetime(2010, 12, 02, 23, 59, 59) step = timedelta(seconds=3600) while dt < end: dates.append(dt) dt += step values = np.random.rand(len(dates)) raw = {"DateTime": dates, "a": values, "b": values} meta = CoreMetaData({"identifier": "test", "structure": "test", "title": "test", "labels": ["a", "b"], "units": ["kg", None]}) test = TimeTable() fout_factory = InterfaceFactory(AutoFileOutput) FOutCls = fout_factory(meta, test) fout = FOutCls() fout._path = test_path_str fout.data.result = test.get_data(raw, meta) fout.connect() assert len(tmpdir.listdir()) == 1 fin_factory = InterfaceFactory(AutoFileInput) FInCls = fin_factory(meta, test) fin = FInCls() fin._path = test_path_str fin.connect() result = test.get_data(fin.data.result, meta) assert "a" in result assert len(result) == len(dates) assert len(result.resample('D').mean()) == 2 def test_TimeTable_auto_plot(tmpdir): dates = [] dt = datetime(2010, 12, 01) end = datetime(2010, 12, 02, 23, 59, 59) step = timedelta(seconds=3600) while dt < end: dates.append(dt) dt += step values = np.random.rand(len(dates)) raw = {"DateTime": dates, "a": values, "b": values} meta = CoreMetaData({"identifier": "test", "structure": "test", "title": "test", "labels": ["a", "b"], "units": ["kg", None]}) test = TimeTable() fout_factory = InterfaceFactory(AutoPlot) PlotCls = fout_factory(meta, test) plot = PlotCls() plot.data.result = test.get_data(raw, meta) plot.meta.result = meta plot.connect() assert len(plt.get_fignums()) == 1 plt.close("all") def test_TimeTableColumn_available(): new_core = Core() all_objs = new_core.control._store._structures assert "TimeTableColumn" in all_objs.keys() def test_TimeTableColumn_auto_db(mocker): dates = [] dt = datetime(2010, 12, 01) end = datetime(2010, 12, 02, 23, 59, 59) step = timedelta(seconds=3600) while dt < end: dates.append(dt) dt += step values = np.random.rand(len(dates)) mock_dict = {"date": [x.date() for x in dates], "time": [x.time() for x in dates], "a": values, "b": values} mock_df = pd.DataFrame(mock_dict) mocker.patch('dtocean_core.data.definitions.get_table_df', return_value=mock_df, autospec=True) meta = CoreMetaData({"identifier": "test", 
"structure": "test", "title": "test", "labels": ["a", "b"], "units": ["kg", None], "tables": ["mock.mock", "date", "time", "a", "b"]}) test = TimeTableColumn() query_factory = InterfaceFactory(AutoQuery) QueryCls = query_factory(meta, test) query = QueryCls() query.meta.result = meta query.connect() result = test.get_data(query.data.result, meta) assert "a" in result assert len(result) == len(dates) assert len(result.resample('D').mean()) == 2 def test_TimeSeriesColumn_auto_db_empty(mocker): mock_dict = {"date": [], "time": [], "a": [], "b": []} mock_df = pd.DataFrame(mock_dict) mocker.patch('dtocean_core.data.definitions.get_table_df', return_value=mock_df, autospec=True) meta = CoreMetaData({"identifier": "test", "structure": "test", "title": "test", "labels": ["a", "b"], "units": ["kg", None], "tables": ["mock.mock", "date", "time", "a", "b"]}) test = TimeTableColumn() query_factory = InterfaceFactory(AutoQuery) QueryCls = query_factory(meta, test) query = QueryCls() query.meta.result = meta query.connect() assert query.data.result is None def test_TimeSeriesColumn_auto_db_none(mocker): dates = [] dt = datetime(2010, 12, 01) end = datetime(2010, 12, 02, 23, 59, 59) step = timedelta(seconds=3600) while dt < end: dates.append(dt) dt += step values = np.random.rand(len(dates)) mock_dict = {"date": [None] * len(dates), "time": [x.time() for x in dates], "a": values, "b": values} mock_df = pd.DataFrame(mock_dict) mocker.patch('dtocean_core.data.definitions.get_table_df', return_value=mock_df, autospec=True) meta = CoreMetaData({"identifier": "test", "structure": "test", "title": "test", "labels": ["a", "b"], "units": ["kg", None], "tables": ["mock.mock", "date", "time", "a", "b"]}) test = TimeTableColumn() query_factory = InterfaceFactory(AutoQuery) QueryCls = query_factory(meta, test) query = QueryCls() query.meta.result = meta query.connect() assert query.data.result is None
gpl-3.0
cpbl/cpblUtilities
matplotlib_utils.py
1
7242
#!/usr/bin/python import matplotlib.pyplot as plt def prepare_figure_for_publication(ax=None, width_cm=None, width_inches=None, height_cm=None, height_inches=None, fontsize=None, fontsize_labels=None, fontsize_ticklabels=None, fontsize_legend=None, fontsize_annotations =None, TeX = True, # Used for ax=None case (setup) ): """ Two ways to use this: (1) Before creating a figure, with ax=None (2) To fine-tune a figure, using ax One reasonable option for making compact figures like for Science/Nature is to create everything at double scale. This works a little more naturally with Matplotlib's default line/axis/etc sizes. Also, if you change sizes of, e.g. xticklabels and x-axis labels after they've been created, they will not necessarily be relocated appropriately. So you can call prepare_figure_for_publication with no ax/fig argument to set up figure defaults prior to creating the figure in the first place. Some wisdom on graphics: - 2015: How to produce PDFs of a given width, with chosen font size, etc: (1) Fix width to journal specifications from the beginning / early. Adjust height as you go, according to preferences for aspect ratio: figure(figsize=(11.4/2.54, chosen height)) (2) Do not use 'bbox_inches="tight"' in savefig('fn.pdf'). Instead, use the subplot_adjust options to manually adjust edges to get the figure content to fit in the PDF output (3) Be satisfied with that. If you must get something exactly tight and exactly the right size, you do this in Inkscape. But you cannot scale the content and bbox in the same step. Load PDF, select all, choose the units in the box at the top of the main menu bar, click on the lock htere, set the width. Then, in File Properties dialog, resize file to content. Save. """ if ax is None: # Set up plot settings, prior to creation fo a figure params = { 'axes.labelsize': fontsize_labels if fontsize_labels is not None else fontsize, 'font.size': fontsize, 'legend.fontsize': fontsize_legend if fontsize_legend is not None else fontsize, 'xtick.labelsize': fontsize_ticklabels if fontsize_ticklabels is not None else fontsize_labels if fontsize_labels is not None else fontsize, 'ytick.labelsize': fontsize_ticklabels if fontsize_ticklabels is not None else fontsize_labels if fontsize_labels is not None else fontsize, 'figure.figsize': (width_inches, height_inches), } if TeX: params.update({ 'text.usetex': TeX, 'text.latex.preamble': r'\usepackage{amsmath} \usepackage{amssymb}', 'text.latex.unicode': True, }) if not TeX: params.update({'text.latex.preamble':''}) plt.rcParams.update(params) return fig = ax.get_figure() if width_inches: fig.set_figwidth(width_inches) assert width_cm is None if height_inches: fig.set_figheight(height_inches) assert height_cm is None if width_cm: fig.set_figwidth(width_cm/2.54) assert width_inches is None if height_cm: fig.set_figheight(height_cm/2.54) assert height_inches is None #ax = plt.subplot(111, xlabel='x', ylabel='y', title='title') for item in fig.findobj(plt.Text) + [ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels(): if fontsize: item.set_fontsize(fontsize) def plot_diagonal(xdata=None, ydata=None, ax=None, **args): """ Plot a 45-degree line """ import pandas as pd if ax is None: ax = plt.gca() #LL = min(min(df[xv]), min(df[yv])), max(max(df[xv]), max(df[yv])) if xdata is None and ydata is None: xl, yl = ax.get_xlim(), ax.get_ylim() LL = max(min(xl), min(yl)), min(max(xl), max(yl)), elif xdata is not None and ydata is None: assert isinstance(xdata, pd.DataFrame) dd = xdata.dropna() 
LL = dd.min().max(), dd.max().min() else: assert xdata is not None assert ydata is not None #if isinstance(xdata, pd.Series): xdata = xdata.vlu xl, yl = xdata, ydata LL = max(min(xl), min(yl)), min(max(xl), max(yl)), ax.plot(LL, LL, **args) def figureFontSetup(uniform=12,figsize='paper', amsmath=True): """ This is deprecated. Use prepare_figure_for_publication Set font size settings for matplotlib figures so that they are reasonable for exporting to PDF to use in publications / presentations..... [different!] If not for paper, this is not yet useful. Here are some good sizes for paper: figure(468,figsize=(4.6,2)) # in inches figureFontSetup(uniform=12) # 12 pt font for a subplot(211) or for a single plot (?) figure(127,figsize=(4.6,4)) # in inches. Only works if figure is not open from last run! why does the following not work to deal with the bad bounding-box size problem?! inkscape -f GSSseries-happyLife-QC-bw.pdf --verb=FitCanvasToDrawing -A tmp.pdf .: Due to inkscape cli sucks! bug. --> See savefigall for an inkscape implementation. 2012 May: new matplotlib has tight_layout(). But it rejigs all subplots etc. My inkscape solution is much better, since it doesn't change the layout. However, it does mean that the original size is not respected! ... Still, my favourite way from now on to make figures is to append the font size setting to the name, i.e. to make one for a given intended final size, and to do no rescaling in LaTeX. Use tight_layout() if it looks okay, but the inkscape solution in general. n.b. a clf() erases size settings on a figure! """ figsizelookup={'paper':(4.6,4),'quarter':(1.25,1) ,None:None} try: figsize=figsizelookup[figsize] except (KeyError, TypeError): pass params = {#'backend': 'ps', 'axes.labelsize': 16, #'text.fontsize': 14, 'font.size': 14, 'legend.fontsize': 10, 'xtick.labelsize': 16, 'ytick.labelsize': 16, 'text.usetex': True, 'figure.figsize': figsize } #'figure.figsize': fig_size} if uniform is not None: assert isinstance(uniform,int) params = {#'backend': 'ps', 'axes.labelsize': uniform, #'text.fontsize': uniform, 'font.size': uniform, 'legend.fontsize': uniform, 'xtick.labelsize': uniform, 'ytick.labelsize': uniform, 'text.usetex': True, 'text.latex.unicode': True, 'text.latex.preamble':r'\usepackage{amsmath},\usepackage{amssymb}', 'figure.figsize': figsize } if not amsmath: params.update({'text.latex.preamble':''}) plt.rcParams.update(params) plt.rcParams['text.latex.unicode']=True #if figsize: # plt.rcParams[figure.figsize]={'paper':(4.6,4)}[figsize] return(params)
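A minimal usage sketch for the two entry points documented above, assuming the module is importable as cpblUtilities.matplotlib_utils (hypothetical import path) and with TeX switched off so no LaTeX installation is needed:

import matplotlib.pyplot as plt
from cpblUtilities.matplotlib_utils import prepare_figure_for_publication, plot_diagonal  # hypothetical path

# (1) set rcParams before the figure exists
prepare_figure_for_publication(fontsize=8, width_inches=11.4 / 2.54,
                               height_inches=2.0, TeX=False)
fig, ax = plt.subplots()
ax.scatter([1, 2, 3], [1.2, 1.9, 3.4])
plot_diagonal(ax=ax, color='grey', ls=':')      # 45-degree reference line

# (2) fine-tune the finished axes: fix the physical width and font size
prepare_figure_for_publication(ax=ax, width_cm=11.4, fontsize=8)
fig.savefig('figure.pdf')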
gpl-3.0
SpaceKatt/CSPLN
apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/backends/backend_gtkcairo.py
3
2079
""" GTK+ Matplotlib interface using cairo (not GDK) drawing operations. Author: Steve Chaplin """ import gtk if gtk.pygtk_version < (2,7,0): import cairo.gtk from matplotlib.backends import backend_cairo from matplotlib.backends.backend_gtk import * backend_version = 'PyGTK(%d.%d.%d) ' % gtk.pygtk_version + \ 'Pycairo(%s)' % backend_cairo.backend_version _debug = False #_debug = True def new_figure_manager(num, *args, **kwargs): """ Create a new figure manager instance """ if _debug: print 'backend_gtkcairo.%s()' % fn_name() FigureClass = kwargs.pop('FigureClass', Figure) thisFig = FigureClass(*args, **kwargs) canvas = FigureCanvasGTKCairo(thisFig) return FigureManagerGTK(canvas, num) class RendererGTKCairo (backend_cairo.RendererCairo): if gtk.pygtk_version >= (2,7,0): def set_pixmap (self, pixmap): self.gc.ctx = pixmap.cairo_create() else: def set_pixmap (self, pixmap): self.gc.ctx = cairo.gtk.gdk_cairo_create (pixmap) class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK): filetypes = FigureCanvasGTK.filetypes.copy() filetypes.update(backend_cairo.FigureCanvasCairo.filetypes) def _renderer_init(self): """Override to use cairo (rather than GDK) renderer""" if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name()) self._renderer = RendererGTKCairo (self.figure.dpi) class FigureManagerGTKCairo(FigureManagerGTK): def _get_toolbar(self, canvas): # must be inited after the window, drawingArea and figure # attrs are set if matplotlib.rcParams['toolbar']=='classic': toolbar = NavigationToolbar (canvas, self.window) elif matplotlib.rcParams['toolbar']=='toolbar2': toolbar = NavigationToolbar2GTKCairo (canvas, self.window) else: toolbar = None return toolbar class NavigationToolbar2Cairo(NavigationToolbar2GTK): def _get_canvas(self, fig): return FigureCanvasGTKCairo(fig)
gpl-3.0
spallavolu/scikit-learn
examples/linear_model/plot_bayesian_ridge.py
248
2588
""" ========================= Bayesian Ridge Regression ========================= Computes a Bayesian Ridge Regression on a synthetic dataset. See :ref:`bayesian_ridge_regression` for more information on the regressor. Compared to the OLS (ordinary least squares) estimator, the coefficient weights are slightly shifted toward zeros, which stabilises them. As the prior on the weights is a Gaussian prior, the histogram of the estimated weights is Gaussian. The estimation of the model is done by iteratively maximizing the marginal log-likelihood of the observations. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from scipy import stats from sklearn.linear_model import BayesianRidge, LinearRegression ############################################################################### # Generating simulated data with Gaussian weigthts np.random.seed(0) n_samples, n_features = 100, 100 X = np.random.randn(n_samples, n_features) # Create Gaussian data # Create weigts with a precision lambda_ of 4. lambda_ = 4. w = np.zeros(n_features) # Only keep 10 weights of interest relevant_features = np.random.randint(0, n_features, 10) for i in relevant_features: w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_)) # Create noise with a precision alpha of 50. alpha_ = 50. noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples) # Create the target y = np.dot(X, w) + noise ############################################################################### # Fit the Bayesian Ridge Regression and an OLS for comparison clf = BayesianRidge(compute_score=True) clf.fit(X, y) ols = LinearRegression() ols.fit(X, y) ############################################################################### # Plot true weights, estimated weights and histogram of the weights plt.figure(figsize=(6, 5)) plt.title("Weights of the model") plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate") plt.plot(w, 'g-', label="Ground truth") plt.plot(ols.coef_, 'r--', label="OLS estimate") plt.xlabel("Features") plt.ylabel("Values of the weights") plt.legend(loc="best", prop=dict(size=12)) plt.figure(figsize=(6, 5)) plt.title("Histogram of the weights") plt.hist(clf.coef_, bins=n_features, log=True) plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)), 'ro', label="Relevant features") plt.ylabel("Features") plt.xlabel("Values of the weights") plt.legend(loc="lower left") plt.figure(figsize=(6, 5)) plt.title("Marginal log-likelihood") plt.plot(clf.scores_) plt.ylabel("Score") plt.xlabel("Iterations") plt.show()
bsd-3-clause
jmetzen/scikit-learn
examples/ensemble/plot_partial_dependence.py
3
4833
""" ======================== Partial Dependence Plots ======================== Partial dependence plots show the dependence between the target function [2]_ and a set of 'target' features, marginalizing over the values of all other features (the complement features). Due to the limits of human perception the size of the target feature set must be small (usually, one or two) thus the target features are usually chosen among the most important features (see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`). This example shows how to obtain partial dependence plots from a :class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California housing dataset. The example is taken from [1]_. The plot shows four one-way and one two-way partial dependence plots. The target variables for the one-way PDP are: median income (`MedInc`), avg. occupants per household (`AvgOccup`), median house age (`HouseAge`), and avg. rooms per household (`AveRooms`). We can clearly see that the median house price shows a linear relationship with the median income (top left) and that the house price drops when the avg. occupants per household increases (top middle). The top right plot shows that the house age in a district does not have a strong influence on the (median) house price; so does the average rooms per household. The tick marks on the x-axis represent the deciles of the feature values in the training data. Partial dependence plots with two target features enable us to visualize interactions among them. The two-way partial dependence plot shows the dependence of median house price on joint values of house age and avg. occupants per household. We can clearly see an interaction between the two features: For an avg. occupancy greater than two, the house price is nearly independent of the house age, whereas for values less than two there is a strong dependence on age. .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", Springer, 2009. .. [2] For classification you can think of it as the regression score before the link function. 
""" print(__doc__) import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from six.moves.urllib.error import HTTPError from sklearn.model_selection import train_test_split from sklearn.ensemble import GradientBoostingRegressor from sklearn.ensemble.partial_dependence import plot_partial_dependence from sklearn.ensemble.partial_dependence import partial_dependence from sklearn.datasets.california_housing import fetch_california_housing def main(): # fetch California housing dataset try: cal_housing = fetch_california_housing() except HTTPError: print("Failed downloading california housing data.") return # split 80/20 train-test X_train, X_test, y_train, y_test = train_test_split(cal_housing.data, cal_housing.target, test_size=0.2, random_state=1) names = cal_housing.feature_names print('_' * 80) print("Training GBRT...") clf = GradientBoostingRegressor(n_estimators=100, max_depth=4, learning_rate=0.1, loss='huber', random_state=1) clf.fit(X_train, y_train) print("done.") print('_' * 80) print('Convenience plot with ``partial_dependence_plots``') print features = [0, 5, 1, 2, (5, 1)] fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names, n_jobs=3, grid_resolution=50) fig.suptitle('Partial dependence of house value on nonlocation features\n' 'for the California housing dataset') plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle print('_' * 80) print('Custom 3d plot via ``partial_dependence``') print fig = plt.figure() target_feature = (1, 5) pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature, X=X_train, grid_resolution=50) XX, YY = np.meshgrid(x_axis, y_axis) Z = pdp.T.reshape(XX.shape).T ax = Axes3D(fig) surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu) ax.set_xlabel(names[target_feature[0]]) ax.set_ylabel(names[target_feature[1]]) ax.set_zlabel('Partial dependence') # pretty init view ax.view_init(elev=22, azim=122) plt.colorbar(surf) plt.suptitle('Partial dependence of house value on median age and ' 'average occupancy') plt.subplots_adjust(top=0.9) plt.show() if __name__ == "__main__": main()
bsd-3-clause
jian-li/rpg_svo
svo_analysis/scripts/compare_results.py
17
6127
#!/usr/bin/python import os import sys import time import rospkg import numpy as np import matplotlib.pyplot as plt import yaml import argparse from matplotlib import rc # tell matplotlib to use latex font rc('font',**{'family':'serif','serif':['Cardo']}) rc('text', usetex=True) from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes from mpl_toolkits.axes_grid1.inset_locator import mark_inset def plot_trajectory(ax, filename, label, color, linewidth): file = open(filename) data = file.read() lines = data.replace(","," ").replace("\t"," ").split("\n") trajectory = np.array([[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"], dtype=np.float64) ax.plot(trajectory[:,1], trajectory[:,2], label=label, color=color, linewidth=linewidth) def compare_results(experiments, results_dir, comparison_dir, plot_scale_drift = False): # ------------------------------------------------------------------------------ # position error fig_poserr = plt.figure(figsize=(8,6)) ax_poserr_x = fig_poserr.add_subplot(311, ylabel='x-error [m]') ax_poserr_y = fig_poserr.add_subplot(312, ylabel='y-error [m]') ax_poserr_z = fig_poserr.add_subplot(313, ylabel='z-error [m]', xlabel='time [s]') for exp in experiments: # load dataset parameters params_stream = open(os.path.join(results_dir, exp, 'params.yaml')) params = yaml.load(params_stream) # plot translation error trans_error = np.loadtxt(os.path.join(results_dir, exp, 'translation_error.txt')) trans_error[:,0] = trans_error[:,0]-trans_error[0,0] ax_poserr_x.plot(trans_error[:,0], trans_error[:,1], label=params['experiment_label']) ax_poserr_y.plot(trans_error[:,0], trans_error[:,2]) ax_poserr_z.plot(trans_error[:,0], trans_error[:,3]) ax_poserr_x.set_xlim([0, trans_error[-1,0]+4]) ax_poserr_y.set_xlim([0, trans_error[-1,0]+4]) ax_poserr_z.set_xlim([0, trans_error[-1,0]+4]) ax_poserr_x.legend(bbox_to_anchor=[0, 0], loc='lower left', ncol=3) ax_poserr_x.grid() ax_poserr_y.grid() ax_poserr_z.grid() fig_poserr.tight_layout() fig_poserr.savefig(os.path.join(comparison_dir, 'translation_error.pdf')) # ------------------------------------------------------------------------------ # orientation error fig_roterr = plt.figure(figsize=(8,6)) ax_roterr_r = fig_roterr.add_subplot(311, ylabel='roll-error [rad]') ax_roterr_p = fig_roterr.add_subplot(312, ylabel='pitch-error [rad]') ax_roterr_y = fig_roterr.add_subplot(313, ylabel='yaw-error [rad]', xlabel='time [s]') for exp in experiments: # load dataset parameters params_stream = open(os.path.join(results_dir, exp, 'params.yaml')) params = yaml.load(params_stream) # plot translation error rot_error = np.loadtxt(os.path.join(results_dir, exp, 'orientation_error.txt')) rot_error[:,0] = rot_error[:,0]-rot_error[0,0] ax_roterr_r.plot(rot_error[:,0], rot_error[:,3], label=params['experiment_label']) ax_roterr_p.plot(rot_error[:,0], rot_error[:,2]) ax_roterr_y.plot(rot_error[:,0], rot_error[:,1]) ax_roterr_r.set_xlim([0, rot_error[-1,0]+4]) ax_roterr_p.set_xlim([0, rot_error[-1,0]+4]) ax_roterr_y.set_xlim([0, rot_error[-1,0]+4]) ax_roterr_r.legend(bbox_to_anchor=[0, 1], loc='upper left', ncol=3) ax_roterr_r.grid() ax_roterr_p.grid() ax_roterr_y.grid() fig_roterr.tight_layout() fig_roterr.savefig(os.path.join(comparison_dir, 'orientation_error.pdf')) # ------------------------------------------------------------------------------ # scale error if plot_scale_drift: fig_scale = plt.figure(figsize=(8,2.5)) ax_scale = fig_scale.add_subplot(111, xlabel='time [s]', ylabel='scale 
change [\%]') for exp in experiments: # load dataset parameters params = yaml.load(open(os.path.join(results_dir, exp, 'params.yaml'))) # plot scale drift scale_drift = np.loadtxt(os.path.join(results_dir, exp, 'scale_drift.txt')) scale_drift[:,0] = scale_drift[:,0]-scale_drift[0,0] ax_scale.plot(scale_drift[:,0], scale_drift[:,1], label=params['experiment_label']) ax_scale.set_xlim([0, rot_error[-1,0]+4]) ax_scale.legend(bbox_to_anchor=[0, 1], loc='upper left', ncol=3) ax_scale.grid() fig_scale.tight_layout() fig_scale.savefig(os.path.join(comparison_dir, 'scale_drift.pdf')) # ------------------------------------------------------------------------------ # trajectory # fig_traj = plt.figure(figsize=(8,4.8)) # ax_traj = fig_traj.add_subplot(111, xlabel='x [m]', ylabel='y [m]', aspect='equal', xlim=[-3.1, 4], ylim=[-1.5, 2.6]) # # plotTrajectory(ax_traj, '/home/cforster/Datasets/asl_vicon_d2/groundtruth_filtered.txt', 'Groundtruth', 'k', 1.5) # plotTrajectory(ax_traj, results_dir+'/20130911_2229_nslam_i7_asl2_fast/traj_estimate_rotated.txt', 'Fast', 'g', 1) # plotTrajectory(ax_traj, results_dir+'/20130906_2149_ptam_i7_asl2/traj_estimate_rotated.txt', 'PTAM', 'r', 1) # # mark_inset(ax_traj, axins, loc1=2, loc2=4, fc="none", ec='b') # plt.draw() # plt.show() # ax_traj.legend(bbox_to_anchor=[1, 0], loc='lower right', ncol=3) # ax_traj.grid() # fig_traj.tight_layout() # fig_traj.savefig('../results/trajectory_asl.pdf') if __name__ == '__main__': default_name = time.strftime("%Y%m%d_%H%M", time.localtime())+'_comparison' parser = argparse.ArgumentParser(description='Compare results.') parser.add_argument('result_directories', nargs='+', help='list of result directories to compare') parser.add_argument('--name', help='name of the comparison', default=default_name) args = parser.parse_args() # create folder for comparison results results_dir = os.path.join(rospkg.RosPack().get_path('svo_analysis'), 'results') comparison_dir = os.path.join(results_dir, args.name) if not os.path.exists(comparison_dir): os.makedirs(comparison_dir) # run comparison compare_results(args.result_directories, results_dir, comparison_dir)
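compare_results itself has no ROS dependency (rospkg is only used in __main__ to locate the results folder), so it can be called directly; a sketch with placeholder paths, assuming each experiment folder contains params.yaml, translation_error.txt and orientation_error.txt in the format read above:

import os

results_dir = '/tmp/svo_results'                       # placeholder path
comparison_dir = os.path.join(results_dir, 'my_comparison')
if not os.path.exists(comparison_dir):
    os.makedirs(comparison_dir)

experiments = ['20130911_2229_nslam_i7_asl2_fast',     # names taken from the
               '20130906_2149_ptam_i7_asl2']           # commented-out block above
compare_results(experiments, results_dir, comparison_dir)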
gpl-3.0
jdavidrcamacho/Tests_GP
02 - Programs being tested/RV_function.py
1
3615
# -*- coding: utf-8 -*- """ Created on Fri Feb 3 11:36:58 2017 @author: camacho """ import numpy as np import matplotlib.pyplot as pl pl.close("all") ##### RV FUNCTION 1 - circular orbit def RV_circular(P=365,K=0.1,T=0,gamma=0,time=100,space=20): #parameters #P = period in days #K = semi-amplitude of the signal #T = velocity at zero phase #gamma = average velocity of the star #time = time of the simulation #space => I want an observation every time/space days t=np.linspace(0,time,space) RV=[K*np.sin(2*np.pi*x/P - T) + gamma for x in t] RV=[x for x in RV] #m/s return [t,RV] ##### RV FUNCTION 2 - keplerian orbit def RV_kepler(P=365,e=0,K=0.1,T=0,gamma=0,w=np.pi,time=100,space=1000): #parameters #P = period in days #e = eccentricity #K = RV amplitude #gamma = constant system RV #T = zero phase #w = longitude of the periastron #time = time of the simulation #space => I want an observation every time/space days t=np.linspace(0,time,space) #mean anomaly Mean_anom=[2*np.pi*(x1-T)/P for x1 in t] #eccentric anomaly -> E0=M + e*sin(M) + 0.5*(e**2)*sin(2*M) E0=[x + e*np.sin(x) + 0.5*(e**2)*np.sin(2*x) for x in Mean_anom] #mean anomaly -> M0=E0 - e*sin(E0) M0=[x - e*np.sin(x) for x in E0] i=0 while i<100: #[x + y for x, y in zip(first, second)] calc_aux=[x2-y for x2,y in zip(Mean_anom,M0)] E1=[x3 + y/(1-e*np.cos(x3)) for x3,y in zip(E0,calc_aux)] M1=[x4 - e*np.sin(x4) for x4 in E0] i+=1 E0=E1 M0=M1 nu=[2*np.arctan(np.sqrt((1+e)/(1-e))*np.tan(x5/2)) for x5 in E0] RV=[ gamma + K*(e*np.cos(w)+np.cos(w+x6)) for x6 in nu] RV=[x for x in RV] #m/s return t,RV #Examples #a=RV_circular() #pl.figure('RV_circular with P=365') #pl.plot(a[0],a[1],':',) #pl.title('planet of 365 days orbit') #pl.xlabel('time') #pl.ylabel('RV (Km/s)') #b=RV_circular(P=100) #pl.figure('RV_circular with P=100') #pl.title('planet of 100 days orbit') #pl.plot(b[0],b[1],':',) #pl.xlabel('time') #pl.ylabel('RV (Km/s)') #c=RV_kepler(P=100,e=0,w=np.pi,time=100) #pl.figure() #pl.plot(c[0],c[1],':',) #pl.title('P=100, e=0, w=pi, time=100') #pl.xlabel('time') #pl.ylabel('RV (Km/s)') #d1=RV_kepler(P=100,e=0, w=0,time=500) #pl.figure() #pl.title('P=100, e=0, w=pi, time=25') #pl.plot(d[0],d[1],'-',) #pl.xlabel('time') #pl.ylabel('RV (Km/s)') #d2=RV_kepler(P=100,e=0, w=np.pi,time=500) #pl.figure() #pl.title('P=100, e=0, w=pi, time=25') #pl.plot(d[0],d[1],'-',) #pl.xlabel('time') #pl.ylabel('RV (Km/s)') #d3=RV_kepler(P=100,e=0.5, w=np.pi,time=500) #pl.figure() #pl.title('P=100, e=0, w=pi, time=25') #pl.plot(d[0],d[1],'-',) #pl.xlabel('time') #pl.ylabel('RV (Km/s)') #d4=RV_kepler(P=100,e=0.5, w=np.pi/2,time=500) #pl.figure() #pl.title('P=100, e=0, w=pi, time=25') #pl.plot(d[0],d[1],'-',) #pl.xlabel('time') #pl.ylabel('RV (Km/s)') d1=RV_kepler(P=100,e=0, w=0,time=500) d2=RV_kepler(P=100,e=0.5, w=0,time=500) d3=RV_kepler(P=100,e=0.5, w=np.pi,time=500) d4=RV_kepler(P=100,e=0.5, w=np.pi/2,time=500) # Four axes, returned as a 2-d array f, axarr = pl.subplots(2, 2) axarr[0, 0].plot(d1[0],d1[1]) axarr[0, 0].set_title('e=0 and w=0') axarr[0, 1].plot(d2[0],d2[1]) axarr[0, 1].set_title('e=0.5, w=0') axarr[1, 0].plot(d3[0],d3[1]) axarr[1, 0].set_title('e=0.5, w=pi') axarr[1, 1].plot(d4[0],d4[1]) axarr[1, 1].set_title('e=0.5, w=pi/2') #pl.setp(pl.xticks(fontsize = 18) for a in axarr[0,:])#pl.yticks(fontsize=18)) pl.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False)
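The while-loop above solves Kepler's equation E - e*sin(E) = M by repeated substitution. The same solve can be written as a vectorised Newton-Raphson iteration; a compact sketch (a rewrite for illustration, not the author's code) that returns the same radial-velocity curve:

import numpy as np

def rv_kepler_np(t, P=100., e=0.5, K=0.1, T=0., gamma=0., w=np.pi, n_iter=50):
    """Radial velocity of a Keplerian orbit via Newton-Raphson on E - e*sin(E) = M."""
    t = np.asarray(t, dtype=float)
    M = 2. * np.pi * (t - T) / P                 # mean anomaly
    E = M.copy()                                 # starting guess
    for _ in range(n_iter):
        E -= (E - e * np.sin(E) - M) / (1. - e * np.cos(E))
    nu = 2. * np.arctan2(np.sqrt(1. + e) * np.sin(E / 2.),
                         np.sqrt(1. - e) * np.cos(E / 2.))   # true anomaly
    return gamma + K * (e * np.cos(w) + np.cos(w + nu))

# t = np.linspace(0, 500, 1000); rv = rv_kepler_np(t, P=100, e=0.5, w=np.pi/2)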
mit
IamJeffG/geopandas
geopandas/plotting.py
1
13216
from __future__ import print_function import warnings import numpy as np from six import next from six.moves import xrange from shapely.geometry import Polygon def plot_polygon(ax, poly, facecolor='red', edgecolor='black', alpha=0.5, linewidth=1.0, **kwargs): """ Plot a single Polygon geometry """ from descartes.patch import PolygonPatch a = np.asarray(poly.exterior) if poly.has_z: poly = Polygon(zip(*poly.exterior.xy)) # without Descartes, we could make a Patch of exterior ax.add_patch(PolygonPatch(poly, facecolor=facecolor, linewidth=0, alpha=alpha)) # linewidth=0 because boundaries are drawn separately ax.plot(a[:, 0], a[:, 1], color=edgecolor, linewidth=linewidth, **kwargs) for p in poly.interiors: x, y = zip(*p.coords) ax.plot(x, y, color=edgecolor, linewidth=linewidth) def plot_multipolygon(ax, geom, facecolor='red', edgecolor='black', alpha=0.5, linewidth=1.0, **kwargs): """ Can safely call with either Polygon or Multipolygon geometry """ if geom.type == 'Polygon': plot_polygon(ax, geom, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha, linewidth=linewidth, **kwargs) elif geom.type == 'MultiPolygon': for poly in geom.geoms: plot_polygon(ax, poly, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha, linewidth=linewidth, **kwargs) def plot_linestring(ax, geom, color='black', linewidth=1.0, **kwargs): """ Plot a single LineString geometry """ a = np.array(geom) ax.plot(a[:, 0], a[:, 1], color=color, linewidth=linewidth, **kwargs) def plot_multilinestring(ax, geom, color='red', linewidth=1.0, **kwargs): """ Can safely call with either LineString or MultiLineString geometry """ if geom.type == 'LineString': plot_linestring(ax, geom, color=color, linewidth=linewidth, **kwargs) elif geom.type == 'MultiLineString': for line in geom.geoms: plot_linestring(ax, line, color=color, linewidth=linewidth, **kwargs) def plot_point(ax, pt, marker='o', markersize=2, color='black', **kwargs): """ Plot a single Point geometry """ ax.plot(pt.x, pt.y, marker=marker, markersize=markersize, color=color, **kwargs) def gencolor(N, colormap='Set1'): """ Color generator intended to work with one of the ColorBrewer qualitative color scales. Suggested values of colormap are the following: Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3 (although any matplotlib colormap will work). """ from matplotlib import cm # don't use more than 9 discrete colors n_colors = min(N, 9) cmap = cm.get_cmap(colormap, n_colors) colors = cmap(range(n_colors)) for i in xrange(N): yield colors[i % n_colors] def plot_series(s, cmap='Set1', color=None, ax=None, linewidth=1.0, figsize=None, **color_kwds): """ Plot a GeoSeries Generate a plot of a GeoSeries geometry with matplotlib. Parameters ---------- Series The GeoSeries to be plotted. Currently Polygon, MultiPolygon, LineString, MultiLineString and Point geometries can be plotted. cmap : str (default 'Set1') The name of a colormap recognized by matplotlib. Any colormap will work, but categorical colormaps are generally recommended. Examples of useful discrete colormaps include: Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3 color : str (default None) If specified, all objects will be colored uniformly. ax : matplotlib.pyplot.Artist (default None) axes on which to draw the plot linewidth : float (default 1.0) Line width for geometries. figsize : pair of floats (default None) Size of the resulting matplotlib.figure.Figure. If the argument ax is given explicitly, figsize is ignored. 
**color_kwds : dict Color options to be passed on to the actual plot function Returns ------- matplotlib axes instance """ if 'colormap' in color_kwds: warnings.warn("'colormap' is deprecated, please use 'cmap' instead " "(for consistency with matplotlib)", FutureWarning) cmap = color_kwds.pop('colormap') if 'axes' in color_kwds: warnings.warn("'axes' is deprecated, please use 'ax' instead " "(for consistency with pandas)", FutureWarning) ax = color_kwds.pop('axes') import matplotlib.pyplot as plt if ax is None: fig, ax = plt.subplots(figsize=figsize) ax.set_aspect('equal') color_generator = gencolor(len(s), colormap=cmap) for geom in s: if color is None: col = next(color_generator) else: col = color if geom.type == 'Polygon' or geom.type == 'MultiPolygon': if 'facecolor' in color_kwds: plot_multipolygon(ax, geom, linewidth=linewidth, **color_kwds) else: plot_multipolygon(ax, geom, facecolor=col, linewidth=linewidth, **color_kwds) elif geom.type == 'LineString' or geom.type == 'MultiLineString': plot_multilinestring(ax, geom, color=col, linewidth=linewidth, **color_kwds) elif geom.type == 'Point': plot_point(ax, geom, color=col, **color_kwds) plt.draw() return ax def plot_dataframe(s, column=None, cmap=None, color=None, linewidth=1.0, categorical=False, legend=False, ax=None, scheme=None, k=5, vmin=None, vmax=None, figsize=None, **color_kwds): """ Plot a GeoDataFrame Generate a plot of a GeoDataFrame with matplotlib. If a column is specified, the plot coloring will be based on values in that column. Otherwise, a categorical plot of the geometries in the `geometry` column will be generated. Parameters ---------- GeoDataFrame The GeoDataFrame to be plotted. Currently Polygon, MultiPolygon, LineString, MultiLineString and Point geometries can be plotted. column : str (default None) The name of the column to be plotted. categorical : bool (default False) If False, cmap will reflect numerical values of the column being plotted. For non-numerical columns (or if column=None), this will be set to True. cmap : str (default 'Set1') The name of a colormap recognized by matplotlib. color : str (default None) If specified, all objects will be colored uniformly. linewidth : float (default 1.0) Line width for geometries. legend : bool (default False) Plot a legend (Experimental; currently for categorical plots only) ax : matplotlib.pyplot.Artist (default None) axes on which to draw the plot scheme : pysal.esda.mapclassify.Map_Classifier Choropleth classification schemes (requires PySAL) k : int (default 5) Number of classes (ignored if scheme is None) vmin : None or float (default None) Minimum value of cmap. If None, the minimum data value in the column to be plotted is used. vmax : None or float (default None) Maximum value of cmap. If None, the maximum data value in the column to be plotted is used. figsize Size of the resulting matplotlib.figure.Figure. If the argument axes is given explicitly, figsize is ignored. 
**color_kwds : dict Color options to be passed on to the actual plot function Returns ------- matplotlib axes instance """ if 'colormap' in color_kwds: warnings.warn("'colormap' is deprecated, please use 'cmap' instead " "(for consistency with matplotlib)", FutureWarning) cmap = color_kwds.pop('colormap') if 'axes' in color_kwds: warnings.warn("'axes' is deprecated, please use 'ax' instead " "(for consistency with pandas)", FutureWarning) ax = color_kwds.pop('axes') import matplotlib.pyplot as plt from matplotlib.lines import Line2D from matplotlib.colors import Normalize from matplotlib import cm if column is None: return plot_series(s.geometry, cmap=cmap, color=color, ax=ax, linewidth=linewidth, figsize=figsize, **color_kwds) else: if s[column].dtype is np.dtype('O'): categorical = True if categorical: if cmap is None: cmap = 'Set1' categories = list(set(s[column].values)) categories.sort() valuemap = dict([(k, v) for (v, k) in enumerate(categories)]) values = [valuemap[k] for k in s[column]] else: values = s[column] if scheme is not None: binning = __pysal_choro(values, scheme, k=k) values = binning.yb # set categorical to True for creating the legend categorical = True binedges = [binning.yb.min()] + binning.bins.tolist() categories = ['{0:.2f} - {1:.2f}'.format(binedges[i], binedges[i+1]) for i in range(len(binedges)-1)] cmap = norm_cmap(values, cmap, Normalize, cm, vmin=vmin, vmax=vmax) if ax is None: fig, ax = plt.subplots(figsize=figsize) ax.set_aspect('equal') for geom, value in zip(s.geometry, values): if color is None: col = cmap.to_rgba(value) else: col = color if geom.type == 'Polygon' or geom.type == 'MultiPolygon': plot_multipolygon(ax, geom, facecolor=col, linewidth=linewidth, **color_kwds) elif geom.type == 'LineString' or geom.type == 'MultiLineString': plot_multilinestring(ax, geom, color=col, linewidth=linewidth, **color_kwds) elif geom.type == 'Point': plot_point(ax, geom, color=col, **color_kwds) if legend: if categorical: patches = [] for value, cat in enumerate(categories): patches.append(Line2D([0], [0], linestyle="none", marker="o", alpha=color_kwds.get('alpha', 0.5), markersize=10, markerfacecolor=cmap.to_rgba(value))) ax.legend(patches, categories, numpoints=1, loc='best') else: # TODO: show a colorbar raise NotImplementedError plt.draw() return ax def __pysal_choro(values, scheme, k=5): """ Wrapper for choropleth schemes from PySAL for use with plot_dataframe Parameters ---------- values Series to be plotted scheme pysal.esda.mapclassify classificatin scheme ['Equal_interval'|'Quantiles'|'Fisher_Jenks'] k number of classes (2 <= k <=9) Returns ------- binning Binning objects that holds the Series with values replaced with class identifier and the bins. """ try: from pysal.esda.mapclassify import Quantiles, Equal_Interval, Fisher_Jenks schemes = {} schemes['equal_interval'] = Equal_Interval schemes['quantiles'] = Quantiles schemes['fisher_jenks'] = Fisher_Jenks s0 = scheme scheme = scheme.lower() if scheme not in schemes: scheme = 'quantiles' warnings.warn('Unrecognized scheme "{0}". 
Using "Quantiles" ' 'instead'.format(s0), UserWarning, stacklevel=3) if k < 2 or k > 9: warnings.warn('Invalid k: {0} (2 <= k <= 9), setting k=5 ' '(default)'.format(k), UserWarning, stacklevel=3) k = 5 binning = schemes[scheme](values, k) return binning except ImportError: raise ImportError("PySAL is required to use the 'scheme' keyword") def norm_cmap(values, cmap, normalize, cm, vmin=None, vmax=None): """ Normalize and set colormap Parameters ---------- values Series or array to be normalized cmap matplotlib Colormap normalize matplotlib.colors.Normalize cm matplotlib.cm vmin Minimum value of colormap. If None, uses min(values). vmax Maximum value of colormap. If None, uses max(values). Returns ------- n_cmap mapping of normalized values to colormap (cmap) """ mn = min(values) if vmin is None else vmin mx = max(values) if vmax is None else vmax norm = normalize(vmin=mn, vmax=mx) n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap) return n_cmap
bsd-3-clause
markovmodel/molPX
molpx/_linkutils.py
1
18471
import numpy as _np from matplotlib.widgets import AxesWidget as _AxesWidget from matplotlib.colors import is_color_like as _is_color_like from matplotlib.axes import Axes as _mplAxes from matplotlib.figure import Figure as _mplFigure from IPython.display import display as _ipydisplay from pyemma.util.types import is_int as _is_int from scipy.spatial import cKDTree as _cKDTree from ._bmutils import get_ascending_coord_idx from mdtraj import Trajectory as _mdTrajectory from nglview import NGLWidget as _NGLwdg from ipywidgets import HBox as _HBox, VBox as _VBox def pts_per_axis_unit(mplax, pt_per_inch=72): r""" Return how many pt per axis unit of a given maptplotlib axis a figure has Parameters ---------- mplax : :obj:`matplotlib.axes._subplots.AxesSubplot` pt_per_inch : how many points are in an inch (this number should not change) Returns -------- pt_per_xunit, pt_per_yunit """ # matplotlib voodoo # Get bounding box bbox = mplax.get_window_extent().transformed(mplax.get_figure().dpi_scale_trans.inverted()) span_inch = _np.array([bbox.width, bbox.height], ndmin=2).T span_units = [mplax.get_xlim(), mplax.get_ylim()] span_units = _np.diff(span_units, axis=1) inch_per_unit = span_inch / span_units return inch_per_unit * pt_per_inch def update2Dlines(iline, x, y): """ provide a common interface to update objects on the plot to a new position (x,y) depending on whether they are hlines, vlines, dots etc Parameters ---------- iline: :obj:`matplotlib.lines.Line2D` object x : float with new position y : float with new position """ # TODO FIND OUT A CLEANER WAY TO DO THIS (dict or class) if not hasattr(iline,'whatisthis'): raise AttributeError("This method will only work if iline has the attribute 'whatsthis'") else: # TODO find cleaner way of distinguishing these 2Dlines if iline.whatisthis in ['dot']: iline.set_xdata((x)) iline.set_ydata((y)) elif iline.whatisthis in ['lineh']: iline.set_ydata((y,y)) elif iline.whatisthis in ['linev']: iline.set_xdata((x,x)) else: # TODO: FIND OUT WNY EXCEPTIONS ARE NOT BEING RAISED raise TypeError("what is this type of 2Dline?") class ClickOnAxisListener(object): def __init__(self, ngl_wdg, crosshairs, showclick_objs, ax, pos, list_mpl_objects_to_update): self.ngl_wdg = ngl_wdg self.crosshairs = crosshairs self.showclick_objs = showclick_objs self.ax = ax self.pos = pos self.list_mpl_objects_to_update = list_mpl_objects_to_update self.list_of_dots = [None]*self.pos.shape[0] self.fig_size = self.ax.figure.get_size_inches() self.kdtree = None def build_tree(self): # Use ax.transData to compute distance in pixels # regardelss of the axes units (http://matplotlib.org/users/transforms_tutorial.html) # Corresponds to the visual distance between clicked point and target point self.kdtree = _cKDTree(self.ax.transData.transform(self.pos)) @property def figure_changed_size(self): return not _np.allclose(self.fig_size, self.ax.figure.get_size_inches()) def __call__(self, event): # Wait for the first click or a a figsize change # to build the kdtree if self.figure_changed_size or self.kdtree is None: self.build_tree() self.fig_size = self.ax.figure.get_size_inches() # Was the click inside the bounding box? 
if self.ax.get_window_extent().contains(event.x, event.y): if self.crosshairs: for iline in self.showclick_objs: update2Dlines(iline, event.xdata, event.ydata) _, index = self.kdtree.query(x=[event.x, event.y], k=1) for idot in self.list_mpl_objects_to_update: update2Dlines(idot, self.pos[index, 0], self.pos[index, 1]) self.ngl_wdg.isClick = True if hasattr(self.ngl_wdg, '_GeomsInWid'): # We're in a sticky situation if event.button == 1: # Pressed left self.ngl_wdg._GeomsInWid[index].show() if self.list_of_dots[index] is None: # Plot and store the dot in case there wasn't self.list_of_dots[index] = self.ax.plot(self.pos[index, 0], self.pos[index, 1], 'o', c=self.ngl_wdg._GeomsInWid[index].color_dot, ms=7)[0] elif event.button in [2, 3]: # Pressed right or middle self.ngl_wdg._GeomsInWid[index].hide() # Delete dot if the geom is not visible anymore if not self.ngl_wdg._GeomsInWid[index].is_visible() and self.list_of_dots[index] is not None: self.list_of_dots[index].remove() self.list_of_dots[index] = None else: # We're not sticky, just go to the frame self.ngl_wdg.frame = index class MolPXBox(object): r""" Class created to be the parent class of MolPXHBox and MolPXVBox, which inherit from MolPXBox and the ipywidget classes HBox and VBox (*args and **kwargs are for these) The sole purpose of this class is to avoid monkey-patching elsewhere in the code, this class creates them as empty lists on instantiation. It also implements two methods: * self.display (=IPython.display(self) * append_if_existing """ def __init__(self, *args, **kwargs): self.linked_axes = [] self.linked_mdgeoms = [] self.linked_ngl_wdgs = [] self.linked_data_arrays = [] self.linked_ax_wdgs = [] self.linked_figs = [] def display(self): _ipydisplay(self) def append_if_existing(self, args0, startswith_arg="linked_"): r""" args0 is the tuple containing all widgets to be included in the MolPXBox this tuple can contain itself other MolPXWidget so we iterate through them and appending linked stuff """ for iarg in args0: for attrname in dir(iarg): if attrname.startswith(startswith_arg) and len(iarg.__dict__[attrname]) != 0: self.__dict__[attrname] += iarg.__dict__[attrname] def auto_append_these_mpx_attrs(iobj, *attrs): r""" The attribute s name is automatically derived from the attribute s type via a type:name dictionary *attrs : any number of unnamed objects of the types in type2attrname. 
If the object type is a list, it will be flattened prior to attempting """ attrs_flat_list = [] for sublist in attrs: if isinstance(sublist, list): for item in sublist: attrs_flat_list.append(item) else: attrs_flat_list.append(sublist) # Go through the arguments and assign them an attrname according to their types for iattr in attrs_flat_list: for attrname, itype in type2attrname.items(): if isinstance(iattr, itype): iobj.__dict__[attrname].append(iattr) break class MolPXHBox(_HBox, MolPXBox): def __init__(self, *args, **kwargs): super(MolPXHBox, self).__init__(*args, **kwargs) self.append_if_existing(args[0]) class MolPXVBox(_VBox, MolPXBox): def __init__(self, *args, **kwargs): super(MolPXVBox, self).__init__(*args, **kwargs) self.append_if_existing(args[0]) type2attrname = {"linked_axes": _mplAxes, "linked_mdgeoms": _mdTrajectory, "linked_ngl_wdgs": _NGLwdg, "linked_data_arrays": _np.ndarray, "linked_ax_wdgs": _AxesWidget, "linked_figs": _mplFigure, } class ChangeInNGLWidgetListener(object): def __init__(self, ngl_wdg, list_mpl_objects_to_update, pos): self.ngl_wdg = ngl_wdg self.list_mpl_objects_to_update = list_mpl_objects_to_update self.pos = pos def __call__(self, change): self.ngl_wdg.isClick = False _idx = change["new"] try: for idot in self.list_mpl_objects_to_update: update2Dlines(idot, self.pos[_idx, 0], self.pos[_idx, 1]) #print("caught index error with index %s (new=%s, old=%s)" % (_idx, change["new"], change["old"])) except IndexError as e: for idot in self.list_mpl_objects_to_update: update2Dlines(idot, self.pos[0, 0], self.pos[0, 1]) print("caught index error with index %s (new=%s, old=%s)" % (_idx, change["new"], change["old"])) #print("set xy = (%s, %s)" % (x[_idx], y[_idx])) class GeometryInNGLWidget(object): r""" returns an object that is aware of where its geometries are located in the NGLWidget their representation status The object exposes two methods, show and hide, to automagically know what to do """ def __init__(self, geom, ngl_wdg, list_of_repr_dicts=None, color_molecule_hex='Element', n_small=10): self.lives_at_components = [] self.geom = geom self.ngl_wdg = ngl_wdg self.have_repr = [] sticky_rep = 'cartoon' if self.geom[0].top.n_residues < n_small: sticky_rep = 'ball+stick' if list_of_repr_dicts is None: list_of_repr_dicts = [{'repr_type': sticky_rep, 'selection': 'all'}] self.list_of_repr_dicts = list_of_repr_dicts self.color_molecule_hex = color_molecule_hex self.color_dot = color_molecule_hex if isinstance(self.color_molecule_hex, str) and color_molecule_hex == 'Element': self.color_dot = 'red' def show(self): # Show can mean either # - add a whole new component (case 1) # - add the representation again to a representation-less component (case 2) # CASE 1 if self.is_empty() or self.all_reps_are_on(): if len(self.have_repr) == self.geom.n_frames: print("arrived at the end") component = None else: idx = len(self.have_repr) self.ngl_wdg.add_trajectory(self.geom[idx]) self.lives_at_components.append(len(self.ngl_wdg._ngl_component_ids) - 1) self.ngl_wdg.clear_representations(component=self.lives_at_components[-1]) self.have_repr.append(True) component = self.lives_at_components[-1] # CASE 2 elif self.any_rep_is_off(): # Some are living in the widget already but have no rep idx = _np.argwhere(~_np.array(self.have_repr))[0].squeeze() component = self.lives_at_components[idx] self.have_repr[idx] = True else: raise Exception("This situation should not arise. 
This is a bug") if component is not None: for irepr in self.list_of_repr_dicts: self.ngl_wdg.add_representation(irepr['repr_type'], selection=irepr['selection'], component=component, color=self.color_molecule_hex) def hide(self): if self.is_empty() or self.all_reps_are_off(): print("nothing to hide") pass elif self.any_rep_is_on(): # There's represented components already in the widget idx = _np.argwhere(self.have_repr)[-1].squeeze() self.ngl_wdg.clear_representations(component=self.lives_at_components[idx]) self.have_repr[idx] = False else: raise Exception("This situation should not arise. This is a bug") # Quickhand methods for knowing what's up def is_empty(self): if len(self.have_repr) == 0: return True else: return False def all_reps_are_off(self): if len(self.have_repr) == 0: return True else: return _np.all(~_np.array(self.have_repr)) def all_reps_are_on(self): if len(self.have_repr) == 0: return False else: return _np.all(self.have_repr) def any_rep_is_off(self): return _np.any(~_np.array(self.have_repr)) def any_rep_is_on(self): return _np.any(self.have_repr) def is_visible(self): if self.is_empty() or self.all_reps_are_off(): return False else: return True def link_ax_w_pos_2_nglwidget(ax, pos, ngl_wdg, crosshairs=True, dot_color='red', band_width=None, radius=False, directionality=None, exclude_coord=None, ): r""" Initial idea for this function comes from @arose, the rest is @gph82 Parameters ---------- ax : matplotlib axis object to be linked pos : ndarray of shape (N,2) with the positions of the geoms in the ngl_wdg crosshairs : Boolean or str If True, a crosshair will show where the mouse-click ocurred. If 'h' or 'v', only the horizontal or vertical line of the crosshair will be shown, respectively. If False, no crosshair will appear dot_color : Anything that yields matplotlib.colors.is_color_like(dot_color)==True Default is 'red'. dot_color='None' yields no dot band_width : None or iterable of len = 2 If band_width is not None, the method tries to figure out on its own if there is an ascending coordinate and will include a moving band on :obj:ax of this width (in units of the axis along which the band is plotted) If the method cannot find an ascending coordinate, an exception is thrown directionality : str or None, default is None If not None, directionality can be either 'a2w' or 'w2a', meaning that connectivity between axis and widget will be only established as * 'a2w' : action in axis triggers action in widget, but not the other way around * 'w2a' : action in widget triggers action in axis, but not the other way around exclude_coord : None or int , default is None The excluded coordinate will not be considered when computing the nearest-point-to-click. Typical use case is for visualize.traj to only compute distances horizontally along the time axis Returns ------- axes_widget : :obj:`matplotlib.Axes.Axeswidget` that has been linked to the NGLWidget """ assert directionality in [None, 'a2w', 'w2a'], "The directionality parameter has to be in [None, 'a2w', 'w2a'] " \ "not %s"%directionality assert crosshairs in [True, False, 'h', 'v'], "The crosshairs parameter has to be in [True, False, 'h','v'], " \ "not %s" % crosshairs ipos = _np.copy(pos) if _is_int(exclude_coord): ipos[:,exclude_coord] = 0 # Are we in a sticky situation? 
if hasattr(ngl_wdg, '_GeomsInWid'): sticky = True else: assert ngl_wdg.trajectory_0.n_frames == pos.shape[0], \ ("Mismatching frame numbers %u vs %u" % (ngl_wdg.trajectory_0.n_frames, pos.shape[0])) sticky = False # Basic interactive objects showclick_objs = [] if crosshairs in [True, 'h']: lineh = ax.axhline(ax.get_ybound()[0], c="black", ls='--') setattr(lineh, 'whatisthis', 'lineh') showclick_objs.append(lineh) if crosshairs in [True, 'v']: linev = ax.axvline(ax.get_xbound()[0], c="black", ls='--') setattr(linev, 'whatisthis', 'linev') showclick_objs.append(linev) if _is_color_like(dot_color): pass else: raise TypeError('dot_color should be a matplotlib color') dot = ax.plot(pos[0,0],pos[0,1], 'o', c=dot_color, ms=7, zorder=100)[0] setattr(dot,'whatisthis','dot') list_mpl_objects_to_update = [dot] # Other objects, related to smoothing options if band_width is not None: if radius: band_width_in_pts = int(_np.round(pts_per_axis_unit(ax).mean() * _np.mean(band_width))) rad = ax.plot(pos[0, 0], pos[0, 1], 'o', ms=_np.round(band_width_in_pts), c='green', alpha=.25, markeredgecolor='None')[0] setattr(rad, 'whatisthis', 'dot') if not sticky: list_mpl_objects_to_update.append(rad) else: # print("Band_width(x,y) is %s" % (band_width)) coord_idx = get_ascending_coord_idx(pos) if _np.ndim(coord_idx)>0 and len(coord_idx)==0: raise ValueError("Must have an ascending coordinate for band_width usage") band_width_in_pts = int(_np.round(pts_per_axis_unit(ax)[coord_idx] * band_width[coord_idx])) # print("Band_width in %s is %s pts"%('xy'[coord_idx], band_width_in_pts)) band_call = [ax.axvline, ax.axhline][coord_idx] band_init = [ax.get_xbound, ax.get_ybound][coord_idx] band_type = ['linev', 'lineh'][coord_idx] band = band_call(band_init()[0], lw=band_width_in_pts, c="green", ls='-', alpha=.25) setattr(band, 'whatisthis', band_type) list_mpl_objects_to_update.append(band) ngl_wdg.isClick = False CLA_listener = ClickOnAxisListener(ngl_wdg, crosshairs, showclick_objs, ax, pos, list_mpl_objects_to_update) NGL_listener = ChangeInNGLWidgetListener(ngl_wdg, list_mpl_objects_to_update, pos) # Connect axes to widget axes_widget = _AxesWidget(ax) if directionality in [None, 'a2w']: axes_widget.connect_event('button_release_event', CLA_listener) # Connect widget to axes if directionality in [None, 'w2a']: ngl_wdg.observe(NGL_listener, "frame", "change") ngl_wdg.center() return axes_widget
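A typical wiring of the linker above inside a Jupyter notebook, assuming mdtraj and nglview are installed; the file names and the 2D projection are placeholders (any trajectory plus an (n_frames, 2) array will do):

import numpy as np
import mdtraj as md
import nglview
import matplotlib.pyplot as plt
from molpx._linkutils import link_ax_w_pos_2_nglwidget

traj = md.load('traj.xtc', top='top.pdb')        # placeholder file names
pos = np.load('projection_2D.npy')               # shape (n_frames, 2), placeholder

ngl_wdg = nglview.show_mdtraj(traj)
fig, ax = plt.subplots()
ax.scatter(pos[:, 0], pos[:, 1], s=5, alpha=0.3)

# clicks on the axes now drive the 3D view; changing frames moves the dot
ax_wdg = link_ax_w_pos_2_nglwidget(ax, pos, ngl_wdg)
ngl_wdg                                          # display the widget (last cell line)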
lgpl-3.0
ofgulban/scikit-image
doc/examples/edges/plot_marching_cubes.py
2
2078
""" ============== Marching Cubes ============== Marching cubes is an algorithm to extract a 2D surface mesh from a 3D volume. This can be conceptualized as a 3D generalization of isolines on topographical or weather maps. It works by iterating across the volume, looking for regions which cross the level of interest. If such regions are found, triangulations are generated and added to an output mesh. The final result is a set of vertices and a set of triangular faces. The algorithm requires a data volume and an isosurface value. For example, in CT imaging Hounsfield units of +700 to +3000 represent bone. So, one potential input would be a reconstructed CT set of data and the value +700, to extract a mesh for regions of bone or bone-like density. This implementation also works correctly on anisotropic datasets, where the voxel spacing is not equal for every spatial dimension, through use of the `spacing` kwarg. """ import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d.art3d import Poly3DCollection from skimage import measure from skimage.draw import ellipsoid # Generate a level set about zero of two identical ellipsoids in 3D ellip_base = ellipsoid(6, 10, 16, levelset=True) ellip_double = np.concatenate((ellip_base[:-1, ...], ellip_base[2:, ...]), axis=0) # Use marching cubes to obtain the surface mesh of these ellipsoids verts, faces, normals, values = measure.marching_cubes(ellip_double, 0) # Display resulting triangular mesh using Matplotlib. This can also be done # with mayavi or visvis (see skimage.measure.marching_cubes docstring). fig = plt.figure(figsize=(10, 12)) ax = fig.add_subplot(111, projection='3d') # Fancy indexing: `verts[faces]` to generate a collection of triangles mesh = Poly3DCollection(verts[faces]) ax.add_collection3d(mesh) ax.set_xlabel("x-axis: a = 6 per ellipsoid") ax.set_ylabel("y-axis: b = 10") ax.set_zlabel("z-axis: c = 16") ax.set_xlim(0, 24) # a = 6 (times two for 2nd ellipsoid) ax.set_ylim(0, 20) # b = 10 ax.set_zlim(0, 32) # c = 16 plt.show()
bsd-3-clause
postvakje/sympy
sympy/plotting/plot.py
7
65097
"""Plotting module for Sympy. A plot is represented by the ``Plot`` class that contains a reference to the backend and a list of the data series to be plotted. The data series are instances of classes meant to simplify getting points and meshes from sympy expressions. ``plot_backends`` is a dictionary with all the backends. This module gives only the essential. For all the fancy stuff use directly the backend. You can get the backend wrapper for every plot from the ``_backend`` attribute. Moreover the data series classes have various useful methods like ``get_points``, ``get_segments``, ``get_meshes``, etc, that may be useful if you wish to use another plotting library. Especially if you need publication ready graphs and this module is not enough for you - just get the ``_backend`` attribute and add whatever you want directly to it. In the case of matplotlib (the common way to graph data in python) just copy ``_backend.fig`` which is the figure and ``_backend.ax`` which is the axis and work on them as you would on any other matplotlib object. Simplicity of code takes much greater importance than performance. Don't use it if you care at all about performance. A new backend instance is initialized every time you call ``show()`` and the old one is left to the garbage collector. """ from __future__ import print_function, division import inspect from collections import Callable import warnings import sys from sympy import sympify, Expr, Tuple, Dummy, Symbol from sympy.external import import_module from sympy.core.compatibility import range from sympy.utilities.decorator import doctest_depends_on from sympy.utilities.iterables import is_sequence from .experimental_lambdify import (vectorized_lambdify, lambdify) # N.B. # When changing the minimum module version for matplotlib, please change # the same in the `SymPyDocTestFinder`` in `sympy/utilities/runtests.py` # Backend specific imports - textplot from sympy.plotting.textplot import textplot # Global variable # Set to False when running tests / doctests so that the plots don't show. _show = True def unset_show(): global _show _show = False ############################################################################## # The public interface ############################################################################## def _arity(f): """ Python 2 and 3 compatible version that do not raise a Deprecation warning. """ if sys.version_info < (3,): return len(inspect.getargspec(f)[0]) else: param = inspect.signature(f).parameters.values() return len([p for p in param if p.kind == p.POSITIONAL_OR_KEYWORD]) class Plot(object): """The central class of the plotting module. For interactive work the function ``plot`` is better suited. This class permits the plotting of sympy expressions using numerous backends (matplotlib, textplot, the old pyglet module for sympy, Google charts api, etc). The figure can contain an arbitrary number of plots of sympy expressions, lists of coordinates of points, etc. Plot has a private attribute _series that contains all data series to be plotted (expressions for lines or surfaces, lists of points, etc (all subclasses of BaseSeries)). Those data series are instances of classes not imported by ``from sympy import *``. The customization of the figure is on two levels. Global options that concern the figure as a whole (eg title, xlabel, scale, etc) and per-data series options (eg name) and aesthetics (eg. color, point shape, line type, etc.). 
The difference between options and aesthetics is that an aesthetic can be a function of the coordinates (or parameters in a parametric plot). The supported values for an aesthetic are: - None (the backend uses default values) - a constant - a function of one variable (the first coordinate or parameter) - a function of two variables (the first and second coordinate or parameters) - a function of three variables (only in nonparametric 3D plots) Their implementation depends on the backend so they may not work in some backends. If the plot is parametric and the arity of the aesthetic function permits it the aesthetic is calculated over parameters and not over coordinates. If the arity does not permit calculation over parameters the calculation is done over coordinates. Only cartesian coordinates are supported for the moment, but you can use the parametric plots to plot in polar, spherical and cylindrical coordinates. The arguments for the constructor Plot must be subclasses of BaseSeries. Any global option can be specified as a keyword argument. The global options for a figure are: - title : str - xlabel : str - ylabel : str - legend : bool - xscale : {'linear', 'log'} - yscale : {'linear', 'log'} - axis : bool - axis_center : tuple of two floats or {'center', 'auto'} - xlim : tuple of two floats - ylim : tuple of two floats - aspect_ratio : tuple of two floats or {'auto'} - autoscale : bool - margin : float in [0, 1] The per data series options and aesthetics are: There are none in the base series. See below for options for subclasses. Some data series support additional aesthetics or options: ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries, Parametric3DLineSeries support the following: Aesthetics: - line_color : function which returns a float. options: - label : str - steps : bool - integers_only : bool SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following: aesthetics: - surface_color : function which returns a float. """ def __init__(self, *args, **kwargs): super(Plot, self).__init__() # Options for the graph as a whole. # The possible values for each option are described in the docstring of # Plot. They are based purely on convention, no checking is done. self.title = None self.xlabel = None self.ylabel = None self.aspect_ratio = 'auto' self.xlim = None self.ylim = None self.axis_center = 'auto' self.axis = True self.xscale = 'linear' self.yscale = 'linear' self.legend = False self.autoscale = True self.margin = 0 # Contains the data objects to be plotted. The backend should be smart # enough to iterate over this list. self._series = [] self._series.extend(args) # The backend type. On every show() a new backend instance is created # in self._backend which is tightly coupled to the Plot instance # (thanks to the parent attribute of the backend). self.backend = DefaultBackend # The keyword arguments should only contain options for the plot. 
for key, val in kwargs.items(): if hasattr(self, key): setattr(self, key, val) def show(self): # TODO move this to the backend (also for save) if hasattr(self, '_backend'): self._backend.close() self._backend = self.backend(self) self._backend.show() def save(self, path): if hasattr(self, '_backend'): self._backend.close() self._backend = self.backend(self) self._backend.save(path) def __str__(self): series_strs = [('[%d]: ' % i) + str(s) for i, s in enumerate(self._series)] return 'Plot object containing:\n' + '\n'.join(series_strs) def __getitem__(self, index): return self._series[index] def __setitem__(self, index, *args): if len(args) == 1 and isinstance(args[0], BaseSeries): self._series[index] = args def __delitem__(self, index): del self._series[index] @doctest_depends_on(modules=('numpy', 'matplotlib',)) def append(self, arg): """Adds an element from a plot's series to an existing plot. Examples ======== Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the second plot's first series object to the first, use the ``append`` method, like so: >>> from sympy import symbols >>> from sympy.plotting import plot >>> x = symbols('x') >>> p1 = plot(x*x) >>> p2 = plot(x) >>> p1.append(p2[0]) >>> p1 Plot object containing: [0]: cartesian line: x**2 for x over (-10.0, 10.0) [1]: cartesian line: x for x over (-10.0, 10.0) See Also ======== extend """ if isinstance(arg, BaseSeries): self._series.append(arg) else: raise TypeError('Must specify element of plot to append.') @doctest_depends_on(modules=('numpy', 'matplotlib',)) def extend(self, arg): """Adds all series from another plot. Examples ======== Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the second plot to the first, use the ``extend`` method, like so: >>> from sympy import symbols >>> from sympy.plotting import plot >>> x = symbols('x') >>> p1 = plot(x*x) >>> p2 = plot(x) >>> p1.extend(p2) >>> p1 Plot object containing: [0]: cartesian line: x**2 for x over (-10.0, 10.0) [1]: cartesian line: x for x over (-10.0, 10.0) """ if isinstance(arg, Plot): self._series.extend(arg._series) elif is_sequence(arg): self._series.extend(arg) else: raise TypeError('Expecting Plot or sequence of BaseSeries') ############################################################################## # Data Series ############################################################################## #TODO more general way to calculate aesthetics (see get_color_array) ### The base class for all series class BaseSeries(object): """Base class for the data objects containing stuff to be plotted. The backend should check if it supports the data series that it's given. (eg TextBackend supports only LineOver1DRange). It's the backend responsibility to know how to use the class of data series that it's given. Some data series classes are grouped (using a class attribute like is_2Dline) according to the api they present (based only on convention). The backend is not obliged to use that api (eg. The LineOver1DRange belongs to the is_2Dline group and presents the get_points method, but the TextBackend does not use the get_points method). """ # Some flags follow. The rationale for using flags instead of checking base # classes is that setting multiple flags is simpler than multiple # inheritance. 
is_2Dline = False # Some of the backends expect: # - get_points returning 1D np.arrays list_x, list_y # - get_segments returning np.array (done in Line2DBaseSeries) # - get_color_array returning 1D np.array (done in Line2DBaseSeries) # with the colors calculated at the points from get_points is_3Dline = False # Some of the backends expect: # - get_points returning 1D np.arrays list_x, list_y, list_y # - get_segments returning np.array (done in Line2DBaseSeries) # - get_color_array returning 1D np.array (done in Line2DBaseSeries) # with the colors calculated at the points from get_points is_3Dsurface = False # Some of the backends expect: # - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays) # - get_points an alias for get_meshes is_contour = False # Some of the backends expect: # - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays) # - get_points an alias for get_meshes is_implicit = False # Some of the backends expect: # - get_meshes returning mesh_x (1D array), mesh_y(1D array, # mesh_z (2D np.arrays) # - get_points an alias for get_meshes #Different from is_contour as the colormap in backend will be #different is_parametric = False # The calculation of aesthetics expects: # - get_parameter_points returning one or two np.arrays (1D or 2D) # used for calculation aesthetics def __init__(self): super(BaseSeries, self).__init__() @property def is_3D(self): flags3D = [ self.is_3Dline, self.is_3Dsurface ] return any(flags3D) @property def is_line(self): flagslines = [ self.is_2Dline, self.is_3Dline ] return any(flagslines) ### 2D lines class Line2DBaseSeries(BaseSeries): """A base class for 2D lines. - adding the label, steps and only_integers options - making is_2Dline true - defining get_segments and get_color_array """ is_2Dline = True _dim = 2 def __init__(self): super(Line2DBaseSeries, self).__init__() self.label = None self.steps = False self.only_integers = False self.line_color = None def get_segments(self): np = import_module('numpy') points = self.get_points() if self.steps is True: x = np.array((points[0], points[0])).T.flatten()[1:] y = np.array((points[1], points[1])).T.flatten()[:-1] points = (x, y) points = np.ma.array(points).T.reshape(-1, 1, self._dim) return np.ma.concatenate([points[:-1], points[1:]], axis=1) def get_color_array(self): np = import_module('numpy') c = self.line_color if hasattr(c, '__call__'): f = np.vectorize(c) arity = _arity(c) if arity == 1 and self.is_parametric: x = self.get_parameter_points() return f(centers_of_segments(x)) else: variables = list(map(centers_of_segments, self.get_points())) if arity == 1: return f(variables[0]) elif arity == 2: return f(*variables[:2]) else: # only if the line is 3D (otherwise raises an error) return f(*variables) else: return c*np.ones(self.nb_of_points) class List2DSeries(Line2DBaseSeries): """Representation for a line consisting of list of points.""" def __init__(self, list_x, list_y): np = import_module('numpy') super(List2DSeries, self).__init__() self.list_x = np.array(list_x) self.list_y = np.array(list_y) self.label = 'list' def __str__(self): return 'list plot' def get_points(self): return (self.list_x, self.list_y) class LineOver1DRangeSeries(Line2DBaseSeries): """Representation for a line consisting of a SymPy expression over a range.""" def __init__(self, expr, var_start_end, **kwargs): super(LineOver1DRangeSeries, self).__init__() self.expr = sympify(expr) self.label = str(self.expr) self.var = sympify(var_start_end[0]) self.start = float(var_start_end[1]) self.end = 
float(var_start_end[2]) self.nb_of_points = kwargs.get('nb_of_points', 300) self.adaptive = kwargs.get('adaptive', True) self.depth = kwargs.get('depth', 12) self.line_color = kwargs.get('line_color', None) def __str__(self): return 'cartesian line: %s for %s over %s' % ( str(self.expr), str(self.var), str((self.start, self.end))) def get_segments(self): """ Adaptively gets segments for plotting. The adaptive sampling is done by recursively checking if three points are almost collinear. If they are not collinear, then more points are added between those points. References ========== [1] Adaptive polygonal approximation of parametric curves, Luiz Henrique de Figueiredo. """ if self.only_integers or not self.adaptive: return super(LineOver1DRangeSeries, self).get_segments() else: f = lambdify([self.var], self.expr) list_segments = [] def sample(p, q, depth): """ Samples recursively if three points are almost collinear. For depth < 6, points are added irrespective of whether they satisfy the collinearity condition or not. The maximum depth allowed is 12. """ np = import_module('numpy') #Randomly sample to avoid aliasing. random = 0.45 + np.random.rand() * 0.1 xnew = p[0] + random * (q[0] - p[0]) ynew = f(xnew) new_point = np.array([xnew, ynew]) #Maximum depth if depth > self.depth: list_segments.append([p, q]) #Sample irrespective of whether the line is flat till the #depth of 6. We are not using linspace to avoid aliasing. elif depth < 6: sample(p, new_point, depth + 1) sample(new_point, q, depth + 1) #Sample ten points if complex values are encountered #at both ends. If there is a real value in between, then #sample those points further. elif p[1] is None and q[1] is None: xarray = np.linspace(p[0], q[0], 10) yarray = list(map(f, xarray)) if any(y is not None for y in yarray): for i in range(len(yarray) - 1): if yarray[i] is not None or yarray[i + 1] is not None: sample([xarray[i], yarray[i]], [xarray[i + 1], yarray[i + 1]], depth + 1) #Sample further if one of the end points in None( i.e. a complex #value) or the three points are not almost collinear. 
elif (p[1] is None or q[1] is None or new_point[1] is None or not flat(p, new_point, q)): sample(p, new_point, depth + 1) sample(new_point, q, depth + 1) else: list_segments.append([p, q]) f_start = f(self.start) f_end = f(self.end) sample([self.start, f_start], [self.end, f_end], 0) return list_segments def get_points(self): np = import_module('numpy') if self.only_integers is True: list_x = np.linspace(int(self.start), int(self.end), num=int(self.end) - int(self.start) + 1) else: list_x = np.linspace(self.start, self.end, num=self.nb_of_points) f = vectorized_lambdify([self.var], self.expr) list_y = f(list_x) return (list_x, list_y) class Parametric2DLineSeries(Line2DBaseSeries): """Representation for a line consisting of two parametric sympy expressions over a range.""" is_parametric = True def __init__(self, expr_x, expr_y, var_start_end, **kwargs): super(Parametric2DLineSeries, self).__init__() self.expr_x = sympify(expr_x) self.expr_y = sympify(expr_y) self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y)) self.var = sympify(var_start_end[0]) self.start = float(var_start_end[1]) self.end = float(var_start_end[2]) self.nb_of_points = kwargs.get('nb_of_points', 300) self.adaptive = kwargs.get('adaptive', True) self.depth = kwargs.get('depth', 12) self.line_color = kwargs.get('line_color', None) def __str__(self): return 'parametric cartesian line: (%s, %s) for %s over %s' % ( str(self.expr_x), str(self.expr_y), str(self.var), str((self.start, self.end))) def get_parameter_points(self): np = import_module('numpy') return np.linspace(self.start, self.end, num=self.nb_of_points) def get_points(self): param = self.get_parameter_points() fx = vectorized_lambdify([self.var], self.expr_x) fy = vectorized_lambdify([self.var], self.expr_y) list_x = fx(param) list_y = fy(param) return (list_x, list_y) def get_segments(self): """ Adaptively gets segments for plotting. The adaptive sampling is done by recursively checking if three points are almost collinear. If they are not collinear, then more points are added between those points. References ========== [1] Adaptive polygonal approximation of parametric curves, Luiz Henrique de Figueiredo. """ if not self.adaptive: return super(Parametric2DLineSeries, self).get_segments() f_x = lambdify([self.var], self.expr_x) f_y = lambdify([self.var], self.expr_y) list_segments = [] def sample(param_p, param_q, p, q, depth): """ Samples recursively if three points are almost collinear. For depth < 6, points are added irrespective of whether they satisfy the collinearity condition or not. The maximum depth allowed is 12. """ #Randomly sample to avoid aliasing. np = import_module('numpy') random = 0.45 + np.random.rand() * 0.1 param_new = param_p + random * (param_q - param_p) xnew = f_x(param_new) ynew = f_y(param_new) new_point = np.array([xnew, ynew]) #Maximum depth if depth > self.depth: list_segments.append([p, q]) #Sample irrespective of whether the line is flat till the #depth of 6. We are not using linspace to avoid aliasing. elif depth < 6: sample(param_p, param_new, p, new_point, depth + 1) sample(param_new, param_q, new_point, q, depth + 1) #Sample ten points if complex values are encountered #at both ends. If there is a real value in between, then #sample those points further. 
elif ((p[0] is None and q[1] is None) or (p[1] is None and q[1] is None)): param_array = np.linspace(param_p, param_q, 10) x_array = list(map(f_x, param_array)) y_array = list(map(f_y, param_array)) if any(x is not None and y is not None for x, y in zip(x_array, y_array)): for i in range(len(y_array) - 1): if ((x_array[i] is not None and y_array[i] is not None) or (x_array[i + 1] is not None and y_array[i + 1] is not None)): point_a = [x_array[i], y_array[i]] point_b = [x_array[i + 1], y_array[i + 1]] sample(param_array[i], param_array[i], point_a, point_b, depth + 1) #Sample further if one of the end points in None( ie a complex #value) or the three points are not almost collinear. elif (p[0] is None or p[1] is None or q[1] is None or q[0] is None or not flat(p, new_point, q)): sample(param_p, param_new, p, new_point, depth + 1) sample(param_new, param_q, new_point, q, depth + 1) else: list_segments.append([p, q]) f_start_x = f_x(self.start) f_start_y = f_y(self.start) start = [f_start_x, f_start_y] f_end_x = f_x(self.end) f_end_y = f_y(self.end) end = [f_end_x, f_end_y] sample(self.start, self.end, start, end, 0) return list_segments ### 3D lines class Line3DBaseSeries(Line2DBaseSeries): """A base class for 3D lines. Most of the stuff is derived from Line2DBaseSeries.""" is_2Dline = False is_3Dline = True _dim = 3 def __init__(self): super(Line3DBaseSeries, self).__init__() class Parametric3DLineSeries(Line3DBaseSeries): """Representation for a 3D line consisting of two parametric sympy expressions and a range.""" def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs): super(Parametric3DLineSeries, self).__init__() self.expr_x = sympify(expr_x) self.expr_y = sympify(expr_y) self.expr_z = sympify(expr_z) self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y)) self.var = sympify(var_start_end[0]) self.start = float(var_start_end[1]) self.end = float(var_start_end[2]) self.nb_of_points = kwargs.get('nb_of_points', 300) self.line_color = kwargs.get('line_color', None) def __str__(self): return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % ( str(self.expr_x), str(self.expr_y), str(self.expr_z), str(self.var), str((self.start, self.end))) def get_parameter_points(self): np = import_module('numpy') return np.linspace(self.start, self.end, num=self.nb_of_points) def get_points(self): param = self.get_parameter_points() fx = vectorized_lambdify([self.var], self.expr_x) fy = vectorized_lambdify([self.var], self.expr_y) fz = vectorized_lambdify([self.var], self.expr_z) list_x = fx(param) list_y = fy(param) list_z = fz(param) return (list_x, list_y, list_z) ### Surfaces class SurfaceBaseSeries(BaseSeries): """A base class for 3D surfaces.""" is_3Dsurface = True def __init__(self): super(SurfaceBaseSeries, self).__init__() self.surface_color = None def get_color_array(self): np = import_module('numpy') c = self.surface_color if isinstance(c, Callable): f = np.vectorize(c) arity = _arity(c) if self.is_parametric: variables = list(map(centers_of_faces, self.get_parameter_meshes())) if arity == 1: return f(variables[0]) elif arity == 2: return f(*variables) variables = list(map(centers_of_faces, self.get_meshes())) if arity == 1: return f(variables[0]) elif arity == 2: return f(*variables[:2]) else: return f(*variables) else: return c*np.ones(self.nb_of_points) class SurfaceOver2DRangeSeries(SurfaceBaseSeries): """Representation for a 3D surface consisting of a sympy expression and 2D range.""" def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs): 
super(SurfaceOver2DRangeSeries, self).__init__() self.expr = sympify(expr) self.var_x = sympify(var_start_end_x[0]) self.start_x = float(var_start_end_x[1]) self.end_x = float(var_start_end_x[2]) self.var_y = sympify(var_start_end_y[0]) self.start_y = float(var_start_end_y[1]) self.end_y = float(var_start_end_y[2]) self.nb_of_points_x = kwargs.get('nb_of_points_x', 50) self.nb_of_points_y = kwargs.get('nb_of_points_y', 50) self.surface_color = kwargs.get('surface_color', None) def __str__(self): return ('cartesian surface: %s for' ' %s over %s and %s over %s') % ( str(self.expr), str(self.var_x), str((self.start_x, self.end_x)), str(self.var_y), str((self.start_y, self.end_y))) def get_meshes(self): np = import_module('numpy') mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x, num=self.nb_of_points_x), np.linspace(self.start_y, self.end_y, num=self.nb_of_points_y)) f = vectorized_lambdify((self.var_x, self.var_y), self.expr) return (mesh_x, mesh_y, f(mesh_x, mesh_y)) class ParametricSurfaceSeries(SurfaceBaseSeries): """Representation for a 3D surface consisting of three parametric sympy expressions and a range.""" is_parametric = True def __init__( self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v, **kwargs): super(ParametricSurfaceSeries, self).__init__() self.expr_x = sympify(expr_x) self.expr_y = sympify(expr_y) self.expr_z = sympify(expr_z) self.var_u = sympify(var_start_end_u[0]) self.start_u = float(var_start_end_u[1]) self.end_u = float(var_start_end_u[2]) self.var_v = sympify(var_start_end_v[0]) self.start_v = float(var_start_end_v[1]) self.end_v = float(var_start_end_v[2]) self.nb_of_points_u = kwargs.get('nb_of_points_u', 50) self.nb_of_points_v = kwargs.get('nb_of_points_v', 50) self.surface_color = kwargs.get('surface_color', None) def __str__(self): return ('parametric cartesian surface: (%s, %s, %s) for' ' %s over %s and %s over %s') % ( str(self.expr_x), str(self.expr_y), str(self.expr_z), str(self.var_u), str((self.start_u, self.end_u)), str(self.var_v), str((self.start_v, self.end_v))) def get_parameter_meshes(self): np = import_module('numpy') return np.meshgrid(np.linspace(self.start_u, self.end_u, num=self.nb_of_points_u), np.linspace(self.start_v, self.end_v, num=self.nb_of_points_v)) def get_meshes(self): mesh_u, mesh_v = self.get_parameter_meshes() fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x) fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y) fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z) return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v)) ### Contours class ContourSeries(BaseSeries): """Representation for a contour plot.""" #The code is mostly repetition of SurfaceOver2DRange. #XXX: Presently not used in any of those functions. #XXX: Add contour plot and use this seties. 
is_contour = True def __init__(self, expr, var_start_end_x, var_start_end_y): super(ContourSeries, self).__init__() self.nb_of_points_x = 50 self.nb_of_points_y = 50 self.expr = sympify(expr) self.var_x = sympify(var_start_end_x[0]) self.start_x = float(var_start_end_x[1]) self.end_x = float(var_start_end_x[2]) self.var_y = sympify(var_start_end_y[0]) self.start_y = float(var_start_end_y[1]) self.end_y = float(var_start_end_y[2]) self.get_points = self.get_meshes def __str__(self): return ('contour: %s for ' '%s over %s and %s over %s') % ( str(self.expr), str(self.var_x), str((self.start_x, self.end_x)), str(self.var_y), str((self.start_y, self.end_y))) def get_meshes(self): np = import_module('numpy') mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x, num=self.nb_of_points_x), np.linspace(self.start_y, self.end_y, num=self.nb_of_points_y)) f = vectorized_lambdify((self.var_x, self.var_y), self.expr) return (mesh_x, mesh_y, f(mesh_x, mesh_y)) ############################################################################## # Backends ############################################################################## class BaseBackend(object): def __init__(self, parent): super(BaseBackend, self).__init__() self.parent = parent ## don't have to check for the success of importing matplotlib in each case; ## we will only be using this backend if we can successfully import matploblib class MatplotlibBackend(BaseBackend): def __init__(self, parent): super(MatplotlibBackend, self).__init__(parent) are_3D = [s.is_3D for s in self.parent._series] self.matplotlib = import_module('matplotlib', __import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']}, min_module_version='1.1.0', catch=(RuntimeError,)) self.plt = self.matplotlib.pyplot self.cm = self.matplotlib.cm self.LineCollection = self.matplotlib.collections.LineCollection if any(are_3D) and not all(are_3D): raise ValueError('The matplotlib backend can not mix 2D and 3D.') elif not any(are_3D): self.fig = self.plt.figure() self.ax = self.fig.add_subplot(111) self.ax.spines['left'].set_position('zero') self.ax.spines['right'].set_color('none') self.ax.spines['bottom'].set_position('zero') self.ax.spines['top'].set_color('none') self.ax.spines['left'].set_smart_bounds(True) self.ax.spines['bottom'].set_smart_bounds(False) self.ax.xaxis.set_ticks_position('bottom') self.ax.yaxis.set_ticks_position('left') elif all(are_3D): ## mpl_toolkits.mplot3d is necessary for ## projection='3d' mpl_toolkits = import_module('mpl_toolkits', __import__kwargs={'fromlist': ['mplot3d']}) self.fig = self.plt.figure() self.ax = self.fig.add_subplot(111, projection='3d') def process_series(self): parent = self.parent for s in self.parent._series: # Create the collections if s.is_2Dline: collection = self.LineCollection(s.get_segments()) self.ax.add_collection(collection) elif s.is_contour: self.ax.contour(*s.get_meshes()) elif s.is_3Dline: # TODO too complicated, I blame matplotlib mpl_toolkits = import_module('mpl_toolkits', __import__kwargs={'fromlist': ['mplot3d']}) art3d = mpl_toolkits.mplot3d.art3d collection = art3d.Line3DCollection(s.get_segments()) self.ax.add_collection(collection) x, y, z = s.get_points() self.ax.set_xlim((min(x), max(x))) self.ax.set_ylim((min(y), max(y))) self.ax.set_zlim((min(z), max(z))) elif s.is_3Dsurface: x, y, z = s.get_meshes() collection = self.ax.plot_surface(x, y, z, cmap=self.cm.jet, rstride=1, cstride=1, linewidth=0.1) elif s.is_implicit: #Smart bounds have to be set to False for implicit plots. 
self.ax.spines['left'].set_smart_bounds(False) self.ax.spines['bottom'].set_smart_bounds(False) points = s.get_raster() if len(points) == 2: #interval math plotting x, y = _matplotlib_list(points[0]) self.ax.fill(x, y, facecolor=s.line_color, edgecolor='None') else: # use contourf or contour depending on whether it is # an inequality or equality. #XXX: ``contour`` plots multiple lines. Should be fixed. ListedColormap = self.matplotlib.colors.ListedColormap colormap = ListedColormap(["white", s.line_color]) xarray, yarray, zarray, plot_type = points if plot_type == 'contour': self.ax.contour(xarray, yarray, zarray, contours=(0, 0), fill=False, cmap=colormap) else: self.ax.contourf(xarray, yarray, zarray, cmap=colormap) else: raise ValueError('The matplotlib backend supports only ' 'is_2Dline, is_3Dline, is_3Dsurface and ' 'is_contour objects.') # Customise the collections with the corresponding per-series # options. if hasattr(s, 'label'): collection.set_label(s.label) if s.is_line and s.line_color: if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable): color_array = s.get_color_array() collection.set_array(color_array) else: collection.set_color(s.line_color) if s.is_3Dsurface and s.surface_color: if self.matplotlib.__version__ < "1.2.0": # TODO in the distant future remove this check warnings.warn('The version of matplotlib is too old to use surface coloring.') elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable): color_array = s.get_color_array() color_array = color_array.reshape(color_array.size) collection.set_array(color_array) else: collection.set_color(s.surface_color) # Set global options. # TODO The 3D stuff # XXX The order of those is important. mpl_toolkits = import_module('mpl_toolkits', __import__kwargs={'fromlist': ['mplot3d']}) Axes3D = mpl_toolkits.mplot3d.Axes3D if parent.xscale and not isinstance(self.ax, Axes3D): self.ax.set_xscale(parent.xscale) if parent.yscale and not isinstance(self.ax, Axes3D): self.ax.set_yscale(parent.yscale) if parent.xlim: self.ax.set_xlim(parent.xlim) else: if all(isinstance(s, LineOver1DRangeSeries) for s in parent._series): starts = [s.start for s in parent._series] ends = [s.end for s in parent._series] self.ax.set_xlim(min(starts), max(ends)) if parent.ylim: self.ax.set_ylim(parent.ylim) if not isinstance(self.ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check self.ax.set_autoscale_on(parent.autoscale) if parent.axis_center: val = parent.axis_center if isinstance(self.ax, Axes3D): pass elif val == 'center': self.ax.spines['left'].set_position('center') self.ax.spines['bottom'].set_position('center') elif val == 'auto': xl, xh = self.ax.get_xlim() yl, yh = self.ax.get_ylim() pos_left = ('data', 0) if xl*xh <= 0 else 'center' pos_bottom = ('data', 0) if yl*yh <= 0 else 'center' self.ax.spines['left'].set_position(pos_left) self.ax.spines['bottom'].set_position(pos_bottom) else: self.ax.spines['left'].set_position(('data', val[0])) self.ax.spines['bottom'].set_position(('data', val[1])) if not parent.axis: self.ax.set_axis_off() if parent.legend: if self.ax.legend(): self.ax.legend_.set_visible(parent.legend) if parent.margin: self.ax.set_xmargin(parent.margin) self.ax.set_ymargin(parent.margin) if parent.title: self.ax.set_title(parent.title) if parent.xlabel: self.ax.set_xlabel(parent.xlabel, position=(1, 0)) if parent.ylabel: self.ax.set_ylabel(parent.ylabel, position=(0, 1)) def show(self): self.process_series() #TODO after fixing 
https://github.com/ipython/ipython/issues/1255 # you can uncomment the next line and remove the pyplot.show() call #self.fig.show() if _show: self.plt.show() def save(self, path): self.process_series() self.fig.savefig(path) def close(self): self.plt.close(self.fig) class TextBackend(BaseBackend): def __init__(self, parent): super(TextBackend, self).__init__(parent) def show(self): if len(self.parent._series) != 1: raise ValueError( 'The TextBackend supports only one graph per Plot.') elif not isinstance(self.parent._series[0], LineOver1DRangeSeries): raise ValueError( 'The TextBackend supports only expressions over a 1D range') else: ser = self.parent._series[0] textplot(ser.expr, ser.start, ser.end) def close(self): pass class DefaultBackend(BaseBackend): def __new__(cls, parent): matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,)) if matplotlib: return MatplotlibBackend(parent) else: return TextBackend(parent) plot_backends = { 'matplotlib': MatplotlibBackend, 'text': TextBackend, 'default': DefaultBackend } ############################################################################## # Finding the centers of line segments or mesh faces ############################################################################## def centers_of_segments(array): np = import_module('numpy') return np.average(np.vstack((array[:-1], array[1:])), 0) def centers_of_faces(array): np = import_module('numpy') return np.average(np.dstack((array[:-1, :-1], array[1:, :-1], array[:-1, 1: ], array[:-1, :-1], )), 2) def flat(x, y, z, eps=1e-3): """Checks whether three points are almost collinear""" np = import_module('numpy') # Workaround plotting piecewise (#8577): # workaround for `lambdify` in `.experimental_lambdify` fails # to return numerical values in some cases. Lower-level fix # in `lambdify` is possible. vector_a = (x - y).astype(np.float) vector_b = (z - y).astype(np.float) dot_product = np.dot(vector_a, vector_b) vector_a_norm = np.linalg.norm(vector_a) vector_b_norm = np.linalg.norm(vector_b) cos_theta = dot_product / (vector_a_norm * vector_b_norm) return abs(cos_theta + 1) < eps def _matplotlib_list(interval_list): """ Returns lists for matplotlib ``fill`` command from a list of bounding rectangular intervals """ xlist = [] ylist = [] if len(interval_list): for intervals in interval_list: intervalx = intervals[0] intervaly = intervals[1] xlist.extend([intervalx.start, intervalx.start, intervalx.end, intervalx.end, None]) ylist.extend([intervaly.start, intervaly.end, intervaly.end, intervaly.start, None]) else: #XXX Ugly hack. Matplotlib does not accept empty lists for ``fill`` xlist.extend([None, None, None, None]) ylist.extend([None, None, None, None]) return xlist, ylist ####New API for plotting module #### # TODO: Add color arrays for plots. # TODO: Add more plotting options for 3d plots. # TODO: Adaptive sampling for 3D plots. @doctest_depends_on(modules=('numpy', 'matplotlib',)) def plot(*args, **kwargs): """ Plots a function of a single variable and returns an instance of the ``Plot`` class (also, see the description of the ``show`` keyword argument below). The plotting uses an adaptive algorithm which samples recursively to accurately plot the plot. The adaptive algorithm uses a random point near the midpoint of two points that has to be further sampled. Hence the same plots can appear slightly different. Usage ===== Single Plot ``plot(expr, range, **kwargs)`` If the range is not specified, then a default range of (-10, 10) is used. 
Multiple plots with same range. ``plot(expr1, expr2, ..., range, **kwargs)`` If the range is not specified, then a default range of (-10, 10) is used. Multiple plots with different ranges. ``plot((expr1, range), (expr2, range), ..., **kwargs)`` Range has to be specified for every expression. Default range may change in the future if a more advanced default range detection algorithm is implemented. Arguments ========= ``expr`` : Expression representing the function of single variable ``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable. Keyword Arguments ================= Arguments for ``plot`` function: ``show``: Boolean. The default value is set to ``True``. Set show to ``False`` and the function will not display the plot. The returned instance of the ``Plot`` class can then be used to save or display the plot by calling the ``save()`` and ``show()`` methods respectively. Arguments for ``LineOver1DRangeSeries`` class: ``adaptive``: Boolean. The default value is set to True. Set adaptive to False and specify ``nb_of_points`` if uniform sampling is required. ``depth``: int Recursion depth of the adaptive algorithm. A depth of value ``n`` samples a maximum of `2^{n}` points. ``nb_of_points``: int. Used when the ``adaptive`` is set to False. The function is uniformly sampled at ``nb_of_points`` number of points. Aesthetics options: ``line_color``: float. Specifies the color for the plot. See ``Plot`` to see how to set color for the plots. If there are multiple plots, then the same series series are applied to all the plots. If you want to set these options separately, you can index the ``Plot`` object returned and set it. Arguments for ``Plot`` class: ``title`` : str. Title of the plot. It is set to the latex representation of the expression, if the plot has only one expression. ``xlabel`` : str. Label for the x-axis. ``ylabel`` : str. Label for the y-axis. ``xscale``: {'linear', 'log'} Sets the scaling of the x-axis. ``yscale``: {'linear', 'log'} Sets the scaling if the y-axis. ``axis_center``: tuple of two floats denoting the coordinates of the center or {'center', 'auto'} ``xlim`` : tuple of two floats, denoting the x-axis limits. ``ylim`` : tuple of two floats, denoting the y-axis limits. Examples ======== >>> from sympy import symbols >>> from sympy.plotting import plot >>> x = symbols('x') Single Plot >>> plot(x**2, (x, -5, 5)) Plot object containing: [0]: cartesian line: x**2 for x over (-5.0, 5.0) Multiple plots with single range. >>> plot(x, x**2, x**3, (x, -5, 5)) Plot object containing: [0]: cartesian line: x for x over (-5.0, 5.0) [1]: cartesian line: x**2 for x over (-5.0, 5.0) [2]: cartesian line: x**3 for x over (-5.0, 5.0) Multiple plots with different ranges. >>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5))) Plot object containing: [0]: cartesian line: x**2 for x over (-6.0, 6.0) [1]: cartesian line: x for x over (-5.0, 5.0) No adaptive sampling. >>> plot(x**2, adaptive=False, nb_of_points=400) Plot object containing: [0]: cartesian line: x**2 for x over (-10.0, 10.0) See Also ======== Plot, LineOver1DRangeSeries. 
""" args = list(map(sympify, args)) free = set() for a in args: if isinstance(a, Expr): free |= a.free_symbols if len(free) > 1: raise ValueError( 'The same variable should be used in all ' 'univariate expressions being plotted.') x = free.pop() if free else Symbol('x') kwargs.setdefault('xlabel', x.name) kwargs.setdefault('ylabel', 'f(%s)' % x.name) show = kwargs.pop('show', True) series = [] plot_expr = check_arguments(args, 1, 1) series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr] plots = Plot(*series, **kwargs) if show: plots.show() return plots @doctest_depends_on(modules=('numpy', 'matplotlib',)) def plot_parametric(*args, **kwargs): """ Plots a 2D parametric plot. The plotting uses an adaptive algorithm which samples recursively to accurately plot the plot. The adaptive algorithm uses a random point near the midpoint of two points that has to be further sampled. Hence the same plots can appear slightly different. Usage ===== Single plot. ``plot_parametric(expr_x, expr_y, range, **kwargs)`` If the range is not specified, then a default range of (-10, 10) is used. Multiple plots with same range. ``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)`` If the range is not specified, then a default range of (-10, 10) is used. Multiple plots with different ranges. ``plot_parametric((expr_x, expr_y, range), ..., **kwargs)`` Range has to be specified for every expression. Default range may change in the future if a more advanced default range detection algorithm is implemented. Arguments ========= ``expr_x`` : Expression representing the function along x. ``expr_y`` : Expression representing the function along y. ``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter variable. Keyword Arguments ================= Arguments for ``Parametric2DLineSeries`` class: ``adaptive``: Boolean. The default value is set to True. Set adaptive to False and specify ``nb_of_points`` if uniform sampling is required. ``depth``: int Recursion depth of the adaptive algorithm. A depth of value ``n`` samples a maximum of `2^{n}` points. ``nb_of_points``: int. Used when the ``adaptive`` is set to False. The function is uniformly sampled at ``nb_of_points`` number of points. Aesthetics ---------- ``line_color``: function which returns a float. Specifies the color for the plot. See ``sympy.plotting.Plot`` for more details. If there are multiple plots, then the same Series arguments are applied to all the plots. If you want to set these options separately, you can index the returned ``Plot`` object and set it. Arguments for ``Plot`` class: ``xlabel`` : str. Label for the x-axis. ``ylabel`` : str. Label for the y-axis. ``xscale``: {'linear', 'log'} Sets the scaling of the x-axis. ``yscale``: {'linear', 'log'} Sets the scaling if the y-axis. ``axis_center``: tuple of two floats denoting the coordinates of the center or {'center', 'auto'} ``xlim`` : tuple of two floats, denoting the x-axis limits. ``ylim`` : tuple of two floats, denoting the y-axis limits. Examples ======== >>> from sympy import symbols, cos, sin >>> from sympy.plotting import plot_parametric >>> u = symbols('u') Single Parametric plot >>> plot_parametric(cos(u), sin(u), (u, -5, 5)) Plot object containing: [0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0) Multiple parametric plot with single range. 
>>> plot_parametric((cos(u), sin(u)), (u, cos(u))) Plot object containing: [0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0) [1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0) Multiple parametric plots. >>> plot_parametric((cos(u), sin(u), (u, -5, 5)), ... (cos(u), u, (u, -5, 5))) Plot object containing: [0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0) [1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0) See Also ======== Plot, Parametric2DLineSeries """ args = list(map(sympify, args)) show = kwargs.pop('show', True) series = [] plot_expr = check_arguments(args, 2, 1) series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr] plots = Plot(*series, **kwargs) if show: plots.show() return plots @doctest_depends_on(modules=('numpy', 'matplotlib',)) def plot3d_parametric_line(*args, **kwargs): """ Plots a 3D parametric line plot. Usage ===== Single plot: ``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)`` If the range is not specified, then a default range of (-10, 10) is used. Multiple plots. ``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)`` Ranges have to be specified for every expression. Default range may change in the future if a more advanced default range detection algorithm is implemented. Arguments ========= ``expr_x`` : Expression representing the function along x. ``expr_y`` : Expression representing the function along y. ``expr_z`` : Expression representing the function along z. ``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter variable. Keyword Arguments ================= Arguments for ``Parametric3DLineSeries`` class. ``nb_of_points``: The range is uniformly sampled at ``nb_of_points`` number of points. Aesthetics: ``line_color``: function which returns a float. Specifies the color for the plot. See ``sympy.plotting.Plot`` for more details. If there are multiple plots, then the same series arguments are applied to all the plots. If you want to set these options separately, you can index the returned ``Plot`` object and set it. Arguments for ``Plot`` class. ``title`` : str. Title of the plot. Examples ======== >>> from sympy import symbols, cos, sin >>> from sympy.plotting import plot3d_parametric_line >>> u = symbols('u') Single plot. >>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5)) Plot object containing: [0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0) Multiple plots. >>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)), ... (sin(u), u**2, u, (u, -5, 5))) Plot object containing: [0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0) [1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0) See Also ======== Plot, Parametric3DLineSeries """ args = list(map(sympify, args)) show = kwargs.pop('show', True) series = [] plot_expr = check_arguments(args, 3, 1) series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr] plots = Plot(*series, **kwargs) if show: plots.show() return plots @doctest_depends_on(modules=('numpy', 'matplotlib',)) def plot3d(*args, **kwargs): """ Plots a 3D surface plot. Usage ===== Single plot ``plot3d(expr, range_x, range_y, **kwargs)`` If the ranges are not specified, then a default range of (-10, 10) is used. Multiple plot with the same range. ``plot3d(expr1, expr2, range_x, range_y, **kwargs)`` If the ranges are not specified, then a default range of (-10, 10) is used. Multiple plots with different ranges. 
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)`` Ranges have to be specified for every expression. Default range may change in the future if a more advanced default range detection algorithm is implemented. Arguments ========= ``expr`` : Expression representing the function along x. ``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x variable. ``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y variable. Keyword Arguments ================= Arguments for ``SurfaceOver2DRangeSeries`` class: ``nb_of_points_x``: int. The x range is sampled uniformly at ``nb_of_points_x`` of points. ``nb_of_points_y``: int. The y range is sampled uniformly at ``nb_of_points_y`` of points. Aesthetics: ``surface_color``: Function which returns a float. Specifies the color for the surface of the plot. See ``sympy.plotting.Plot`` for more details. If there are multiple plots, then the same series arguments are applied to all the plots. If you want to set these options separately, you can index the returned ``Plot`` object and set it. Arguments for ``Plot`` class: ``title`` : str. Title of the plot. Examples ======== >>> from sympy import symbols >>> from sympy.plotting import plot3d >>> x, y = symbols('x y') Single plot >>> plot3d(x*y, (x, -5, 5), (y, -5, 5)) Plot object containing: [0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0) Multiple plots with same range >>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5)) Plot object containing: [0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0) [1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0) Multiple plots with different ranges. >>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)), ... (x*y, (x, -3, 3), (y, -3, 3))) Plot object containing: [0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0) [1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0) See Also ======== Plot, SurfaceOver2DRangeSeries """ args = list(map(sympify, args)) show = kwargs.pop('show', True) series = [] plot_expr = check_arguments(args, 1, 2) series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr] plots = Plot(*series, **kwargs) if show: plots.show() return plots @doctest_depends_on(modules=('numpy', 'matplotlib',)) def plot3d_parametric_surface(*args, **kwargs): """ Plots a 3D parametric surface plot. Usage ===== Single plot. ``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)`` If the ranges is not specified, then a default range of (-10, 10) is used. Multiple plots. ``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)`` Ranges have to be specified for every expression. Default range may change in the future if a more advanced default range detection algorithm is implemented. Arguments ========= ``expr_x``: Expression representing the function along ``x``. ``expr_y``: Expression representing the function along ``y``. ``expr_z``: Expression representing the function along ``z``. ``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u`` variable. ``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the v variable. Keyword Arguments ================= Arguments for ``ParametricSurfaceSeries`` class: ``nb_of_points_u``: int. The ``u`` range is sampled uniformly at ``nb_of_points_v`` of points ``nb_of_points_y``: int. The ``v`` range is sampled uniformly at ``nb_of_points_y`` of points Aesthetics: ``surface_color``: Function which returns a float. 
Specifies the color for the surface of the plot. See ``sympy.plotting.Plot`` for more details. If there are multiple plots, then the same series arguments are applied for all the plots. If you want to set these options separately, you can index the returned ``Plot`` object and set it. Arguments for ``Plot`` class: ``title`` : str. Title of the plot. Examples ======== >>> from sympy import symbols, cos, sin >>> from sympy.plotting import plot3d_parametric_surface >>> u, v = symbols('u v') Single plot. >>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v, ... (u, -5, 5), (v, -5, 5)) Plot object containing: [0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0) See Also ======== Plot, ParametricSurfaceSeries """ args = list(map(sympify, args)) show = kwargs.pop('show', True) series = [] plot_expr = check_arguments(args, 3, 2) series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr] plots = Plot(*series, **kwargs) if show: plots.show() return plots def check_arguments(args, expr_len, nb_of_free_symbols): """ Checks the arguments and converts into tuples of the form (exprs, ranges) Examples ======== >>> from sympy import plot, cos, sin, symbols >>> from sympy.plotting.plot import check_arguments >>> x = symbols('x') >>> check_arguments([cos(x), sin(x)], 2, 1) [(cos(x), sin(x), (x, -10, 10))] >>> check_arguments([x, x**2], 1, 1) [(x, (x, -10, 10)), (x**2, (x, -10, 10))] """ if expr_len > 1 and isinstance(args[0], Expr): # Multiple expressions same range. # The arguments are tuples when the expression length is # greater than 1. if len(args) < expr_len: raise ValueError("len(args) should not be less than expr_len") for i in range(len(args)): if isinstance(args[i], Tuple): break else: i = len(args) + 1 exprs = Tuple(*args[:i]) free_symbols = list(set().union(*[e.free_symbols for e in exprs])) if len(args) == expr_len + nb_of_free_symbols: #Ranges given plots = [exprs + Tuple(*args[expr_len:])] else: default_range = Tuple(-10, 10) ranges = [] for symbol in free_symbols: ranges.append(Tuple(symbol) + default_range) for i in range(len(free_symbols) - nb_of_free_symbols): ranges.append(Tuple(Dummy()) + default_range) plots = [exprs + Tuple(*ranges)] return plots if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and len(args[0]) == expr_len and expr_len != 3): # Cannot handle expressions with number of expression = 3. It is # not possible to differentiate between expressions and ranges. #Series of plots with same range for i in range(len(args)): if isinstance(args[i], Tuple) and len(args[i]) != expr_len: break if not isinstance(args[i], Tuple): args[i] = Tuple(args[i]) else: i = len(args) + 1 exprs = args[:i] assert all(isinstance(e, Expr) for expr in exprs for e in expr) free_symbols = list(set().union(*[e.free_symbols for expr in exprs for e in expr])) if len(free_symbols) > nb_of_free_symbols: raise ValueError("The number of free_symbols in the expression " "is greater than %d" % nb_of_free_symbols) if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple): ranges = Tuple(*[range_expr for range_expr in args[ i:i + nb_of_free_symbols]]) plots = [expr + ranges for expr in exprs] return plots else: #Use default ranges. 
default_range = Tuple(-10, 10) ranges = [] for symbol in free_symbols: ranges.append(Tuple(symbol) + default_range) for i in range(len(free_symbols) - nb_of_free_symbols): ranges.append(Tuple(Dummy()) + default_range) ranges = Tuple(*ranges) plots = [expr + ranges for expr in exprs] return plots elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols: #Multiple plots with different ranges. for arg in args: for i in range(expr_len): if not isinstance(arg[i], Expr): raise ValueError("Expected an expression, given %s" % str(arg[i])) for i in range(nb_of_free_symbols): if not len(arg[i + expr_len]) == 3: raise ValueError("The ranges should be a tuple of " "length 3, got %s" % str(arg[i + expr_len])) return args
bsd-3-clause
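A minimal sketch (not part of the sympy sources above, and assuming matplotlib is installed so the default backend is the matplotlib one) of the ``_backend`` customization pattern the module docstring describes: after ``save()`` or ``show()`` the plot exposes the raw matplotlib figure and axes. The file names and the extra styling call are illustrative only.

from sympy import symbols, sin
from sympy.plotting import plot

x = symbols('x')
p = plot(sin(x), (x, -5, 5), title='sine', show=False)
p.save('sine.png')                         # save() instantiates the backend and writes the figure
fig, ax = p._backend.fig, p._backend.ax    # plain matplotlib objects, as the module docstring notes
ax.axhline(0.5, linestyle='--')            # from here on, ordinary matplotlib calls
fig.savefig('sine_tweaked.png')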
mxjl620/scikit-learn
examples/manifold/plot_mds.py
261
2616
""" ========================= Multi-dimensional scaling ========================= An illustration of the metric and non-metric MDS on generated noisy data. The reconstructed points using the metric MDS and non metric MDS are slightly shifted to avoid overlapping. """ # Author: Nelle Varoquaux <nelle.varoquaux@gmail.com> # Licence: BSD print(__doc__) import numpy as np from matplotlib import pyplot as plt from matplotlib.collections import LineCollection from sklearn import manifold from sklearn.metrics import euclidean_distances from sklearn.decomposition import PCA n_samples = 20 seed = np.random.RandomState(seed=3) X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float) X_true = X_true.reshape((n_samples, 2)) # Center the data X_true -= X_true.mean() similarities = euclidean_distances(X_true) # Add noise to the similarities noise = np.random.rand(n_samples, n_samples) noise = noise + noise.T noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0 similarities += noise mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed, dissimilarity="precomputed", n_jobs=1) pos = mds.fit(similarities).embedding_ nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12, dissimilarity="precomputed", random_state=seed, n_jobs=1, n_init=1) npos = nmds.fit_transform(similarities, init=pos) # Rescale the data pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum()) npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum()) # Rotate the data clf = PCA(n_components=2) X_true = clf.fit_transform(X_true) pos = clf.fit_transform(pos) npos = clf.fit_transform(npos) fig = plt.figure(1) ax = plt.axes([0., 0., 1., 1.]) plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20) plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g') plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b') plt.legend(('True position', 'MDS', 'NMDS'), loc='best') similarities = similarities.max() / similarities * 100 similarities[np.isinf(similarities)] = 0 # Plot the edges start_idx, end_idx = np.where(pos) #a sequence of (*line0*, *line1*, *line2*), where:: # linen = (x0, y0), (x1, y1), ... (xm, ym) segments = [[X_true[i, :], X_true[j, :]] for i in range(len(pos)) for j in range(len(pos))] values = np.abs(similarities) lc = LineCollection(segments, zorder=0, cmap=plt.cm.hot_r, norm=plt.Normalize(0, values.max())) lc.set_array(similarities.flatten()) lc.set_linewidths(0.5 * np.ones(len(segments))) ax.add_collection(lc) plt.show()
bsd-3-clause
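A small companion sketch for the MDS example above, checking how well a precomputed-dissimilarity embedding preserves the original distances. The toy data, the parameter values, and the correlation check are illustrative choices, not part of the original script.

import numpy as np
from sklearn import manifold
from sklearn.metrics import euclidean_distances

rng = np.random.RandomState(0)
X = rng.rand(30, 4)                       # toy high-dimensional points
D = euclidean_distances(X)                # precomputed dissimilarities, as in the script above

mds = manifold.MDS(n_components=2, dissimilarity='precomputed', random_state=0)
emb = mds.fit_transform(D)

# The embedding preserves distances only approximately; stress_ summarizes the residual error.
print('stress:', mds.stress_)
print('distance correlation:',
      np.corrcoef(D.ravel(), euclidean_distances(emb).ravel())[0, 1])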
cosmodesi/snsurvey
src/control.py
1
1120
#!/usr/bin/env python import numpy import sncosmo import scipy.optimize import matplotlib.pyplot as plt model=sncosmo.Model(source='salt2-extended') def f(t ,rlim): # print t, model.bandflux('desr',t, zp = rlim, zpsys='ab') return model.bandflux('desr',t, zp = rlim, zpsys='ab')-1. def controlTime(z,rlim): model.set(z=z, t0=55000.) model.set_source_peakabsmag(absmag=-19.3,band='bessellb',magsys='ab') pre = scipy.optimize.fsolve(f, 55000.-15*(1+z) ,args=(rlim),xtol=1e-8) post = scipy.optimize.fsolve(f, 55000.+20*(1+z) ,args=(rlim),xtol=1e-8) return max(post[0]-pre[0],0) # print scipy.optimize.fsolve(f, 55000.+40,args=(rlim),factor=1.,xtol=1e-8) def plot(): lmag = numpy.arange(19.5,21.6,0.5) zs = numpy.arange(0.02, 0.2501,0.02) ans = [] for lm in lmag: ans_=[] for z in zs: ans_.append(controlTime(z,lm)) ans.append(ans_) for lm, ct in zip(lmag, ans): plt.plot(zs, ct, label = '$r_{{lim}} = {}$'.format(str(lm))) plt.xlabel(r'$z$') plt.ylabel(r'control time (days)') plt.legend() plt.show()
bsd-3-clause
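The controlTime() function above brackets the epochs at which the model flux crosses the detection limit. As a self-contained illustration of that root-finding idea, here is a toy stand-in that replaces the sncosmo SALT2 band flux with a Gaussian pulse; all numbers are made up, and brentq is used in place of fsolve.

import numpy as np
from scipy.optimize import brentq

# Toy light curve: a Gaussian flux pulse peaking at t0, standing in for the
# sncosmo band flux used in control.py (purely illustrative numbers).
t0, width, peak_flux, flux_limit = 55000.0, 12.0, 8.0, 1.0

def excess_flux(t):
    # Positive while the source is brighter than the detection limit.
    return peak_flux * np.exp(-0.5 * ((t - t0) / width) ** 2) - flux_limit

# Bracket the rising and fading crossings on either side of the peak,
# mirroring the pre/post fsolve calls in controlTime().
rise = brentq(excess_flux, t0 - 10 * width, t0)
fade = brentq(excess_flux, t0, t0 + 10 * width)
print('control time (days):', max(fade - rise, 0.0))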
rajat1994/scikit-learn
examples/plot_kernel_ridge_regression.py
230
6222
""" ============================================= Comparison of kernel ridge regression and SVR ============================================= Both kernel ridge regression (KRR) and SVR learn a non-linear function by employing the kernel trick, i.e., they learn a linear function in the space induced by the respective kernel which corresponds to a non-linear function in the original space. They differ in the loss functions (ridge versus epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in closed-form and is typically faster for medium-sized datasets. On the other hand, the learned model is non-sparse and thus slower than SVR at prediction-time. This example illustrates both methods on an artificial dataset, which consists of a sinusoidal target function and strong noise added to every fifth datapoint. The first figure compares the learned model of KRR and SVR when both complexity/regularization and bandwidth of the RBF kernel are optimized using grid-search. The learned functions are very similar; however, fitting KRR is approx. seven times faster than fitting SVR (both with grid-search). However, prediction of 100000 target values is more than tree times faster with SVR since it has learned a sparse model using only approx. 1/3 of the 100 training datapoints as support vectors. The next figure compares the time for fitting and prediction of KRR and SVR for different sizes of the training set. Fitting KRR is faster than SVR for medium- sized training sets (less than 1000 samples); however, for larger training sets SVR scales better. With regard to prediction time, SVR is faster than KRR for all sizes of the training set because of the learned sparse solution. Note that the degree of sparsity and thus the prediction time depends on the parameters epsilon and C of the SVR. 
""" # Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # License: BSD 3 clause from __future__ import division import time import numpy as np from sklearn.svm import SVR from sklearn.grid_search import GridSearchCV from sklearn.learning_curve import learning_curve from sklearn.kernel_ridge import KernelRidge import matplotlib.pyplot as plt rng = np.random.RandomState(0) ############################################################################# # Generate sample data X = 5 * rng.rand(10000, 1) y = np.sin(X).ravel() # Add noise to targets y[::5] += 3 * (0.5 - rng.rand(X.shape[0]/5)) X_plot = np.linspace(0, 5, 100000)[:, None] ############################################################################# # Fit regression model train_size = 100 svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5, param_grid={"C": [1e0, 1e1, 1e2, 1e3], "gamma": np.logspace(-2, 2, 5)}) kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5, param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3], "gamma": np.logspace(-2, 2, 5)}) t0 = time.time() svr.fit(X[:train_size], y[:train_size]) svr_fit = time.time() - t0 print("SVR complexity and bandwidth selected and model fitted in %.3f s" % svr_fit) t0 = time.time() kr.fit(X[:train_size], y[:train_size]) kr_fit = time.time() - t0 print("KRR complexity and bandwidth selected and model fitted in %.3f s" % kr_fit) sv_ratio = svr.best_estimator_.support_.shape[0] / train_size print("Support vector ratio: %.3f" % sv_ratio) t0 = time.time() y_svr = svr.predict(X_plot) svr_predict = time.time() - t0 print("SVR prediction for %d inputs in %.3f s" % (X_plot.shape[0], svr_predict)) t0 = time.time() y_kr = kr.predict(X_plot) kr_predict = time.time() - t0 print("KRR prediction for %d inputs in %.3f s" % (X_plot.shape[0], kr_predict)) ############################################################################# # look at the results sv_ind = svr.best_estimator_.support_ plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors') plt.scatter(X[:100], y[:100], c='k', label='data') plt.hold('on') plt.plot(X_plot, y_svr, c='r', label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict)) plt.plot(X_plot, y_kr, c='g', label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict)) plt.xlabel('data') plt.ylabel('target') plt.title('SVR versus Kernel Ridge') plt.legend() # Visualize training and prediction time plt.figure() # Generate sample data X = 5 * rng.rand(10000, 1) y = np.sin(X).ravel() y[::5] += 3 * (0.5 - rng.rand(X.shape[0]/5)) sizes = np.logspace(1, 4, 7) for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1, gamma=10), "SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items(): train_time = [] test_time = [] for train_test_size in sizes: t0 = time.time() estimator.fit(X[:train_test_size], y[:train_test_size]) train_time.append(time.time() - t0) t0 = time.time() estimator.predict(X_plot[:1000]) test_time.append(time.time() - t0) plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g", label="%s (train)" % name) plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g", label="%s (test)" % name) plt.xscale("log") plt.yscale("log") plt.xlabel("Train size") plt.ylabel("Time (seconds)") plt.title('Execution Time') plt.legend(loc="best") # Visualize learning curves plt.figure() svr = SVR(kernel='rbf', C=1e1, gamma=0.1) kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1) train_sizes, train_scores_svr, test_scores_svr = \ learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10), 
scoring="mean_squared_error", cv=10) train_sizes_abs, train_scores_kr, test_scores_kr = \ learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10), scoring="mean_squared_error", cv=10) plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r", label="SVR") plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g", label="KRR") plt.xlabel("Train size") plt.ylabel("Mean Squared Error") plt.title('Learning curves') plt.legend(loc="best") plt.show()
bsd-3-clause
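A stripped-down sketch of the timing comparison in the script above, with fixed hyperparameters instead of the grid search; the data sizes and parameter values are illustrative only.

import time
import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR

rng = np.random.RandomState(0)
X = 5 * rng.rand(2000, 1)
y = np.sin(X).ravel() + 0.3 * rng.randn(2000)
X_test = np.linspace(0, 5, 10000)[:, None]

for name, est in [('KRR', KernelRidge(kernel='rbf', alpha=0.1, gamma=10)),
                  ('SVR', SVR(kernel='rbf', C=10.0, gamma=10))]:
    t0 = time.time()
    est.fit(X, y)                 # KRR fits in closed form; SVR solves a QP
    fit_t = time.time() - t0
    t0 = time.time()
    est.predict(X_test)           # SVR predicts from its support vectors only
    pred_t = time.time() - t0
    print('%s: fit %.3fs, predict %.3fs' % (name, fit_t, pred_t))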
Ektorus/bohrium
ve/cpu/tools/locate.py
1
8762
from __future__ import print_function ## 3D Lattice Boltzmann (BGK) model of a fluid. ## D3Q19 model. At each timestep, particle densities propagate ## outwards in the directions indicated in the figure. An ## equivalent 'equilibrium' density is found, and the densities ## relax towards that state, in a proportion governed by omega. ## Iain Haslam, March 2006. import util if util.Benchmark().bohrium: import bohrium as np else: import numpy as np def main(): B = util.Benchmark() nx = B.size[0] ny = B.size[1] nz = B.size[2] ITER = B.size[3] NO_OBST = 1 omega = 1.0 density = 1.0 deltaU = 1e-7 t1 = 1/3.0 t2 = 1/18.0 t3 = 1/36.0 B.start() F = np.ones((19, nx, ny, nz), dtype=np.float64) F[:] = density/19.0 FEQ = np.ones((19, nx, ny, nz), dtype=np.float64) FEQ[:] = density/19.0 T = np.zeros((19, nx, ny, nz), dtype=np.float64) #Create the scenery. BOUND = np.zeros((nx, ny, nz), dtype=np.float64) BOUNDi = np.ones((nx, ny, nz), dtype=np.float64) """ if not NO_OBST: for i in xrange(nx): for j in xrange(ny): for k in xrange(nz): if ((i-4)**2+(j-5)**2+(k-6)**2) < 6: BOUND[i,j,k] += 1.0 BOUNDi[i,j,k] += 0.0 BOUND[:,0,:] += 1.0 BOUNDi[:,0,:] *= 0.0 """ if util.Benchmark().bohrium: np.flush() for ts in xrange(0, ITER): ##Propagate / Streaming step T[:] = F #nearest-neighbours F[1,:,:,0] = T[1,:,:,-1] F[1,:,:,1:] = T[1,:,:,:-1] F[2,:,:,:-1] = T[2,:,:,1:] F[2,:,:,-1] = T[2,:,:,0] F[3,:,0,:] = T[3,:,-1,:] F[3,:,1:,:] = T[3,:,:-1,:] F[4,:,:-1,:] = T[4,:,1:,:] F[4,:,-1,:] = T[4,:,0,:] F[5,0,:,:] = T[5,-1,:,:] F[5,1:,:,:] = T[5,:-1,:,:] F[6,:-1,:,:] = T[6,1:,:,:] F[6,-1,:,:] = T[6,0,:,:] #next-nearest neighbours F[7,0 ,0 ,:] = T[7,-1 , -1,:] F[7,0 ,1:,:] = T[7,-1 ,:-1,:] F[7,1:,0 ,:] = T[7,:-1, -1,:] F[7,1:,1:,:] = T[7,:-1,:-1,:] F[8,0 ,:-1,:] = T[8,-1 ,1:,:] F[8,0 , -1,:] = T[8,-1 ,0 ,:] F[8,1:,:-1,:] = T[8,:-1,1:,:] F[8,1:, -1,:] = T[8,:-1,0 ,:] F[9,:-1,0 ,:] = T[9,1:, -1,:] F[9,:-1,1:,:] = T[9,1:,:-1,:] F[9,-1 ,0 ,:] = T[9,0 , 0,:] F[9,-1 ,1:,:] = T[9,0 ,:-1,:] F[10,:-1,:-1,:] = T[10,1:,1:,:] F[10,:-1, -1,:] = T[10,1:,0 ,:] F[10,-1 ,:-1,:] = T[10,0 ,1:,:] F[10,-1 , -1,:] = T[10,0 ,0 ,:] F[11,0 ,:,0 ] = T[11,0 ,:, -1] F[11,0 ,:,1:] = T[11,0 ,:,:-1] F[11,1:,:,0 ] = T[11,:-1,:, -1] F[11,1:,:,1:] = T[11,:-1,:,:-1] F[12,0 ,:,:-1] = T[12, -1,:,1:] F[12,0 ,:, -1] = T[12, -1,:,0 ] F[12,1:,:,:-1] = T[12,:-1,:,1:] F[12,1:,:, -1] = T[12,:-1,:,0 ] F[13,:-1,:,0 ] = T[13,1:,:, -1] F[13,:-1,:,1:] = T[13,1:,:,:-1] F[13, -1,:,0 ] = T[13,0 ,:, -1] F[13, -1,:,1:] = T[13,0 ,:,:-1] F[14,:-1,:,:-1] = T[14,1:,:,1:] F[14,:-1,:, -1] = T[14,1:,:,0 ] F[14,-1 ,:,:-1] = T[14,0 ,:,1:] F[14,-1 ,:, -1] = T[14,0 ,:,0 ] F[15,:,0 ,0 ] = T[15,:, -1, -1] F[15,:,0 ,1:] = T[15,:, -1,:-1] F[15,:,1:,0 ] = T[15,:,:-1, -1] F[15,:,1:,1:] = T[15,:,:-1,:-1] F[16,:,0 ,:-1] = T[16,:, -1,1:] F[16,:,0 , -1] = T[16,:, -1,0 ] F[16,:,1:,:-1] = T[16,:,:-1,1:] F[16,:,1:, -1] = T[16,:,:-1,0 ] F[17,:,:-1,0 ] = T[17,:,1:, -1] F[17,:,:-1,1:] = T[17,:,1:,:-1] F[17,:, -1,0 ] = T[17,:,0 , -1] F[17,:, -1,1:] = T[17,:,0 ,:-1] F[18,:,:-1,:-1] = T[18,:,1:,1:] F[18,:,:-1, -1] = T[18,:,1:,0 ] F[18,:,-1 ,:-1] = T[18,:,0 ,1:] F[18,:,-1 , -1] = T[18,:,0 ,0 ] #Densities bouncing back at next timestep BB = np.empty(F.shape) T[:] = F T[1:,:,:,:] *= BOUND[np.newaxis,:,:,:] BB[2 ,:,:,:] += T[1 ,:,:,:] BB[1 ,:,:,:] += T[2 ,:,:,:] BB[4 ,:,:,:] += T[3 ,:,:,:] BB[3 ,:,:,:] += T[4 ,:,:,:] BB[6 ,:,:,:] += T[5 ,:,:,:] BB[5 ,:,:,:] += T[6 ,:,:,:] BB[10,:,:,:] += T[7 ,:,:,:] BB[9 ,:,:,:] += T[8 ,:,:,:] BB[8 ,:,:,:] += T[9 ,:,:,:] BB[7 ,:,:,:] += T[10,:,:,:] BB[14,:,:,:] += T[11,:,:,:] 
BB[13,:,:,:] += T[12,:,:,:] BB[12,:,:,:] += T[13,:,:,:] BB[11,:,:,:] += T[14,:,:,:] BB[18,:,:,:] += T[15,:,:,:] BB[17,:,:,:] += T[16,:,:,:] BB[16,:,:,:] += T[17,:,:,:] BB[15,:,:,:] += T[18,:,:,:] # Relax calculate equilibrium state (FEQ) with equivalent speed and density to F DENSITY = np.add.reduce(F) #UX = F[5,:,:,:].copy() UX = np.ones(F[5,:,:,:].shape, dtype=np.float64) UX[:,:,:] = F[5,:,:,:] UX += F[7,:,:,:] UX += F[8,:,:,:] UX += F[11,:,:,:] UX += F[12,:,:,:] UX -= F[6,:,:,:] UX -= F[9,:,:,:] UX -= F[10,:,:,:] UX -= F[13,:,:,:] UX -= F[14,:,:,:] UX /=DENSITY #UY = F[3,:,:,:].copy() UY = np.ones(F[3,:,:,:].shape, dtype=np.float64) UY[:,:,:] = F[3,:,:,:] UY += F[7,:,:,:] UY += F[9,:,:,:] UY += F[15,:,:,:] UY += F[16,:,:,:] UY -= F[4,:,:,:] UY -= F[8,:,:,:] UY -= F[10,:,:,:] UY -= F[17,:,:,:] UY -= F[18,:,:,:] UY /=DENSITY #UZ = F[1,:,:,:].copy() UZ = np.ones(F[1,:,:,:].shape, dtype=np.float64) UZ[:,:,:] = F[1,:,:,:] UZ += F[11,:,:,:] UZ += F[13,:,:,:] UZ += F[15,:,:,:] UZ += F[17,:,:,:] UZ -= F[2,:,:,:] UZ -= F[12,:,:,:] UZ -= F[14,:,:,:] UZ -= F[16,:,:,:] UZ -= F[18,:,:,:] UZ /=DENSITY UX[0,:,:] += deltaU #Increase inlet pressure #Set bourderies to zero. UX[:,:,:] *= BOUNDi UY[:,:,:] *= BOUNDi UZ[:,:,:] *= BOUNDi DENSITY[:,:,:] *= BOUNDi U_SQU = UX**2 + UY**2 + UZ**2 # Calculate equilibrium distribution: stationary FEQ[0,:,:,:] = (t1*DENSITY)*(1.0-3.0*U_SQU/2.0) # nearest-neighbours T1 = 3.0/2.0*U_SQU tDENSITY = t2*DENSITY FEQ[1,:,:,:]=tDENSITY*(1.0 + 3.0*UZ + 9.0/2.0*UZ**2 - T1) FEQ[2,:,:,:]=tDENSITY*(1.0 - 3.0*UZ + 9.0/2.0*UZ**2 - T1) FEQ[3,:,:,:]=tDENSITY*(1.0 + 3.0*UY + 9.0/2.0*UY**2 - T1) FEQ[4,:,:,:]=tDENSITY*(1.0 - 3.0*UY + 9.0/2.0*UY**2 - T1) FEQ[5,:,:,:]=tDENSITY*(1.0 + 3.0*UX + 9.0/2.0*UX**2 - T1) FEQ[6,:,:,:]=tDENSITY*(1.0 - 3.0*UX + 9.0/2.0*UX**2 - T1) # next-nearest neighbours T1 = 3.0*U_SQU/2.0 tDENSITY = t3*DENSITY U8 = UX+UY FEQ[7,:,:,:] =tDENSITY*(1.0 + 3.0*U8 + 9.0/2.0*(U8)**2 - T1) U9 = UX-UY FEQ[8,:,:,:] =tDENSITY*(1.0 + 3.0*U9 + 9.0/2.0*(U9)**2 - T1) U10 = -UX+UY FEQ[9,:,:,:] =tDENSITY*(1.0 + 3.0*U10 + 9.0/2.0*(U10)**2 - T1) U8 *= -1.0 FEQ[10,:,:,:]=tDENSITY*(1.0 + 3.0*U8 + 9.0/2.0*(U8)**2 - T1) U12 = UX+UZ FEQ[11,:,:,:]=tDENSITY*(1.0 + 3.0*U12 + 9.0/2.0*(U12)**2 - T1) U12 *= 1.0 FEQ[14,:,:,:]=tDENSITY*(1.0 + 3.0*U12 + 9.0/2.0*(U12)**2 - T1) U13 = UX-UZ FEQ[12,:,:,:]=tDENSITY*(1.0 + 3.0*U13 + 9.0/2.0*(U13)**2 - T1) U13 *= -1.0 FEQ[13,:,:,:]=tDENSITY*(1.0 + 3.0*U13 + 9.0/2.0*(U13)**2 - T1) U16 = UY+UZ FEQ[15,:,:,:]=tDENSITY*(1.0 + 3.0*U16 + 9.0/2.0*(U16)**2 - T1) U17 = UY-UZ FEQ[16,:,:,:]=tDENSITY*(1.0 + 3.0*U17 + 9.0/2.0*(U17)**2 - T1) U17 *= -1.0 FEQ[17,:,:,:]=tDENSITY*(1.0 + 3.0*U17 + 9.0/2.0*(U17)**2 - T1) U16 *= -1.0 FEQ[18,:,:,:]=tDENSITY*(1.0 + 3.0*U16 + 9.0/2.0*(U16)**2 - T1) F *= (1.0-omega) F += omega * FEQ #Densities bouncing back at next timestep F[1:,:,:,:] *= BOUNDi[np.newaxis,:,:,:] F[1:,:,:,:] += BB[1:,:,:,:] del BB del T1 del UX, UY, UZ del U_SQU del DENSITY, tDENSITY del U8, U9, U10, U12, U13, U16, U17 if util.Benchmark().bohrium: np.flush() B.stop() B.pprint() if B.outputfn: B.tofile(B.outputfn, {'res': UX}) """ import matplotlib.pyplot as plt UX *= -1 plt.hold(True) plt.quiver(UY[:,:,4],UX[:,:,4], pivot='middle') plt.imshow(BOUND[:,:,4]) plt.show() """ if __name__ == "__main__": main()
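# --- Added illustration (not part of the original benchmark) ---
# A minimal, self-contained sketch of the BGK relaxation step performed in
# the loop above: every distribution relaxes towards its equilibrium value
# by a fraction omega. The shapes and values below are toy assumptions,
# not the benchmark's own data.
import numpy

toy_omega = 1.0
toy_F = numpy.random.rand(19, 4, 4, 4)           # toy particle densities
toy_FEQ = numpy.full_like(toy_F, toy_F.mean())   # stand-in equilibrium state
toy_F = (1.0 - toy_omega) * toy_F + toy_omega * toy_FEQ  # relax towards FEQ
print(toy_F.shape)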
lgpl-3.0
andrewnc/scikit-learn
examples/plot_isotonic_regression.py
303
1767
""" =================== Isotonic Regression =================== An illustration of the isotonic regression on generated data. The isotonic regression finds a non-decreasing approximation of a function while minimizing the mean squared error on the training data. The benefit of such a model is that it does not assume any form for the target function such as linearity. For comparison a linear regression is also presented. """ print(__doc__) # Author: Nelle Varoquaux <nelle.varoquaux@gmail.com> # Alexandre Gramfort <alexandre.gramfort@inria.fr> # Licence: BSD import numpy as np import matplotlib.pyplot as plt from matplotlib.collections import LineCollection from sklearn.linear_model import LinearRegression from sklearn.isotonic import IsotonicRegression from sklearn.utils import check_random_state n = 100 x = np.arange(n) rs = check_random_state(0) y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n)) ############################################################################### # Fit IsotonicRegression and LinearRegression models ir = IsotonicRegression() y_ = ir.fit_transform(x, y) lr = LinearRegression() lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression ############################################################################### # plot result segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)] lc = LineCollection(segments, zorder=0) lc.set_array(np.ones(len(y))) lc.set_linewidths(0.5 * np.ones(n)) fig = plt.figure() plt.plot(x, y, 'r.', markersize=12) plt.plot(x, y_, 'g.-', markersize=12) plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-') plt.gca().add_collection(lc) plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right') plt.title('Isotonic regression') plt.show()
bsd-3-clause
macks22/scikit-learn
examples/manifold/plot_mds.py
261
2616
""" ========================= Multi-dimensional scaling ========================= An illustration of the metric and non-metric MDS on generated noisy data. The reconstructed points using the metric MDS and non metric MDS are slightly shifted to avoid overlapping. """ # Author: Nelle Varoquaux <nelle.varoquaux@gmail.com> # Licence: BSD print(__doc__) import numpy as np from matplotlib import pyplot as plt from matplotlib.collections import LineCollection from sklearn import manifold from sklearn.metrics import euclidean_distances from sklearn.decomposition import PCA n_samples = 20 seed = np.random.RandomState(seed=3) X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float) X_true = X_true.reshape((n_samples, 2)) # Center the data X_true -= X_true.mean() similarities = euclidean_distances(X_true) # Add noise to the similarities noise = np.random.rand(n_samples, n_samples) noise = noise + noise.T noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0 similarities += noise mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed, dissimilarity="precomputed", n_jobs=1) pos = mds.fit(similarities).embedding_ nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12, dissimilarity="precomputed", random_state=seed, n_jobs=1, n_init=1) npos = nmds.fit_transform(similarities, init=pos) # Rescale the data pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum()) npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum()) # Rotate the data clf = PCA(n_components=2) X_true = clf.fit_transform(X_true) pos = clf.fit_transform(pos) npos = clf.fit_transform(npos) fig = plt.figure(1) ax = plt.axes([0., 0., 1., 1.]) plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20) plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g') plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b') plt.legend(('True position', 'MDS', 'NMDS'), loc='best') similarities = similarities.max() / similarities * 100 similarities[np.isinf(similarities)] = 0 # Plot the edges start_idx, end_idx = np.where(pos) #a sequence of (*line0*, *line1*, *line2*), where:: # linen = (x0, y0), (x1, y1), ... (xm, ym) segments = [[X_true[i, :], X_true[j, :]] for i in range(len(pos)) for j in range(len(pos))] values = np.abs(similarities) lc = LineCollection(segments, zorder=0, cmap=plt.cm.hot_r, norm=plt.Normalize(0, values.max())) lc.set_array(similarities.flatten()) lc.set_linewidths(0.5 * np.ones(len(segments))) ax.add_collection(lc) plt.show()
bsd-3-clause
justincassidy/scikit-learn
examples/mixture/plot_gmm_pdf.py
284
1528
""" ============================================= Density Estimation for a mixture of Gaussians ============================================= Plot the density estimation of a mixture of two Gaussians. Data is generated from two Gaussians with different centers and covariance matrices. """ import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import LogNorm from sklearn import mixture n_samples = 300 # generate random sample, two components np.random.seed(0) # generate spherical data centered on (20, 20) shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20]) # generate zero centered stretched Gaussian data C = np.array([[0., -0.7], [3.5, .7]]) stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C) # concatenate the two datasets into the final training set X_train = np.vstack([shifted_gaussian, stretched_gaussian]) # fit a Gaussian Mixture Model with two components clf = mixture.GMM(n_components=2, covariance_type='full') clf.fit(X_train) # display predicted scores by the model as a contour plot x = np.linspace(-20.0, 30.0) y = np.linspace(-20.0, 40.0) X, Y = np.meshgrid(x, y) XX = np.array([X.ravel(), Y.ravel()]).T Z = -clf.score_samples(XX)[0] Z = Z.reshape(X.shape) CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0), levels=np.logspace(0, 3, 10)) CB = plt.colorbar(CS, shrink=0.8, extend='both') plt.scatter(X_train[:, 0], X_train[:, 1], .8) plt.title('Negative log-likelihood predicted by a GMM') plt.axis('tight') plt.show()
bsd-3-clause
Silmathoron/nest-simulator
pynest/examples/spatial/grid_iaf_irr.py
20
1453
# -*- coding: utf-8 -*-
#
# grid_iaf_irr.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST.  If not, see <http://www.gnu.org/licenses/>.

"""
Create 12 freely placed iaf_psc_alpha neurons
-----------------------------------------------

BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""

import nest
import matplotlib.pyplot as plt

nest.ResetKernel()

pos = nest.spatial.free([nest.random.uniform(-0.75, 0.75),
                         nest.random.uniform(-0.5, 0.5)],
                        extent=[2., 1.5])

l1 = nest.Create('iaf_psc_alpha', 12, positions=pos)

nest.PrintNodes()

nest.PlotLayer(l1, nodesize=50)

# beautify
plt.axis([-1.0, 1.0, -0.75, 0.75])
plt.axes().set_aspect('equal', 'box')
plt.axes().set_xticks((-0.75, -0.25, 0.25, 0.75))
plt.axes().set_yticks((-0.5, 0, 0.5))
plt.grid(True)
plt.xlabel('Extent: 2.0')
plt.ylabel('Extent: 1.5')

plt.show()

# plt.savefig('grid_iaf_irr.png')
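# --- Added illustration (not part of the original example) ---
# A minimal sketch of the same free-positioning pattern with a different
# layer, reusing only calls that already appear above; the extent and neuron
# count below are arbitrary assumptions.
nest.ResetKernel()
pos2 = nest.spatial.free([nest.random.uniform(-0.25, 0.25),
                          nest.random.uniform(-0.25, 0.25)],
                         extent=[1., 1.])
l2 = nest.Create('iaf_psc_alpha', 4, positions=pos2)
nest.PrintNodes()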
gpl-2.0
michaelyin/code-for-blog
2008/wx_mpl_bars.py
12
7994
""" This demo demonstrates how to embed a matplotlib (mpl) plot into a wxPython GUI application, including: * Using the navigation toolbar * Adding data to the plot * Dynamically modifying the plot's properties * Processing mpl events * Saving the plot to a file from a menu The main goal is to serve as a basis for developing rich wx GUI applications featuring mpl plots (using the mpl OO API). Eli Bendersky (eliben@gmail.com) License: this code is in the public domain Last modified: 30.07.2008 """ import os import pprint import random import wx # The recommended way to use wx with mpl is with the WXAgg # backend. # import matplotlib matplotlib.use('WXAgg') from matplotlib.figure import Figure from matplotlib.backends.backend_wxagg import \ FigureCanvasWxAgg as FigCanvas, \ NavigationToolbar2WxAgg as NavigationToolbar class BarsFrame(wx.Frame): """ The main frame of the application """ title = 'Demo: wxPython with matplotlib' def __init__(self): wx.Frame.__init__(self, None, -1, self.title) self.data = [5, 6, 9, 14] self.create_menu() self.create_status_bar() self.create_main_panel() self.textbox.SetValue(' '.join(map(str, self.data))) self.draw_figure() def create_menu(self): self.menubar = wx.MenuBar() menu_file = wx.Menu() m_expt = menu_file.Append(-1, "&Save plot\tCtrl-S", "Save plot to file") self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt) menu_file.AppendSeparator() m_exit = menu_file.Append(-1, "E&xit\tCtrl-X", "Exit") self.Bind(wx.EVT_MENU, self.on_exit, m_exit) menu_help = wx.Menu() m_about = menu_help.Append(-1, "&About\tF1", "About the demo") self.Bind(wx.EVT_MENU, self.on_about, m_about) self.menubar.Append(menu_file, "&File") self.menubar.Append(menu_help, "&Help") self.SetMenuBar(self.menubar) def create_main_panel(self): """ Creates the main panel with all the controls on it: * mpl canvas * mpl navigation toolbar * Control panel for interaction """ self.panel = wx.Panel(self) # Create the mpl Figure and FigCanvas objects. # 5x4 inches, 100 dots-per-inch # self.dpi = 100 self.fig = Figure((5.0, 4.0), dpi=self.dpi) self.canvas = FigCanvas(self.panel, -1, self.fig) # Since we have only one plot, we can use add_axes # instead of add_subplot, but then the subplot # configuration tool in the navigation toolbar wouldn't # work. 
# self.axes = self.fig.add_subplot(111) # Bind the 'pick' event for clicking on one of the bars # self.canvas.mpl_connect('pick_event', self.on_pick) self.textbox = wx.TextCtrl( self.panel, size=(200,-1), style=wx.TE_PROCESS_ENTER) self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.textbox) self.drawbutton = wx.Button(self.panel, -1, "Draw!") self.Bind(wx.EVT_BUTTON, self.on_draw_button, self.drawbutton) self.cb_grid = wx.CheckBox(self.panel, -1, "Show Grid", style=wx.ALIGN_RIGHT) self.Bind(wx.EVT_CHECKBOX, self.on_cb_grid, self.cb_grid) self.slider_label = wx.StaticText(self.panel, -1, "Bar width (%): ") self.slider_width = wx.Slider(self.panel, -1, value=20, minValue=1, maxValue=100, style=wx.SL_AUTOTICKS | wx.SL_LABELS) self.slider_width.SetTickFreq(10, 1) self.Bind(wx.EVT_COMMAND_SCROLL_THUMBTRACK, self.on_slider_width, self.slider_width) # Create the navigation toolbar, tied to the canvas # self.toolbar = NavigationToolbar(self.canvas) # # Layout with box sizers # self.vbox = wx.BoxSizer(wx.VERTICAL) self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW) self.vbox.Add(self.toolbar, 0, wx.EXPAND) self.vbox.AddSpacer(10) self.hbox = wx.BoxSizer(wx.HORIZONTAL) flags = wx.ALIGN_LEFT | wx.ALL | wx.ALIGN_CENTER_VERTICAL self.hbox.Add(self.textbox, 0, border=3, flag=flags) self.hbox.Add(self.drawbutton, 0, border=3, flag=flags) self.hbox.Add(self.cb_grid, 0, border=3, flag=flags) self.hbox.AddSpacer(30) self.hbox.Add(self.slider_label, 0, flag=flags) self.hbox.Add(self.slider_width, 0, border=3, flag=flags) self.vbox.Add(self.hbox, 0, flag = wx.ALIGN_LEFT | wx.TOP) self.panel.SetSizer(self.vbox) self.vbox.Fit(self) def create_status_bar(self): self.statusbar = self.CreateStatusBar() def draw_figure(self): """ Redraws the figure """ str = self.textbox.GetValue() self.data = map(int, str.split()) x = range(len(self.data)) # clear the axes and redraw the plot anew # self.axes.clear() self.axes.grid(self.cb_grid.IsChecked()) self.axes.bar( left=x, height=self.data, width=self.slider_width.GetValue() / 100.0, align='center', alpha=0.44, picker=5) self.canvas.draw() def on_cb_grid(self, event): self.draw_figure() def on_slider_width(self, event): self.draw_figure() def on_draw_button(self, event): self.draw_figure() def on_pick(self, event): # The event received here is of the type # matplotlib.backend_bases.PickEvent # # It carries lots of information, of which we're using # only a small amount here. 
# box_points = event.artist.get_bbox().get_points() msg = "You've clicked on a bar with coords:\n %s" % box_points dlg = wx.MessageDialog( self, msg, "Click!", wx.OK | wx.ICON_INFORMATION) dlg.ShowModal() dlg.Destroy() def on_text_enter(self, event): self.draw_figure() def on_save_plot(self, event): file_choices = "PNG (*.png)|*.png" dlg = wx.FileDialog( self, message="Save plot as...", defaultDir=os.getcwd(), defaultFile="plot.png", wildcard=file_choices, style=wx.SAVE) if dlg.ShowModal() == wx.ID_OK: path = dlg.GetPath() self.canvas.print_figure(path, dpi=self.dpi) self.flash_status_message("Saved to %s" % path) def on_exit(self, event): self.Destroy() def on_about(self, event): msg = """ A demo using wxPython with matplotlib: * Use the matplotlib navigation bar * Add values to the text box and press Enter (or click "Draw!") * Show or hide the grid * Drag the slider to modify the width of the bars * Save the plot to a file using the File menu * Click on a bar to receive an informative message """ dlg = wx.MessageDialog(self, msg, "About", wx.OK) dlg.ShowModal() dlg.Destroy() def flash_status_message(self, msg, flash_len_ms=1500): self.statusbar.SetStatusText(msg) self.timeroff = wx.Timer(self) self.Bind( wx.EVT_TIMER, self.on_flash_status_off, self.timeroff) self.timeroff.Start(flash_len_ms, oneShot=True) def on_flash_status_off(self, event): self.statusbar.SetStatusText('') if __name__ == '__main__': app = wx.PySimpleApp() app.frame = BarsFrame() app.frame.Show() app.MainLoop()
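# --- Added illustration (not part of the original demo) ---
# A minimal, stripped-down sketch of the embedding idea above: a bare
# wx.Frame holding a single FigureCanvasWxAgg. It is defined but not run
# here, and the frame title and figure size are arbitrary assumptions.
def minimal_embedding_sketch():
    app = wx.PySimpleApp()
    frame = wx.Frame(None, -1, 'Minimal mpl-in-wx sketch')
    fig = Figure((4.0, 3.0), dpi=100)
    axes = fig.add_subplot(111)
    axes.plot([1, 2, 3], [2, 1, 3])
    FigCanvas(frame, -1, fig)  # attach the canvas to the frame
    frame.Show()
    app.MainLoop()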
unlicense
rhattersley/iris
lib/iris/tests/unit/plot/__init__.py
9
4522
# (C) British Crown Copyright 2014 - 2016, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """Unit tests for the :mod:`iris.plot` module.""" from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa # Import iris.tests first so that some things can be initialised before # importing anything else. import iris.tests as tests from iris.plot import _broadcast_2d as broadcast from iris.coords import AuxCoord from iris.tests.stock import simple_2d, lat_lon_cube @tests.skip_plot class TestGraphicStringCoord(tests.GraphicsTest): def setUp(self): super(TestGraphicStringCoord, self).setUp() self.cube = simple_2d(with_bounds=True) self.cube.add_aux_coord(AuxCoord(list('abcd'), long_name='str_coord'), 1) self.lat_lon_cube = lat_lon_cube() def tick_loc_and_label(self, axis_name, axes=None): # Intentional lazy import so that subclasses can have an opportunity # to change the backend. import matplotlib.pyplot as plt # Draw the plot to 'fix' the ticks. if axes: axes.figure.canvas.draw() else: axes = plt.gca() plt.draw() axis = getattr(axes, axis_name) locations = axis.get_majorticklocs() labels = [tick.get_text() for tick in axis.get_ticklabels()] return list(zip(locations, labels)) def assertBoundsTickLabels(self, axis, axes=None): actual = self.tick_loc_and_label(axis, axes) expected = [(-1.0, ''), (0.0, 'a'), (1.0, 'b'), (2.0, 'c'), (3.0, 'd'), (4.0, '')] self.assertEqual(expected, actual) def assertPointsTickLabels(self, axis, axes=None): actual = self.tick_loc_and_label(axis, axes) expected = [(0.0, 'a'), (1.0, 'b'), (2.0, 'c'), (3.0, 'd')] self.assertEqual(expected, actual) @tests.skip_plot class MixinCoords(object): """ Mixin class of common plotting tests providing 2-dimensional permutations of coordinates and anonymous dimensions. 
""" def _check(self, u, v, data=None): self.assertEqual(self.mpl_patch.call_count, 1) if data is not None: (actual_u, actual_v, actual_data), _ = self.mpl_patch.call_args self.assertArrayEqual(actual_data, data) else: (actual_u, actual_v), _ = self.mpl_patch.call_args self.assertArrayEqual(actual_u, u) self.assertArrayEqual(actual_v, v) def test_foo_bar(self): self.draw_func(self.cube, coords=('foo', 'bar')) u, v = broadcast(self.foo, self.bar) self._check(u, v, self.data) def test_bar_foo(self): self.draw_func(self.cube, coords=('bar', 'foo')) u, v = broadcast(self.bar, self.foo) self._check(u, v, self.dataT) def test_foo_0(self): self.draw_func(self.cube, coords=('foo', 0)) u, v = broadcast(self.foo, self.bar_index) self._check(u, v, self.data) def test_1_bar(self): self.draw_func(self.cube, coords=(1, 'bar')) u, v = broadcast(self.foo_index, self.bar) self._check(u, v, self.data) def test_1_0(self): self.draw_func(self.cube, coords=(1, 0)) u, v = broadcast(self.foo_index, self.bar_index) self._check(u, v, self.data) def test_0_foo(self): self.draw_func(self.cube, coords=(0, 'foo')) u, v = broadcast(self.bar_index, self.foo) self._check(u, v, self.dataT) def test_bar_1(self): self.draw_func(self.cube, coords=('bar', 1)) u, v = broadcast(self.bar, self.foo_index) self._check(u, v, self.dataT) def test_0_1(self): self.draw_func(self.cube, coords=(0, 1)) u, v = broadcast(self.bar_index, self.foo_index) self._check(u, v, self.dataT)
lgpl-3.0
ctogle/dilapidator
test/geometry/quat_tests.py
1
6809
from dilap.geometry.quat import quat from dilap.geometry.vec3 import vec3 import dilap.geometry.tools as dpr import matplotlib.pyplot as plt import unittest,numpy,math import pdb #python3 -m unittest discover -v ./ "*tests.py" class test_quat(unittest.TestCase): def test_av(self): a = 3*dpr.PI4 u1,u2,u3 = vec3(1,0,0),vec3(0,-1,0),vec3(0,0,1) q1,q2 = quat(0,0,0,0).av(a,u1),quat(0,0,0,0).av(a,u2) q3,q4 = quat(0,0,0,0).av(-a,u3),quat(0,0,0,0).av(-a,u2) self.assertTrue(q1.w > 0.1) self.assertTrue(q1.x > 0.1) self.assertTrue(dpr.isnear(q1.y,0)) self.assertTrue(dpr.isnear(q1.z,0)) self.assertTrue(q2.w > 0.1) self.assertTrue(dpr.isnear(q2.x,0)) self.assertTrue(q2.y < -0.1) self.assertTrue(dpr.isnear(q2.z,0)) self.assertTrue(q3.w > 0.1) self.assertTrue(dpr.isnear(q3.x,0)) self.assertTrue(dpr.isnear(q3.y,0)) self.assertTrue(q3.z < -0.1) self.assertFalse(q2 == q4.cp().flp()) self.assertTrue(q2 == q4.cnj()) def test_uu(self): u1,u2,u3 = vec3(1,0,0),vec3(0,-1,0),vec3(0,0,1) q1,q2 = quat(0,0,0,0).uu(u1,u2),quat(0,0,0,0).uu(u1,u3) q3,q4 = quat(0,0,0,0).uu(u2,u3),quat(0,0,0,0).uu(u3,u2) self.assertTrue(q1.w > 0.1) self.assertTrue(dpr.isnear(q1.x,0)) self.assertTrue(dpr.isnear(q1.y,0)) self.assertTrue(q1.z < -0.1) self.assertTrue(q2.w > 0.1) self.assertTrue(dpr.isnear(q2.x,0)) self.assertTrue(q2.y < -0.1) self.assertTrue(dpr.isnear(q2.z,0)) self.assertTrue(q3 == q4.cnj()) def test_toxy(self): q1 = quat(0,0,0,0).toxy(vec3(0,0,-1)) #print('toxy\v\t',q1) self.assertEqual(q1.w,0) self.assertEqual(q1.x,1) def test_cp(self): q1 = quat(1,2,3,4) self.assertTrue(q1 is q1) self.assertFalse(q1 is q1.cp()) self.assertTrue(q1 == q1.cp()) #def test_cpf(self): def test_isnear(self): q1,q2 = quat(1,1,1,0),quat(1,1,1,0.1) q3,q4 = quat(1,1,1,1),quat(1,1.000001,1,1) self.assertEqual(q1.isnear(q1),1) self.assertEqual(q3.isnear(q3),1) self.assertEqual(q1.isnear(q2),0) self.assertEqual(q2.isnear(q1),0) self.assertEqual(q1.isnear(q3),0) self.assertEqual(q2.isnear(q3),0) self.assertEqual(q2.isnear(q4),0) self.assertEqual(q3.isnear(q4),1) def test_mag2(self): q1,q2,q3 = quat(1,0,0,0),quat(1,1,1,0),quat(0,2,5,11) self.assertEqual(dpr.isnear(q1.mag2(),1),1) self.assertEqual(dpr.isnear(q2.mag2(),3),1) self.assertEqual(dpr.isnear(q3.mag2(),150),1) def test_mag(self): q1,q2,q3 = quat(1,0,0,0),quat(1,1,1,0),quat(0,2,5,11) self.assertEqual(dpr.isnear(q1.mag(),1),1) self.assertEqual(dpr.isnear(q2.mag(),math.sqrt(3)),1) self.assertEqual(dpr.isnear(q3.mag(),math.sqrt(150)),1) def test_nrm(self): q1,q2,q3 = quat(1,0,0,0),quat(1,1,1,0),quat(0,2,5,11) self.assertEqual(dpr.isnear(q1.cp().nrm().mag(),1),1) self.assertEqual(dpr.isnear(q2.cp().nrm().mag(),1),1) self.assertEqual(dpr.isnear(q3.cp().nrm().mag(),1),1) self.assertTrue(q1.cp().nrm().mag() == q1.mag()) self.assertTrue(q1.nrm() is q1) self.assertFalse(q2.cp().nrm().mag() == q2.mag()) self.assertTrue(q2.nrm() is q2) self.assertFalse(q3.cp().nrm().mag() == q3.mag()) self.assertTrue(q3.nrm() is q3) def test_flp(self): q1,q2 = quat(1,0,0,0),quat(1,1,1,0) q3,q4 = quat(0,2,5,11),quat(-1,1,1,0) self.assertFalse(q1.cp().flp() == q1) self.assertFalse(q2.cp().flp() == q2) self.assertTrue(q3.cp().flp() == q3) self.assertFalse(q4.cp().flp() == q4) self.assertTrue(q2.cp().flp() == q4) self.assertTrue(q1.flp() is q1) self.assertTrue(q2.flp() is q2) self.assertTrue(q3.flp() is q3) self.assertTrue(q4.flp() is q4) def test_uscl(self): q1,q2 = quat(1,0,0,0),quat(1,1,1,0) q3,q4 = quat(0,2,5,11),quat(0,1,2.5,5.5) self.assertTrue(q1.cp().uscl(1) == q1) self.assertFalse(q1.cp().uscl(3) == q1) 
self.assertTrue(q2.cp().uscl(1) == q2) self.assertFalse(q2.cp().uscl(3) == q2) self.assertTrue(q3.cp().uscl(0.5) == q4) self.assertTrue(q1.uscl(1) is q1) def test_cnj(self): q1,q2 = quat(1,0,0,0),quat(1,1,1,0) q3,q4 = quat(-1,2,5,11),quat(1,-2,-5,-11) self.assertTrue(q1.cp().cnj() == q1) self.assertTrue(q1.cnj() is q1) self.assertFalse(q2.cp().cnj() == q2) self.assertFalse(q3.cnj() == q4) def test_inv(self): a1,v1 = dpr.PI4,vec3(0,0,1) a2,v2 = dpr.threePI4,vec3(0,0,1) q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a2,v2) self.assertEqual(q1.cp().cnj(),q1.inv()) self.assertEqual(q2.cp().cnj(),q2.inv()) self.assertFalse(q1.inv() is q1) def test_add(self): q1,q2 = quat(0.5,0.3,-2.2,3),quat(1,1.1,2,-0.5) q3 = quat(1.5,1.4,-0.2,2.5) self.assertEqual(q1.add(q2),q3) self.assertFalse(q1.add(q2) is q1) def test_sub(self): q1,q2 = quat(0.5,0.3,-2.2,3),quat(1,1.1,2,-0.5) q3 = quat(-0.5,-0.8,-4.2,3.5) self.assertEqual(q1.sub(q2),q3) self.assertFalse(q1.sub(q2) is q1) def test_mul(self): a1,v1 = dpr.PI4,vec3(0,0,1) a2,v2 = dpr.threePI4,vec3(0,0,1) q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a2,v1) q3 = quat(0,1,0,0).av(a1+a2,v2) self.assertTrue(q1.mul(q2) == q3) self.assertFalse(q1.mul(q2) is q1) def test_rot(self): a1,v1 = dpr.PI4,vec3(0,0,1) a2,v2 = dpr.PI2,vec3(0,0,1) q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a1,v1) q3 = quat(0,1,0,0).av(a2,v2) self.assertTrue(q1.rot(q2) == q3) self.assertTrue(q1.rot(q2) is q1) #def test_rotps(self): def test_dot(self): a1,v1 = dpr.PI4,vec3(0,0,1) a2,v2 = dpr.PI2,vec3(0,1,0) q1,q2 = quat(1,0,0,0).av(a1,v1),quat(1,1,1,0).av(a1,v1) q3 = quat(0,1,0,0).av(a2,v2) q4 = quat(0,1,0,0).av(0,v1) self.assertTrue(dpr.isnear(q1.dot(q2),q1.mag2())) self.assertFalse(dpr.isnear(q1.dot(q3),0)) self.assertTrue(dpr.isnear(q3.dot(q4),q3.w)) def test_slerp(self): a1,v1 = dpr.PI4,vec3(0,0,1) a2,v2 = dpr.PI,vec3(0,0,1) q1,q2 = quat(1,0,0,0).av(0,v1),quat(1,1,1,0).av(a1,v1) q3 = quat(0,1,0,0).av(a2,v2) self.assertEqual(q1.slerp(q3,0.25),q2) self.assertFalse(q1.slerp(q3,0.25) is q1) if __name__ == '__main__': unittest.main()
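# --- Added illustration (not part of the original tests) ---
# A minimal numpy sketch of the standard axis-angle quaternion convention
# the av() tests above appear to exercise: w = cos(a/2) and the vector part
# is sin(a/2) times the unit axis. This is the generic textbook formula,
# not a claim about dilap's internal implementation.
def axis_angle_quat_sketch(a, axis):
    axis = numpy.asarray(axis, dtype=float)
    axis = axis / numpy.linalg.norm(axis)
    return numpy.concatenate(([math.cos(a / 2.0)], math.sin(a / 2.0) * axis))

# axis_angle_quat_sketch(math.pi / 2.0, (0, 0, 1)) ~ [0.7071, 0, 0, 0.7071]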
mit
deehzee/cs231n
assignment2/cs231n/classifiers/neural_net.py
2
13071
import numpy as np import matplotlib.pyplot as plt class TwoLayerNet(object): """ A two-layer fully-connected neural network. The net has an input dimension of N, a hidden layer dimension of H, and performs classification over C classes. We train the network with a softmax loss function and L2 regularization on the weight matrices. The network uses a ReLU nonlinearity after the first fully connected layer. In other words, the network has the following architecture: input - fully connected layer - ReLU - fully connected layer - - softmax The outputs of the second fully-connected layer are the scores for each class. """ def __init__(self, input_size, hidden_size, output_size, std=1e-4): """ Initialize the model. Weights are initialized to small random values and biases are initialized to zero. Weights and biases are stored in the variable self.params, which is a dictionary with the following keys: W1: First layer weights; has shape (D, H) b1: First layer biases; has shape (H,) W2: Second layer weights; has shape (H, C) b2: Second layer biases; has shape (C,) Inputs: - input_size: The dimension D of the input data. - hidden_size: The number of neurons H in the hidden layer. - output_size: The number of classes C. """ self.params = {} self.params['W1'] = std * np.random.randn( input_size, hidden_size) self.params['b1'] = np.zeros(hidden_size) self.params['W2'] = std * np.random.randn( hidden_size, output_size) self.params['b2'] = np.zeros(output_size) def loss(self, X, y=None, reg=0.0): """ Compute the loss and gradients for a two layer fully connected neural network. Inputs: - X: Input data of shape (N, D). Each X[i] is a training sample. - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is an integer in the range 0 <= y[i] < C. This parameter is optional; if it is not passed then we only return scores, and if it is passed then we instead return the loss and gradients. - reg: Regularization strength. Returns: If y is None, return a matrix scores of shape (N, C) where scores[i, c] is the score for class c on input X[i]. If y is not None, instead return a tuple of: - loss: Loss (data loss and regularization loss) for this batch of training samples. - grads: Dictionary mapping parameter names to gradients of those parameters with respect to the loss function; has the same keys as self.params. """ # Unpack variables from the params dictionary W1, b1 = self.params['W1'], self.params['b1'] W2, b2 = self.params['W2'], self.params['b2'] N, D = X.shape H, C = W2.shape # Compute the forward pass scores = None ############################################################## # TODO: Perform the forward pass, computing the class scores # # for the input. Store the result in the scores variable, # # which should be an array of shape (N, C). # ############################################################## # X = Input (N x D) [input] # X1 = X.W1 + b1 (N x H) [FC] # X2 = ReLU(X1) (N x H) [ReLU] # X3 = X2.W2 + b2 (N x C) [FC] # X4 = softmax(X3) (N x C) [softmax] X1 = X.dot(W1) + b1 # output of layer1 (FC) X2 = np.maximum(0, X1) # output of layer2 (ReLU) X3 = X2.dot(W2) + b2 # output of layer3 (FC) scores = X3 ############################################################## # END OF YOUR CODE # ############################################################## # If the targets are not given then jump out, we're done if y is None: return scores # Compute the loss loss = None ############################################################## # TODO: Finish the forward pass, and compute the loss. 
This # # should include both the data loss and L2 regularization # # for W1 and W2. Store the result in the variable loss, # # which should be a scalar. Use the Softmax classifier loss. # # So that your results match ours, multiply the # # regularization loss by 0.5 # ############################################################## cs = -np.max(scores, axis=1, keepdims=True) # ^^ Needed for numerical stability exps = np.exp(scores + cs) expsum = np.sum(exps, axis=1, keepdims=True) probs = exps / expsum X4 = probs losses = -np.log(probs[np.arange(N), y]) loss = np.sum(losses) # Normalize loss loss /= N # Add regularization loss loss += 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2)) ############################################################## # END OF YOUR CODE # ############################################################## # Backward pass: compute gradients grads = {} ############################################################## # TODO: Compute the backward pass, computing the derivatives # # of the weights and biases. Store the results in the grads # # dictionary. For example, grads['W1'] should store the # # gradient on W1, and be a matrix of same size # ############################################################## # X = Input (N x D) [input] # X1 = X.W1 + b1 (N x H) [FC] # X2 = ReLU(X1) (N x H) [ReLU] # X3 = X2.W2 + b2 (N x C) [FC] # X4 = softmax(X3) (N x C) [softmax] # dX3 := dL/dX3 (N x C) # dX3[i,j] = -(j==y[i]) + X4[i,j] indicator = np.zeros_like(X4) indicator[np.arange(N), y] += 1 dX3 = -indicator + X4 dX3 /= N # dW2 := dL/dW2 (H x C) # dW2 = X2^t . dX3 dW2 = np.dot(X2.T, dX3) # db2 := dL/db2 (1 x C) # db2 = [1, ..., 1] . dX3 db2 = np.sum(dX3, axis=0) # dX2 := dL/dX2 (N x H) # dL/dX2 = dL/dX3 . W2^t dX2 = np.dot(dX3, W2.T) # dX1 := dL/dX1 (N x H) # dX1 = (X1 > 0) * dX2 dX1 = (X1 > 0) * dX2 # dW1 := dL/dW1 (D x H) # dW1 = X^t . dX1 dW1 = np.dot(X.T, dX1) # db1 := dL/db1 (1 x H) # db1 = [1, ..., 1] . dX1 db1 = np.sum(dX1, axis=0) # Add regulariation to gradients dW1 += reg * W1 dW2 += reg * W2 # Put the results in the return dict grads = { 'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2, } ############################################################## # END OF YOUR CODE # ############################################################## return loss, grads def train(self, X, y, X_val, y_val, learning_rate=1e-3, learning_rate_decay=0.95, reg=1e-5, num_iters=100, batch_size=200, verbose=False): """ Train this neural network using stochastic gradient descent. Inputs: - X: A numpy array of shape (N, D) giving training data. - y: A numpy array f shape (N,) giving training labels; y[i] = c means that X[i] has label c, where 0 <= c < C. - X_val: A numpy array of shape (N_val, D) giving validation data. - y_val: A numpy array of shape (N_val,) giving validation labels. - learning_rate: Scalar giving learning rate for optimization. - learning_rate_decay: Scalar giving factor used to decay the learning rate after each epoch. - reg: Scalar giving regularization strength. - num_iters: Number of steps to take when optimizing. - batch_size: Number of training examples to use per step. - verbose: boolean; if true print progress during optimization. 
""" num_train = X.shape[0] iterations_per_epoch = max(num_train // batch_size, 1) # Use SGD to optimize the parameters in self.model loss_history = [] train_acc_history = [] val_acc_history = [] for it in xrange(num_iters): X_batch = None y_batch = None ########################################################## # TODO: Create a random minibatch of training data and # # labels, storing them in X_batch and y_batch # # respectively. # ########################################################## batch_idxs = np.random.choice(num_train, batch_size) X_batch = X[batch_idxs] y_batch = y[batch_idxs] ########################################################## # END OF YOUR CODE # ########################################################## # Compute loss and gradients using the current minibatch loss, grads = self.loss(X_batch, y=y_batch, reg=reg) loss_history.append(loss) ########################################################## # TODO: Use the gradients in the grads dictionary to # # update the parameters of the network (stored in the # # dictionary self.params) using stochastic gradient # # descent. You'll need to use the gradients stored in # # the grads dictionary defined above. # ########################################################## self.params['W1'] -= learning_rate * grads['W1'] self.params['b1'] -= learning_rate * grads['b1'] self.params['W2'] -= learning_rate * grads['W2'] self.params['b2'] -= learning_rate * grads['b2'] ########################################################## # END OF YOUR CODE # ########################################################## if verbose and it % 100 == 0: print('iteration {}/{}: loss {:f}'.format( it, num_iters, loss)) # Every epoch, check train and val accuracy and decay # learning rate. if it % iterations_per_epoch == 0: # Check accuracy train_acc = (self.predict(X_batch) == y_batch).mean() val_acc = (self.predict(X_val) == y_val).mean() train_acc_history.append(train_acc) val_acc_history.append(val_acc) # Decay learning rate learning_rate *= learning_rate_decay return { 'loss_history': loss_history, 'train_acc_history': train_acc_history, 'val_acc_history': val_acc_history, } def predict(self, X): """ Use the trained weights of this two-layer network to predict labels for data points. For each data point we predict scores for each of the C classes, and assign each data point to the class with the highest score. Inputs: - X: A numpy array of shape (N, D) giving N D-dimensional data points to classify. Returns: - y_pred: A numpy array of shape (N,) giving predicted labels for each of the elements of X. For all i, y_pred[i] = c means that X[i] is predicted to have class c, where 0 <= c < C. """ y_pred = None ############################################################## # TODO: Implement this function; it should be VERY simple! # ############################################################## scores = self.loss(X) y_pred = np.argmax(scores, axis=1) ############################################################## # END OF YOUR CODE # ############################################################## return y_pred
mit