repo_name (stringlengths, 7-92) | path (stringlengths, 5-129) | copies (stringclasses, 201 values) | size (stringlengths, 4-6) | content (stringlengths, 1.03k-375k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
maximus009/kaggle-galaxies | try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_dup3.py | 7 | 17439 |
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3
LEARNING_RATE_SCHEDULE = {
0: 0.04,
1800: 0.004,
2300: 0.0004,
}
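# keys are chunk indices: when the training loop below reaches one of them, the shared learning rate variable is set to the corresponding value.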
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every VALIDATE_EVERY chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
GEN_BUFFER_SIZE = 1
# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_dup3.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_dup3.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"
print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
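# two input representations per image: the same 3x downsampled 69x69 crop, and a second copy with an extra 45-degree rotation appended to its transform.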
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes)
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids
# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)
num_valid = num_train // 10 # integer division
num_train -= num_valid
y_valid = y_train[num_train:]
y_train = y_train[:num_train]
valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
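# the last 10% of the training ids/labels are held out as a validation split; the indices stay global so the fixed-augmentation generators below can address the full training set.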
def create_train_gen():
"""
this generates the training data in order, for postprocessing. Do not use this for actual training.
"""
data_gen_train = ra.realtime_fixed_augmented_data_gen(train_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_train, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_test, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
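# MultiRotSliceLayer turns the two 69x69 inputs into a batch of 45x45 views (rotated parts plus, with include_flip=True, their mirror images); the views share the convolutional stack below and are recombined per image by MultiRotMergeLayer(num_views=4) further down.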
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
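# note: x_valid used below is the loop variable leaked by the list comprehensions above (Python 2 behaviour); all representations have the same number of rows, so its shape[0] is the size of the validation set.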
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
# train without normalisation for the first NUM_CHUNKS_NONORM chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
layers.dump_params(l6, e=e)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# make predictions for the test set, don't forget to cut off the zeros at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for the set, don't forget to cut off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
| bsd-3-clause |
kyleam/seaborn | seaborn/miscplot.py | 34 | 1498 |
from __future__ import division
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
def palplot(pal, size=1):
"""Plot the values in a color palette as a horizontal array.
Parameters
----------
pal : sequence of matplotlib colors
colors, i.e. as returned by seaborn.color_palette()
size :
scaling factor for size of plot
"""
n = len(pal)
f, ax = plt.subplots(1, 1, figsize=(n * size, size))
ax.imshow(np.arange(n).reshape(1, n),
cmap=mpl.colors.ListedColormap(list(pal)),
interpolation="nearest", aspect="auto")
ax.set_xticks(np.arange(n) - .5)
ax.set_yticks([-.5, .5])
ax.set_xticklabels([])
ax.set_yticklabels([])
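# A minimal usage sketch (not part of the original module), assuming seaborn and an
# interactive matplotlib backend are available:
#     import seaborn as sns
#     palplot(sns.color_palette("husl", 8), size=1)
#     plt.show()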
def puppyplot(grown_up=False):
"""Plot today's daily puppy. Only works in the IPython notebook."""
from .external.six.moves.urllib.request import urlopen
from IPython.display import HTML
try:
from bs4 import BeautifulSoup
url = "http://www.dailypuppy.com"
if grown_up:
url += "/dogs"
html_doc = urlopen(url)
soup = BeautifulSoup(html_doc)
puppy = soup.find("div", {"class": "daily_puppy"})
return HTML(str(puppy.img))
except ImportError:
html = ('<img src="http://cdn-www.dailypuppy.com/dog-images/'
'decker-the-nova-scotia-duck-tolling-retriever_'
'72926_2013-11-04_w450.jpg" style="width:450px;"/>')
return HTML(html)
| bsd-3-clause |
jas02/easybuild-easyblocks | easybuild/easyblocks/x/xmipp.py | 10 | 8904 |
##
# Copyright 2015-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Xmipp, implemented as an easyblock
@author: Jens Timmerman (Ghent University)
@author: Pablo Escobar (sciCORE, SIB, University of Basel)
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import stat
import sys
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.pythonpackage import det_pylibdir
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import adjust_permissions, mkdir, write_file
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
class EB_Xmipp(EasyBlock):
"""
easyblock to install Xmipp
"""
def __init__(self, *args, **kwargs):
"""Easyblock constructor, enable building in installation directory."""
super(EB_Xmipp, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.xmipp_pythonpaths = []
def extract_step(self):
"""Extract Xmipp sources."""
# strip off 'xmipp' part to avoid having everything in a 'xmipp' subdirectory
self.cfg.update('unpack_options', '--strip-components=1')
super(EB_Xmipp, self).extract_step()
def configure_step(self):
"""Set configure options."""
if self.toolchain.mpi_family() == toolchain.INTELMPI:
mpi_bindir = os.path.join(get_software_root('impi'), 'intel64', 'bin')
else:
mpi_bindir = os.path.join(get_software_root(self.toolchain.MPI_MODULE_NAME[0]), 'bin')
root_java = get_software_root("Java")
if not root_java:
raise EasyBuildError("Module for dependency Java not loaded.")
configure_args = ' '.join([
'profile=no fast=yes warn=no release=yes gtest=yes static=no cuda=no debug=no matlab=no',
'LINKERFORPROGRAMS=%s' % os.getenv('CXX'),
'MPI_BINDIR=%s' % mpi_bindir,
'MPI_LIB=mpi',
'JAVA_HOME=%s' % os.getenv('JAVA_HOME'),
'JAVAC=javac',
'CC=%s' % os.getenv('CC'),
# pass $CXXFLAGS in Python list syntax and avoid spaces, e.g.: ['-O2','-march=native']
'CXXFLAGS=%s' % str(os.getenv('CXXFLAGS').split(' ')).replace(' ', ''),
'CXX=%s' % os.getenv('CXX'),
'MPI_CC=%s' % os.getenv('MPICC'),
# pass $CFLAGS in Python list syntax and avoid spaces, e.g.: ['-O2','-march=native']
'CCFLAGS=%s' % str(os.getenv('CFLAGS').split(' ')).replace(' ', ''),
'MPI_CXX=%s' % os.getenv('MPICXX'),
'MPI_INCLUDE=%s' % os.getenv('MPI_INC_DIR'),
'MPI_LIBDIR=%s' % os.getenv('MPI_LIB_DIR'),
'MPI_LINKERFORPROGRAMS=%s' % os.getenv('MPICXX'),
'LIBPATH=%s' % os.getenv('LD_LIBRARY_PATH'),
])
# define list of configure options, which will be passed to Xmipp's install.sh script via --configure-args
self.cfg['configopts'] = configure_args
self.log.info("Configure arguments for Xmipp install.sh script: %s", self.cfg['configopts'])
def build_step(self):
"""No custom build step (see install step)."""
pass
def install_step(self):
"""Build/install Xmipp using provided install.sh script."""
pylibdir = det_pylibdir()
self.xmipp_pythonpaths = [
# location where Python packages will be installed by Xmipp installer
pylibdir,
'protocols',
os.path.join('libraries', 'bindings', 'python'),
]
python_root = get_software_root('Python')
if python_root:
# extend $PYTHONPATH
all_pythonpaths = [os.path.join(self.installdir, p) for p in self.xmipp_pythonpaths]
# required so packages installed as extensions in Python dep are picked up
all_pythonpaths.append(os.path.join(python_root, pylibdir))
all_pythonpaths.append(os.environ.get('PYTHONPATH', ''))
env.setvar('PYTHONPATH', os.pathsep.join(all_pythonpaths))
# location where Python packages will be installed by Xmipp installer must exist already (setuptools)
mkdir(os.path.join(self.installdir, pylibdir), parents=True)
# put dummy xmipp_python script in place if Python is used as a dependency
bindir = os.path.join(self.installdir, 'bin')
mkdir(bindir)
xmipp_python = os.path.join(bindir, 'xmipp_python')
xmipp_python_script_body = '\n'.join([
'#!/bin/sh',
'%s/bin/python "$@"' % python_root,
])
write_file(xmipp_python, xmipp_python_script_body)
adjust_permissions(xmipp_python, stat.S_IXUSR|stat.S_IXGRP|stat.S_IXOTH)
pyshortver = '.'.join(get_software_version('Python').split('.')[:2])
# make sure Python.h and numpy header are found
env.setvar('CPATH', os.pathsep.join([
os.path.join(python_root, 'include', 'python%s' % pyshortver),
os.path.join(python_root, pylibdir, 'numpy', 'core', 'include'),
os.environ.get('CPATH', ''),
]))
cmd_opts = []
# disable (re)building of supplied dependencies
dep_names = [dep['name'] for dep in self.cfg['dependencies']]
for dep in ['FFTW', 'HDF5', ('libjpeg-turbo', 'jpeg'), ('LibTIFF', 'tiff'), 'matplotlib', 'Python', 'SQLite',
'Tcl', 'Tk']:
if isinstance(dep, tuple):
dep, opt = dep
else:
opt = dep.lower()
# don't check via get_software_root, check listed dependencies directly (relevant for FFTW)
if dep in dep_names:
cmd_opts.append('--%s=false' % opt)
# Python should also provide numpy/mpi4py
if dep == 'Python':
cmd_opts.extend(['--numpy=false', '--mpi4py=false'])
if '--tcl=false' in cmd_opts and '--tk=false' in cmd_opts:
cmd_opts.append('--tcl-tk=false')
# patch install.sh script to inject configure options
# setting $CONFIGURE_ARGS or using --configure-args doesn't work...
for line in fileinput.input('install.sh', inplace=1, backup='.orig.eb'):
line = re.sub(r"^CONFIGURE_ARGS.*$", 'CONFIGURE_ARGS="%s"' % self.cfg['configopts'], line)
sys.stdout.write(line)
cmd = './install.sh -j %s --unattended=true %s' % (self.cfg['parallel'], ' '.join(cmd_opts))
out, _ = run_cmd(cmd, log_all=True, simple=False)
if not re.search("Xmipp has been successfully compiled", out):
raise EasyBuildError("Xmipp installation did not complete successfully?")
def sanity_check_step(self):
"""Custom sanity check for Xmipp."""
custom_paths = {
# incomplete list, random picks, cfr. http://xmipp.cnb.csic.es/twiki/bin/view/Xmipp/ListOfProgramsv3
'files': ['bin/xmipp_%s' % x for x in ['compile', 'imagej', 'mpi_run', 'phantom_create',
'transform_filter', 'tomo_project', 'volume_align']],
'dirs': ['lib'],
}
super(EB_Xmipp, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Define Xmipp specific variables in generated module file, i.e. XMIPP_HOME."""
txt = super(EB_Xmipp, self).make_module_extra()
txt += self.module_generator.set_environment('XMIPP_HOME', self.installdir)
txt += self.module_generator.prepend_paths('PYTHONPATH', self.xmipp_pythonpaths)
return txt
| gpl-2.0 |
hsiaoyi0504/scikit-learn | doc/conf.py | 210 | 8446 |
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
jmschrei/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 |
"""
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
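# decision_surface evaluates the classifier over a regular grid covering the plot area; the reshaped Z values are drawn later with contour/contourf in View.plot_decision_surface.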
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
xiaoxiamii/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 |
"""
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
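# 25 features of which only 3 are informative (plus 2 redundant ones), so cross-validated RFE should settle on a small subset.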
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
thomasaarholt/hyperspy | hyperspy/drawing/signal.py | 4 | 4534 |
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
# This file contains plotting code generic to the BaseSignal class.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from traits.api import Undefined
from hyperspy.drawing.utils import set_axes_decor
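# The helpers below plot decomposition results: _plot_1D_component and _plot_2D_component draw a single factor as a spectrum or image, while _plot_loading draws the corresponding loadings over the navigation axes (1D or 2D).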
def _plot_1D_component(factors, idx, axes_manager, ax=None,
calibrate=True, comp_label=None,
same_window=False):
if ax is None:
ax = plt.gca()
axis = axes_manager.signal_axes[0]
if calibrate:
x = axis.axis
plt.xlabel(axis.units)
else:
x = np.arange(axis.size)
plt.xlabel('Channel index')
ax.plot(x, factors[:, idx], label='%i' % idx)
if comp_label and not same_window:
plt.title('%s' % comp_label)
return ax
def _plot_2D_component(factors, idx, axes_manager,
calibrate=True, ax=None,
comp_label=None, cmap=plt.cm.gray,
axes_decor='all'
):
if ax is None:
ax = plt.gca()
axes = axes_manager.signal_axes[::-1]
shape = axes_manager._signal_shape_in_array
extent = None
if calibrate:
extent = (axes[1].low_value,
axes[1].high_value,
axes[0].high_value,
axes[0].low_value)
if comp_label:
plt.title('%s' % idx)
im = ax.imshow(factors[:, idx].reshape(shape),
cmap=cmap, interpolation='nearest',
extent=extent)
# Set axes decorations based on user input
set_axes_decor(ax, axes_decor)
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
return ax
def _plot_loading(loadings, idx, axes_manager, ax=None,
comp_label=None, no_nans=True,
calibrate=True, cmap=plt.cm.gray,
same_window=False, axes_decor='all'):
if ax is None:
ax = plt.gca()
if no_nans:
loadings = np.nan_to_num(loadings)
axes = axes_manager.navigation_axes
if axes_manager.navigation_dimension == 2:
extent = None
# get calibration from a passed axes_manager
shape = axes_manager._navigation_shape_in_array
if calibrate:
extent = (axes[0].low_value,
axes[0].high_value,
axes[1].high_value,
axes[1].low_value)
im = ax.imshow(loadings[idx].reshape(shape),
cmap=cmap, extent=extent,
interpolation='nearest')
if calibrate:
plt.xlabel(axes[0].units)
plt.ylabel(axes[1].units)
else:
plt.xlabel('pixels')
plt.ylabel('pixels')
if comp_label:
if same_window:
plt.title('%s' % idx)
else:
plt.title('%s #%s' % (comp_label, idx))
# Set axes decorations based on user input
set_axes_decor(ax, axes_decor)
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
elif axes_manager.navigation_dimension == 1:
if calibrate:
x = axes[0].axis
else:
x = np.arange(axes[0].size)
ax.step(x, loadings[idx],
label='%s' % idx)
if comp_label and not same_window:
plt.title('%s #%s' % (comp_label, idx))
plt.ylabel('Score (a. u.)')
if calibrate:
if axes[0].units is not Undefined:
plt.xlabel(axes[0].units)
else:
plt.xlabel('depth')
else:
plt.xlabel('depth')
else:
raise ValueError('View not supported')
| gpl-3.0 |
RegulatoryGenomicsUPF/pyicoteo | pyicoteolib/enrichment.py | 1 | 40209 |
"""
Pyicoteo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, os
import math
import random
from core import Cluster, Region, InvalidLine, InsufficientData, ConversionNotSupported
from defaults import *
import utils
import bam
from regions import AnnotationGene, AnnotationTranscript, AnnotationExon, RegionWriter, read_gff_file, get_exons, get_introns, gene_slide
import warnings
try:
from shutil import move
except:
from os import rename as move
"""
Differential expression and MA plot visualization module.
"""
def _region_from_dual(self, line):
try:
self.cluster_aux.clear()
self.cluster_aux.read_line(line)
strand = None
if self.stranded_analysis:
strand = self.cluster_aux.strand
ret = Region(self.cluster_aux.name, self.cluster_aux.start, self.cluster_aux.end, name2=self.cluster_aux.name2, strand=strand)
self.cluster_aux.clear()
return ret
except ValueError:
pass #discarding header
def __calc_reg_write(self, region_file, count, calculated_region):
if count > self.region_mintags:
region_file.write(calculated_region.write())
def calculate_region(self):
"""
Calculate a region file using the reads present in the both main files to analyze.
"""
self.logger.info('Generating regions...')
self.sorted_region_path = '%s/calcregion_%s.bed'%(self._output_dir(), os.path.basename(self.current_output_path))
region_file = open(self.sorted_region_path, 'wb')
if self.region_magic:
regwriter = RegionWriter(self.gff_file, region_file, self.region_magic, no_sort=self.no_sort, logger=self.logger, write_as=BED, galaxy_workarounds=self.galaxy_workarounds)
regwriter.write_regions()
dual_reader = utils.DualSortedReader(self.current_experiment_path, self.current_control_path, self.experiment_format, self.logger)
if self.stranded_analysis:
calculate_region_stranded(self, dual_reader, region_file)
else:
calculate_region_notstranded(self, dual_reader, region_file)
region_file.flush()
def __cr_append(self, regions, region):
regions.append(region)
def calculate_region_notstranded(self, dual_reader, region_file):
calculated_region = Region()
readcount = 1
for line in dual_reader:
if not calculated_region: #first region only
calculated_region = _region_from_dual(self, line)
calculated_region.end += self.proximity
else:
new_region = _region_from_dual(self, line)
new_region.end += self.proximity
if calculated_region.overlap(new_region):
calculated_region.join(new_region)
readcount += 1
else:
calculated_region.end -= self.proximity
__calc_reg_write(self, region_file, readcount, calculated_region)
calculated_region = new_region.copy()
readcount = 1
if calculated_region:
calculated_region.end -= self.proximity
__calc_reg_write(self, region_file, readcount, calculated_region)
def calculate_region_stranded(self, dual_reader, region_file):
temp_region_file = open(self.sorted_region_path, 'wb')
region_plus = Region()
region_minus = Region()
regions = []
numreads_plus = 1
numreads_minus = 1
dual_reader = utils.DualSortedReader(self.current_experiment_path, self.current_control_path, self.experiment_format, self.logger)
for line in dual_reader:
new_region = _region_from_dual(self, line)
new_region.end += self.proximity
if not region_plus and new_region.strand == PLUS_STRAND:
region_plus = _region_from_dual(self, line)
elif not region_minus and new_region.strand != PLUS_STRAND:
region_minus = _region_from_dual(self, line)
else:
if region_plus.overlap(new_region) and region_plus.strand == new_region.strand:
region_plus.join(new_region)
numreads_plus += 1
elif region_minus.overlap(new_region) and region_minus.strand == new_region.strand:
region_minus.join(new_region)
numreads_minus += 1
else:
if new_region.strand == region_plus.strand:
region_plus.end -= self.proximity
__calc_reg_write(self, region_file, numreads_plus, region_plus)
region_plus = new_region.copy()
numreads_plus = 1
else:
region_minus.end -= self.proximity
__calc_reg_write(self, region_file, numreads_minus, region_minus)
region_minus = new_region.copy()
numreads_minus = 1
if region_plus:
region_plus.end -= self.proximity
regions.append(region_plus)
if region_minus:
region_minus.end -= self.proximity
regions.append(region_minus)
regions.sort(key=lambda x:(x.name, x.start, x.end, x.strand))
for region in regions:
region_file.write(region.write())
def get_zscore(x, mean, sd):
if sd > 0:
return float(x-mean)/sd
else:
return 0 #This points are weird anyway
def read_interesting_regions(self, file_path):
regs = []
try:
regs_file = open(file_path, 'r')
for line in regs_file:
regs.append(line.strip())
except IOError as ioerror:
self.logger.warning("Interesting regions file not found")
return regs # memory inefficient if there's a large number of interesting regions
def plot_enrichment(self, file_path):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
if self.postscript:
import matplotlib
matplotlib.use("PS")
from matplotlib.pyplot import *
from matplotlib import rcParams
rcParams.update({'font.size': 22})
rcParams['legend.fontsize'] = 14
#decide labels
if self.label1:
label_main = self.label1
else:
if self.real_control_path and self.real_experiment_path:
label_main = '%s VS %s'%(os.path.basename(self.real_experiment_path), os.path.basename(self.real_control_path))
else:
label_main = "A VS B"
if self.label2:
label_control = self.label2
else:
if self.replica_path:
label_control = '%s(A) VS %s(A)'%(os.path.basename(self.real_experiment_path), os.path.basename(self.replica_path))
else:
label_control = 'Background distribution'
#self.logger.info("Interesting regions path: %s" % (self.interesting_regions))
interesting_regs = []
if self.interesting_regions:
self.logger.info("Reading interesting regions...")
interesting_regs = read_interesting_regions(self, self.interesting_regions)
#self.logger.info("Interesting regions: %s" % (interesting_regs))
#self.logger.info("Plot path: %s" % (file_path))
interesting_A = []
interesting_M = []
#self.logger.info("disable_significant: %s" % (self.disable_significant_color))
A = []
A_prime = []
M = []
M_significant = []
A_significant = []
M_prime = []
A_medians = []
points = []
minus_points = []
all_points = []
figure(figsize=(14,22))
biggest_A = -sys.maxint #for drawing
smallest_A = sys.maxint #for drawing
biggest_M = 0 #for drawing
self.logger.info("Loading table...")
for line in open(file_path):
sline = line.split()
try:
enrich = dict(zip(enrichment_keys, sline))
# WARNING: for slide inter and slide intra: name2 = 'start:end' (no gene_id, FIXME?)
name2 = enrich['name2'].split(':')
gene_id = name2[0]
if len(name2) >= 2:
transcript_id = name2[1] # consider transcript_id? (exons)
else:
transcript_id = None
if gene_id in interesting_regs or transcript_id in interesting_regs:
interesting_M.append(float(enrich["M"]))
interesting_A.append(float(enrich["A"]))
biggest_A = max(biggest_A, float(enrich["A"]))
smallest_A = min(smallest_A, float(enrich["A"]))
biggest_M = max(biggest_M, abs(float(enrich["M"])))
biggest_A = max(biggest_A, float(enrich["A_prime"]))
smallest_A = min(smallest_A, float(enrich["A_prime"]))
biggest_M = max(biggest_M, abs(float(enrich["M_prime"])))
positive_point = self.zscore*float(enrich["sd"])+float(enrich["mean"])
negative_point = -self.zscore*float(enrich["sd"])+float(enrich["mean"])
A_median = float(enrich["A_median"])
all_points.append((A_median, positive_point, negative_point))
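# positive_point/negative_point trace the +/- zscore band around the local mean; sorted by A_median they become the red dashed significance envelope drawn below.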
if abs(float(enrich["zscore"])) < self.zscore:
M.append(float(enrich["M"]))
A.append(float(enrich["A"]))
else:
M_significant.append(float(enrich["M"]))
A_significant.append(float(enrich["A"]))
M_prime.append(float(enrich["M_prime"]))
A_prime.append(float(enrich["A_prime"]))
except ValueError:
pass #to skip the header
all_points.sort(key= lambda x:x[0])
for t in all_points:
(A_medians.append(t[0]), points.append(t[1]), minus_points.append(t[2]))
if points:
margin = 1.1
A_medians.append(biggest_A*margin)
points.append(points[-1])
minus_points.append(minus_points[-1])
A_medians.insert(0, smallest_A)
points.insert(0, points[0])
minus_points.insert(0, minus_points[0])
self.logger.info("Plotting points...")
#Background plot
subplot(211, axisbg="lightyellow")
xlabel('Average', fontsize=30)
ylabel('Log2 ratio', fontsize=30)
axis([smallest_A*margin, biggest_A*margin, -biggest_M*margin, biggest_M*margin])
plot(A_prime, M_prime, '.', label=label_control, color = '#666666')
plot(A_medians, points, 'r--', label="Z-score (%s)"%self.zscore)
plot(A_medians, minus_points, 'r--')
axhline(0, linestyle='--', color="grey", alpha=0.75)
leg = legend(fancybox=True, scatterpoints=1, numpoints=1, loc=2, ncol=4, mode="expand")
leg.get_frame().set_alpha(0.5)
#Experiment plot
subplot(212, axisbg="lightyellow")
axis([smallest_A*margin, biggest_A*margin, -biggest_M*margin, biggest_M*margin])
plot(A, M, 'k.', label=label_main)
if self.disable_significant_color:
significant_marker = 'ko'
else:
significant_marker = 'ro'
plot(A_significant, M_significant, significant_marker, label="%s (significant)"%label_main)
plot(A_medians, points, 'r--', label="Z-score (%s)"%self.zscore)
plot(A_medians, minus_points, 'r--')
if self.interesting_regions:
interesting_label = label_main + ' (interesting)'
plot(interesting_A, interesting_M, 'H', label=interesting_label, color='#00EE00') # plotting "interesting" regions
axhline(0, linestyle='--', color="grey", alpha=0.75)
xlabel('Average', fontsize=30)
ylabel('Log2 ratio', fontsize=30)
leg2 = legend(fancybox=True, scatterpoints=1, numpoints=1, loc=2, ncol=4)
leg2.get_frame().set_alpha(0.7)
self._save_figure("enrichment_MA", width=500, height=2800)
else:
self.logger.warning("Nothing to plot.")
except ImportError:
if self.debug:
raise
__matplotlibwarn(self)
def __matplotlibwarn(self):
#FIXME move to utils.py or plotting module
    self.logger.warning('Pyicos cannot find an installation of matplotlib, so no plot will be drawn. If you want a plot of the correlation values, install the matplotlib library.')
def __calc_M(signal_a, signal_b):
return math.log(float(signal_a)/float(signal_b), 2)
def __calc_A(signal_a, signal_b):
return (math.log(float(signal_a), 2)+math.log(float(signal_b), 2))/2
def _calculate_MA(self, region_path, read_counts, factor = 1, replica_factor = 1, file_a_reader=None, file_b_reader=None, replica_reader=None):
tags_a = []
tags_b = []
numreads_background_1 = 0
numreads_background_2 = 0
total_reads_background_1 = 0
total_reads_background_2 = 0
self.logger.debug("Inside _calculate_MA")
self.regions_analyzed_count = 0
enrichment_result = [] #This will hold the name, start and end of the region, plus the A, M, 'A and 'M
if NOWRITE not in self.operations:
out_file = open(self.current_output_path, 'wb')
for region_line in open(region_path):
sline = region_line.split()
region_of_interest = self._region_from_sline(sline)
if region_of_interest:
region_a = None
replica = None
replica_tags = None
signal_a = -1
signal_b = -1
signal_background_1 = -1
signal_background_2 = -1
swap1 = Region()
swap2 = Region()
if read_counts:
signal_a = float(sline[6])
signal_b = float(sline[7])*factor
signal_background_1 = float(sline[8])
signal_background_2 = float(sline[9])*replica_factor
if CHECK_REPLICAS in self.operations:
self.experiment_values.append(signal_background_1)
self.replica_values.append(signal_background_2)
else:
self.logger.debug("Reading tags for %s ..."%region_of_interest)
if self.experiment_format == BAM:
tags_a = len(file_a_reader.get_overlaping_clusters(region_of_interest, overlap=self.overlap))
tags_b = len(file_b_reader.get_overlaping_clusters(region_of_interest, overlap=self.overlap))
else:
tags_a = file_a_reader.get_overlaping_counts(region_of_interest, overlap=self.overlap)
tags_b = file_b_reader.get_overlaping_counts(region_of_interest, overlap=self.overlap)
if self.use_replica:
if self.experiment_format == BAM:
replica_tags = len(replica_reader.get_overlaping_clusters(region_of_interest, overlap=self.overlap))
else:
replica_tags = replica_reader.get_overlaping_counts(region_of_interest, overlap=self.overlap)
self.logger.debug("... done. tags_a: %s tags_b: %s"%(tags_a, tags_b))
#if we are using pseudocounts, use the union, use the intersection otherwise
if (self.pseudocount and (tags_a or tags_b)) or (not self.pseudocount and tags_a and tags_b):
signal_a = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount, factor, self.total_reads_a, tags_a)
signal_b = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount, factor, self.total_reads_b, tags_b)
self.already_norm = True
if not self.counts_file:
if (self.pseudocount and (tags_a or tags_b)) or (not self.pseudocount and tags_a and tags_b):
if self.use_replica:
replica = region_of_interest.copy()
#replica.add_tags(replica_tags)
numreads_background_1 = tags_a
numreads_background_2 = replica_tags
total_reads_background_1 = self.total_reads_a
total_reads_background_2 = self.total_reads_replica
signal_background_1 = signal_a
signal_background_2 = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount,
replica_factor, self.total_reads_replica, replica_tags)
else:
numreads_background_1 = 0
numreads_background_2 = 0
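                        # no replica available: build a swap background by randomly splitting the pooled A+B reads into two pseudo-samples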
for i in range(0, tags_a+tags_b):
if random.uniform(0,2) > 1:
numreads_background_1 += 1
else:
numreads_background_2 += 1
total_reads_background_1 = total_reads_background_2 = self.average_total_reads
signal_background_1 = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount,
replica_factor, self.average_total_reads, numreads_background_1)
signal_background_2 = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount,
replica_factor, self.average_total_reads, numreads_background_2)
            #if there is no data in the replica or in the swap and we are not using pseudocounts, don't write the data
if signal_a > 0 and signal_b > 0 and signal_background_1 > 0 and signal_background_2 > 0 or self.use_MA:
if self.use_MA and not self.already_norm:
A = float(sline[10])
M = float(sline[11])
A_prime = float(sline[16])
M_prime = float(sline[17])
else:
if not self.already_norm: #TODO refractor
if self.len_norm: #read per kilobase in region
signal_a = 1e3*(float(signal_a)/len(region_of_interest))
signal_b = 1e3*(float(signal_b)/len(region_of_interest))
signal_background_1 = 1e3*(float(signal_background_1)/len(region_of_interest))
signal_background_2 = 1e3*(float(signal_background_2)/len(region_of_interest))
if self.n_norm: #per million reads in the sample
signal_a = 1e6*(float(signal_a)/self.total_reads_a)
signal_b = 1e6*(float(signal_b)/self.total_reads_b)
if self.use_replica:
signal_background_1 = signal_a
signal_background_2 = 1e6*(float(signal_background_2)/self.total_reads_replica)
else:
signal_background_1 = 1e6*(float(signal_background_1)/self.average_total_reads)
signal_background_2 = 1e6*(float(signal_background_2)/self.average_total_reads)
A = __calc_A(signal_a, signal_b)
M = __calc_M(signal_a, signal_b)
A_prime = __calc_A(signal_background_1, signal_background_2)
M_prime = __calc_M(signal_background_1, signal_background_2)
if CHECK_REPLICAS in self.operations:
self.experiment_values.append(signal_background_1)
self.replica_values.append(signal_background_2)
if NOWRITE not in self.operations:
out_file.write("%s\n"%("\t".join([region_of_interest.write().rstrip("\n"), str(signal_a), str(signal_b), str(signal_background_1), str(signal_background_2), str(A), str(M), str(self.total_reads_a), str(self.total_reads_b), str(tags_a), str(tags_b), str(A_prime), str(M_prime), str(total_reads_background_1), str(total_reads_background_2), str(numreads_background_1), str(numreads_background_2)])))
self.regions_analyzed_count += 1
self.logger.debug("LEAVING _calculate_MA")
if NOWRITE in self.operations:
return ""
else:
out_file.flush()
out_file.close()
# Outputting to HTML (if specified)
if self.html_output is not None:
self.logger.info("Generating HTML")
try:
from jinja2 import Environment, PackageLoader, Markup
except:
self.logger.error("Could not find the jinja2 library")
return out_file.name
loadr = PackageLoader('pyicoteolib', 'templates')
env = Environment(loader=loadr)
template = env.get_template('enrich_html.html')
def jinja_read_file(filename):
f = open(filename, 'r')
#for line in f:
# print line
txt = ''.join(f.readlines())
f.close()
return txt
env.globals['jinja_read_file'] = jinja_read_file
if self.galaxy_workarounds: # Galaxy changes the working directory when outputting multiple files
parent_dir = "./"
else:
parent_dir = os.sep.join(out_file.name.split(os.sep)[0:-1]) + "/"
plot_path = parent_dir + "enrichment_MA_" + out_file.name.split(os.sep)[-1] + ".png"
bed_path = parent_dir + out_file.name.split(os.sep)[-1]
html_file = open(self.html_output, 'w')
html_file.write(template.render({'page_title': 'Enrichment results', 'results_output': jinja_read_file(out_file.name), 'plot_path': plot_path, 'bed_path': bed_path}))
html_file.flush()
html_file.close()
return out_file.name
def _calculate_total_lengths(self):
msg = "Calculating enrichment in regions"
if self.counts_file:
self.sorted_region_path = self.counts_file
if (not self.total_reads_a or not self.total_reads_b or (not self.total_reads_replica and self.use_replica)) and not self.use_MA:
self.logger.info("... counting from counts file...")
self.total_reads_a = 0
self.total_reads_b = 0
if self.total_reads_replica:
self.total_reads_replica = 0
else:
self.total_reads_replica = 1
for line in open(self.counts_file):
try:
enrich = dict(zip(enrichment_keys, line.split()))
self.total_reads_a += float(enrich["signal_a"])
self.total_reads_b += float(enrich["signal_b"])
if self.use_replica:
self.total_reads_replica += float(enrich["signal_prime_2"])
except ValueError:
self.logger.debug("(Counting) skip header...")
else:
self.logger.info("... counting number of lines in files...")
if not self.total_reads_a:
if self.experiment_format == BAM:
self.total_reads_a = bam.size(self.current_experiment_path)
else:
self.total_reads_a = sum(1 for line in utils.open_file(self.current_experiment_path, self.experiment_format, logger=self.logger))
if not self.total_reads_b:
if self.experiment_format == BAM:
self.total_reads_b = bam.size(self.current_control_path)
else:
self.total_reads_b = sum(1 for line in utils.open_file(self.current_control_path, self.control_format, logger=self.logger))
if self.use_replica and not self.total_reads_replica:
if self.experiment_format == BAM:
self.total_reads_replica = bam.size(self.replica_path)
else:
self.total_reads_replica = sum(1 for line in utils.open_file(self.replica_path, self.experiment_format, logger=self.logger))
self.logger.debug("Number lines in experiment A: %s Experiment B: %s"%(self.total_reads_a, self.total_reads_b))
if self.use_replica:
msg = "%s using replicas..."%msg
else:
msg = "%s using swap..."%msg
self.logger.info(msg)
self.average_total_reads = (self.total_reads_a+self.total_reads_b)/2
def enrichment(self):
file_a_reader = file_b_reader = replica_reader = None
self.use_replica = (bool(self.replica_path) or (bool(self.counts_file) and self.use_replica_flag))
self.logger.debug("Use replica: %s"%self.use_replica)
if not USE_MA in self.operations:
_calculate_total_lengths(self)
if not self.counts_file:
file_a_reader = utils.read_fetcher(self.current_experiment_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential, only_counts=True)
file_b_reader = utils.read_fetcher(self.current_control_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential, only_counts=True)
if self.use_replica:
replica_reader = utils.read_fetcher(self.current_replica_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential, only_counts=True)
if self.sorted_region_path:
self.logger.info('Using region file %s (%s)'%(self.region_path, self.region_format))
else:
calculate_region(self) #create region file semi automatically
self.total_regions = sum(1 for line in open(self.sorted_region_path))
self.logger.info("... analyzing regions, calculating normalized counts, A / M and replica or swap...")
self.already_norm = False
if self.use_MA:
ma_path = self.counts_file
else:
ma_path = self.sorted_region_path
out_path = _calculate_MA(self, ma_path, bool(self.counts_file), 1, 1, file_a_reader, file_b_reader, replica_reader)
self.already_norm = True
self.logger.debug("Already normalized: %s"%self.already_norm)
if self.tmm_norm:
if CHECK_REPLICAS in self.operations:
self.experiment_values = []
self.replica_values = []
self.logger.info("TMM Normalizing...")
tmm_factor = calc_tmm_factor(self, out_path, self.regions_analyzed_count, False)
replica_tmm_factor = 1
if self.use_replica:
replica_tmm_factor = calc_tmm_factor(self, out_path, self.regions_analyzed_count, True)
#move output file to old output
#use as input
old_output = '%s/notnormalized_%s'%(self._current_directory(), os.path.basename(self.current_output_path))
move(os.path.abspath(self.current_output_path), old_output)
out_path = _calculate_MA(self, old_output, True, tmm_factor, replica_tmm_factor, True) #recalculate with the new factor, using the counts again
if self.quant_norm:
self.logger.info("Full quantile normalization...")
signal_a = []
signal_prime_1 = []
enrich = []
for line in open(out_path):
sline = line.split()
enrich_line = dict(zip(enrichment_keys, sline))
enrich.append(enrich_line)
signal_a.append(float(enrich_line['signal_a']))
signal_prime_1.append(float(enrich_line['signal_prime_1']))
#full quantile normalization
signal_a.sort()
enrich.sort(key=lambda x:float(x['signal_b']))
quant_counts = open('%s/quantcounts_%s'%(self._current_directory(), os.path.basename(self.current_output_path)), 'w')
for i in range(len(enrich)):
enrich[i]['signal_b'] = signal_a[i]
self.logger.info("Full quantile normalization replica...")
#full quantile normalization of the replica
signal_prime_1.sort()
enrich.sort(key=lambda x:float(x['signal_prime_2']))
for i in range(len(enrich)):
enrich[i]['signal_prime_2'] = signal_prime_1[i]
quant_counts.write("%s\n"%"\t".join(str(enrich[i][key]) for key in enrichment_keys[:20])) #write the lines
quant_counts.flush()
out_path = _calculate_MA(self, quant_counts.name, True, 1, 1, True) #recalculate with the new factor, using the counts again
self._manage_temp_file(quant_counts.name)
self.logger.info("%s regions analyzed."%self.regions_analyzed_count)
if not NOWRITE in self.operations:
self.logger.info("Enrichment result saved to %s"%self.current_output_path)
if CHECK_REPLICAS in self.operations:
check_replica(self)
return out_path
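# weight used when averaging M values for the TMM normalization factor (inverse-variance style weighting)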
def _sub_tmm(counts_a, counts_b, reads_a, reads_b):
return (counts_a-reads_a)/(counts_a*reads_a) + (counts_b-reads_b)/(counts_b*reads_b)
def calc_tmm_factor(self, file_counts, total_regions, replica):
if replica:
signal_1 = "signal_prime_1"
signal_2 = "signal_prime_2"
M = "M_prime"
reads_2 = self.total_reads_replica
else:
signal_1 = "signal_a"
signal_2 = "signal_b"
M = "M"
reads_2 = self.total_reads_b
values_list = []
#read the file inside the values_list
for line in open(file_counts):
sline = line.split()
values_list.append(dict(zip(enrichment_keys, sline)))
a_trim_number = int(round(total_regions*self.a_trim))
#discard the bad A
self.logger.debug("Removing the worst A (%s regions, %s percent)"%(a_trim_number, self.a_trim*100))
values_list.sort(key=lambda x:float(x["A"])) #sort by A
for i in range (0, a_trim_number):
values_list.pop(0)
values_list.sort(key=lambda x:float(x[M])) #sort by M
    m_trim_number = int(round(total_regions*(self.m_trim/2))) #this number is half the value of the flag, because we will trim half below and half above
#remove on the left
for i in range(0, m_trim_number):
values_list.pop(0)
#remove on the right
for i in range(0, m_trim_number):
values_list.pop(-1)
#now calculate the normalization factor
arriba = 0
abajo = 0
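    # 'arriba'/'abajo' (Spanish for "top"/"bottom") accumulate the weighted sum of M values and the sum of weights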
for value in values_list:
w = _sub_tmm(float(value[signal_1]), float(value[signal_2]), self.total_reads_a, reads_2)
arriba += w*float(value[M])
abajo += w
try:
factor = 2**(arriba/abajo)
except ZeroDivisionError:
self.logger.warning("Division by zero, TMM factor could not be calculated.")
factor = 1
if replica:
self.logger.info("Replica TMM Normalization Factor: %s"%factor)
else:
self.logger.info("TMM Normalization Factor: %s"%factor)
return factor
def __load_enrichment_result(values_path):
ret = []
for line in open(values_path):
sline = line.split()
try:
float(sline[1])
ret.append(dict(zip(enrichment_keys, sline)))
except ValueError:
pass
return ret
def calculate_zscore(self, values_path):
num_regions = sum(1 for line in open(values_path))
bin_size = int(self.binsize*num_regions)
if bin_size < 50:
self.logger.warning("The bin size results in a sliding window smaller than 50, adjusting window to 50 in order to get statistically meaningful results.")
bin_size = 50
bin_step = max(1, int(round(self.bin_step*bin_size)))
self.logger.info("Enrichment window calculation using a sliding window size of %s, sliding with a step of %s"%(bin_size, bin_step))
self.logger.info("... calculating zscore...")
enrichment_result = __load_enrichment_result(values_path)
enrichment_result.sort(key= lambda x:(float(x["A_prime"])))
self.logger.debug("Number of loaded counts: %s"%len(enrichment_result))
self.points = []
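    # slide a window of bin_size regions (in steps of bin_step) over the counts sorted by A_prime; each window yields [A median, mean(M_prime), sd(M_prime)]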
#get the standard deviations
for i in range(0, num_regions-bin_size+bin_step, bin_step):
#get the slice
if i+bin_size < num_regions:
result_chunk = enrichment_result[i:i+bin_size]
else:
result_chunk = enrichment_result[i:] #last chunk
#retrieve the values
mean_acum = 0
a_acum = 0
Ms_replica = []
for entry in result_chunk:
mean_acum += float(entry["M_prime"])
a_acum += float(entry["A_prime"])
Ms_replica.append(float(entry["M_prime"]))
#add them to the points of mean and sd
mean = mean_acum/len(result_chunk)
sd = math.sqrt((sum((x - mean)**2 for x in Ms_replica))/len(Ms_replica))
#the A median
A_median = a_acum / len(result_chunk)
        self.points.append([A_median, mean, sd]) #The A assigned to the window, the mean and the standard deviation
#self.logger.debug("Window of %s length, with A median: %s mean: %s sd: %s"%(len(result_chunk), self.points[-1][0], self.points[-1][1], self.points[-1][2], len(self.points)))
#update z scores
for entry in enrichment_result:
entry["A_median"] = 0
entry["mean"] = 0
entry["sd"] = 0
entry["zscore"] = 0
closest_A = sys.maxint
sd_position = 0
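        # find the sliding window whose A median is closest to this region's A (self.points is ordered by A)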
for i in range(0, len(self.points)):
new_A = self.points[i][0]
if new_A != closest_A: #skip repeated points
if abs(closest_A - float(entry["A"])) >= abs(new_A - float(entry["A"])):
closest_A = new_A
sd_position = i
else:
break #already found, no need to go further since the points are ordered
entry["A_median"] = closest_A
        if self.points: #only calculate if there were windows...
__sub_zscore(self.sdfold, entry, self.points[sd_position])
if not self.points: # ... otherwise give a warning
self.logger.warning("Insufficient number of regions analyzed (%s), z-score values could not be calculated"%num_regions)
enrichment_result.sort(key=lambda x:(x["name"], int(x["start"]), int(x["end"])))
old_file_path = '%s/before_zscore_%s'%(self._current_directory(), os.path.basename(values_path)) #create path for the outdated file
move(os.path.abspath(values_path), old_file_path) #move the file
new_file = file(values_path, 'w') #open a new file in the now empty file space
if not self.skip_header:
new_file.write('\t'.join(enrichment_keys))
new_file.write('\n')
for entry in enrichment_result:
new_file.write("\t".join(str(entry[key]) for key in enrichment_keys)+"\n")
self._manage_temp_file(old_file_path)
return values_path
def __sub_zscore(sdfold, entry, point):
entry["mean"] = str(point[1])
entry["sd"] = str(point[2])
entry["zscore"] = str(get_zscore(float(entry["M"]), float(entry["mean"]), sdfold*float(entry["sd"])))
def check_replica(self):
#discard everything below the flag
new_experiment = []
new_replica = []
min_value = sys.maxint
max_value = -sys.maxint
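    # keep only regions where both experiment and replica pass the count filter, and compare them in log2 space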
for i in range(len(self.replica_values)):
if self.experiment_values[i] > self.count_filter and self.replica_values[i] > self.count_filter:
new_experiment.append(math.log(self.experiment_values[i], 2))
new_replica.append(math.log(self.replica_values[i], 2))
min_value = min(min_value, math.log(self.experiment_values[i], 2), math.log(self.replica_values[i], 2))
max_value = max(max_value, math.log(self.experiment_values[i], 2), math.log(self.replica_values[i], 2))
#print self.replica_values
self.experiment_values = new_experiment
self.replica_values = new_replica
try:
if self.postscript:
import matplotlib
matplotlib.use("PS")
from matplotlib.pyplot import plot, show, xlabel, ylabel, axhline, axis, clf, text, title, xlim, ylim
except:
__matplotlibwarn(self)
return 0
clf()
r_squared = utils.pearson(self.experiment_values, self.replica_values)**2
text(min_value+abs(max_value)*0.1, max_value-abs(max_value)*0.2, r'Pearson $R^2$= %s'%round(r_squared, 3), fontsize=18, bbox={'facecolor':'yellow', 'alpha':0.5, 'pad':10})
xlabel("log2(%s)"%self.experiment_label, fontsize=18)
ylabel("log2(%s)"%self.replica_label, fontsize=18)
xlim(min_value, max_value)
ylim(min_value, max_value)
title(self.title_label, fontsize=24)
plot(self.experiment_values, self.replica_values, '.')
self._save_figure("check_replica")
def check_replica_correlation(self):
"No usado, de momento"
min_tags = 20
experiment_reader = utils.read_fetcher(self.current_experiment_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential)
replica_reader = utils.read_fetcher(self.current_replica_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential)
correlations_acum = 0
num_correlations = 0
for region_line in open(self.region_path):
sline = region_line.split()
region_experiment = self._region_from_sline(sline)
region_replica = region_experiment.copy()
tags_experiment = experiment_reader.get_overlaping_clusters(region_experiment, overlap=1)
tags_replica = replica_reader.get_overlaping_clusters(region_experiment, overlap=1)
count_experiment = len(tags_experiment)
count_replica = len(tags_replica)
correlations = []
if count_experiment+count_replica > min_tags:
region_experiment.add_tags(tags_experiment, clusterize=True)
region_replica.add_tags(tags_replica, clusterize=True)
num_correlations += 1
correlation = utils.pearson(region_experiment.get_array(), region_replica.get_array())
correlations_acum += max(0, correlation)
correlations.append(correlation)
print correlations_acum/num_correlations
try:
if self.postscript:
import matplotlib
matplotlib.use("PS")
from matplotlib.pyplot import plot, boxplot, show, legend, figure, xlabel, ylabel, subplot, axhline, axis
except:
__matplotlibwarn(self)
return 0
print correlations
boxplot(correlations)
self._save_figure("check_replica") | gpl-3.0 |
bionet/ted.python | demos/iaf_delay_demo.py | 1 | 3443 | #!/usr/bin/env python
"""
Demos of MIMO time encoding and decoding algorithms that use IAF
neurons with delays.
"""
# Copyright (c) 2009-2015, Lev Givon
# All rights reserved.
# Distributed under the terms of the BSD license:
# http://www.opensource.org/licenses/bsd-license
import numpy as np
# Set matplotlib backend so that plots can be generated without a
# display:
import matplotlib
matplotlib.use('AGG')
from bionet.utils.misc import func_timer
import bionet.utils.band_limited as bl
import bionet.utils.plotting as pl
import bionet.ted.iaf as iaf
# For determining output plot file names:
output_name = 'iaf_delay_demo_'
output_count = 0
output_ext = '.png'
# Define input signal generation parameters:
T = 0.05
dur = 2*T
dt = 1e-6
f = 100
bw = 2*np.pi*f
np.random.seed(0)
noise_power = None
comps = 8
if noise_power == None:
fig_title = 'IAF Input Signal with No Noise'
else:
fig_title = 'IAF Input Signal with %d dB of Noise' % noise_power
M = 3 # number of input signals
N = 9 # number of neurons
# Starting and ending points of interval that is encoded:
t_start = 0.02
t_end = t_start+T
if t_end > dur:
raise ValueError('t_start is too large')
k_start = int(np.round(t_start/dt))
k_end = int(np.round(t_end/dt))
t_enc = np.arange(k_start, k_end, dtype=np.float)*dt
u_list = []
for i in xrange(M):
fig_title_in = fig_title + ' (Signal #' + str(i+1) + ')'
print fig_title_in
u = func_timer(bl.gen_band_limited)(dur, dt, f, noise_power, comps)
u /= max(u)
u *= 1.5
pl.plot_signal(t_enc, u[k_start:k_end], fig_title_in,
output_name + str(output_count) + output_ext)
u_list.append(u)
output_count += 1
t = np.arange(len(u_list[0]), dtype=np.float)*dt
# Define neuron parameters:
def randu(a, b, *d):
"""Create an array of the given shape and propagate it with random
samples from a uniform distribution over ``[a, b)``."""
if a >= b:
raise ValueError('b must exceed a')
return a+(b-a)*np.random.rand(*d)
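# Randomly draw the encoder parameters passed to iaf_encode_delay below: per-neuron biases b,
# thresholds d and integration constants k, plus N x M delay (a) and weight (w) matrices.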
b_list = list(randu(2.3, 3.3, N))
d_list = list(randu(0.15, 0.25, N))
k_list = list(0.01*np.ones(N))
a_list = map(list, np.reshape(np.random.exponential(0.003, N*M), (N, M)))
w_list = map(list, np.reshape(randu(0.5, 1.0, N*M), (N, M)))
fig_title = 'Signal Encoded Using Delayed IAF Encoder'
print fig_title
s_list = func_timer(iaf.iaf_encode_delay)(u_list, t_start, dt, b_list, d_list,
k_list, a_list, w_list)
for i in xrange(M):
for j in xrange(N):
fig_title_out = fig_title + '\n(Signal #' + str(i+1) + \
', Neuron #' + str(j+1) + ')'
pl.plot_encoded(t_enc, u_list[i][k_start:k_end],
s_list[j][np.cumsum(s_list[j])<T],
fig_title_out,
output_name + str(output_count) + output_ext)
output_count += 1
fig_title = 'Signal Decoded Using Delayed IAF Decoder'
print fig_title
u_rec_list = func_timer(iaf.iaf_decode_delay)(s_list, T, dt,
b_list, d_list, k_list,
a_list, w_list)
for i in xrange(M):
fig_title_out = fig_title + ' (Signal #' + str(i+1) + ')'
pl.plot_compare(t_enc, u_list[i][k_start:k_end],
u_rec_list[i][0:k_end-k_start], fig_title_out,
output_name + str(output_count) + output_ext)
output_count += 1
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Static_Normal_Contact/Normal_Behviour/SoftContact_NonLinHardSoftShear/plot.py | 1 | 1585 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import sys
import numpy as np;
################ Node # 2 Displacement #############################
#######################################
## Analytical Solution
#######################################
finput = h5py.File('Analytical_Solution.feioutput')
plt.figure()
# Read the time, normal strain and normal stress
times = finput["time"][:]
normal_strain = finput["/Model/Elements/Element_Outputs"][6,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
# plt.plot(normal_strain,normal_stress,'-r',Linewidth=4,label='Analytical Solution')
plt.hold(True)
#######################################
## Current Solution
#######################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time, normal strain and normal stress
times = finput["time"][:]
normal_strain = finput["/Model/Elements/Element_Outputs"][6,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain,normal_stress,'-k',linewidth=4,label='Numerical Solution')
plt.xlabel(r"Normal Strain $\epsilon$")
plt.ylabel(r"Normal Stress $\sigma$")
plt.legend()
plt.savefig("Contact_Normal_Interface_Behavour.pdf", bbox_inches='tight')
# plt.show()
# #####################################################################
| cc0-1.0 |
hakujyo/chessplaying_robot | matchpicture.py | 1 | 1381 |
import cv2
import numpy as np
import time
from matplotlib import pyplot as plt
x = 0
y = 0
def makematchpicture(Grayurl):
img = cv2.imread(Grayurl, 0)
img2 = img.copy()
template = cv2.imread('test.png', 0)
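    # template size: shape is (rows, cols), reversed here to (width, height)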
w, h = template.shape[::-1]
methods = ['cv2.TM_SQDIFF']
for meth in methods:
img = img2.copy()
method = eval(meth)
# Apply template Matching
res = cv2.matchTemplate(img, template, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
print("模板匹配:",int(top_left[0] + w / 2), int(top_left[1] + h / 2))
cv2.rectangle(img, top_left, bottom_right, 255, 2)
plt.figure()
plt.subplot(121), plt.imshow(res, cmap='gray')
plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(img, cmap='gray')
plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
plt.suptitle(meth)
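        # expose the matched center point through the module-level globals x and y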
global x
global y
x = int(top_left[0] + w / 2)
y = int(top_left[1] + h / 2)
| gpl-3.0 |
jjx02230808/project0223 | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
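# ICA recovers the sources only up to scale, so rescale them to unit variance for plotting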
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
bikong2/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
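    # with warm_start=True, each call to fit() only adds the trees needed to reach the new n_estimators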
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
wathen/PhD | MHD/FEniCS/MHD/Stabilised/Precond/MHDstabtest.py | 1 | 11477 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
from dolfin import *
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
#import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import time
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import ExactSol
import Solver as S
import MHDmatrixPrecondSetup as PrecondSetup
import NSprecondSetup
m = 7
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
NSave = np.zeros((m-1,1))
Mave = np.zeros((m-1,1))
TotalTime = np.zeros((m-1,1))
nn = 2
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0]= 1e0
for xx in xrange(1,m):
print xx
level[xx-1] = xx
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
# parameters["form_compiler"]["quadrature_degree"] = 6
# parameters = CP.ParameterSetup()
mesh = UnitSquareMesh(nn,nn)
order = 1
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "DG", order-1)
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
W = MixedFunctionSpace([Velocity,Pressure,Magnetic,Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()]
def boundary(x, on_boundary):
return on_boundary
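    # manufactured exact solution (u0, p0, b0, r0) and the matching forcing terms for the 2D MHD system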
u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(4,1)
bcu = DirichletBC(W.sub(0),u0, boundary)
bcb = DirichletBC(W.sub(2),b0, boundary)
bcr = DirichletBC(W.sub(3),r0, boundary)
# bc = [u0,p0,b0,r0]
bcs = [bcu,bcb,bcr]
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
(u, p, b, r) = TrialFunctions(W)
(v, q, c,s ) = TestFunctions(W)
kappa = 1.0
Mu_m =1e1
MU = 1.0
IterType = 'Full'
F_NS = -MU*Laplacian+Advection+gradPres-kappa*NS_Couple
if kappa == 0:
F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple
else:
F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple
params = [kappa,Mu_m,MU]
# MO.PrintStr("Preconditioning MHD setup",5,"+","\n\n","\n\n")
HiptmairMatrices = PrecondSetup.MagneticSetup(Magnetic, Lagrange, b0, r0, 1e-6)
MO.PrintStr("Setting up MHD initial guess",5,"+","\n\n","\n\n")
u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,HiptmairMatrices,1e-6,Neumann=Expression(("0","0")),options ="New", FS = "DG")
ones = Function(Pressure)
ones.vector()[:]=(0*ones.vector().array()+1)
# pConst = - assemble(p_k*dx)/assemble(ones*dx)
p_k.vector()[:] += - assemble(p_k*dx)/assemble(ones*dx)
x = Iter.u_prev(u_k,p_k,b_k,r_k)
KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(Pressure, MU)
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
# plot(b_k)
ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W,F_M,F_NS, u_k,b_k,params,IterType,"DG")
RHSform = forms.PicardRHS(mesh, W, u_k, p_k, b_k, r_k, params,"DG")
bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0")), boundary)
bcb = DirichletBC(W.sub(2),Expression(("0.0","0.0")), boundary)
bcr = DirichletBC(W.sub(3),Expression(("0.0")), boundary)
bcs = [bcu,bcb,bcr]
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-4 # tolerance
iter = 0 # iteration counter
maxiter = 40 # max no of iterations allowed
SolutionTime = 0
outer = 0
# parameters['linear_algebra_backend'] = 'uBLAS'
if IterType == "CD":
AA, bb = assemble_system(maxwell+ns, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
# u = b.duplicate()
# P = CP.Assemble(PP)
u_is = PETSc.IS().createGeneral(range(FSpaces[0].dim()))
b_is = PETSc.IS().createGeneral(range(FSpaces[0].dim()+FSpaces[1].dim(),FSpaces[0].dim()+FSpaces[1].dim()+FSpaces[2].dim()))
NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim()))
M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim()))
OuterTol = 1e-5
InnerTol = 1e-3
NSits =0
Mits =0
TotalStart =time.time()
SolutionTime = 0
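    # Picard (fixed point) loop: reassemble the system linearized around (u_k, b_k), solve, and repeat until the update norm eps drops below tol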
while eps > tol and iter < maxiter:
iter += 1
MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n")
tic()
if IterType == "CD":
bb = assemble((Lmaxwell + Lns) - RHSform)
for bc in bcs:
bc.apply(bb)
A,b = CP.Assemble(AA,bb)
# if iter == 1
if iter == 1:
u = b.duplicate()
F = A.getSubMatrix(u_is,u_is)
kspF = NSprecondSetup.LSCKSPnonlinear(F)
else:
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
F = A.getSubMatrix(u_is,u_is)
kspF = NSprecondSetup.LSCKSPnonlinear(F)
# if iter == 1:
if iter == 1:
u = b.duplicate()
print ("{:40}").format("MHD assemble, time: "), " ==> ",("{:4f}").format(toc()), ("{:9}").format(" time: "), ("{:4}").format(time.strftime('%X %x %Z')[0:5])
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
print "Inititial guess norm: ", u.norm()
stime = time.time()
# ksp.solve(b, u)
u,it1,it2 = S.solve(A,b,u,[NS_is,M_is],FSpaces,IterType,OuterTol,InnerTol,HiptmairMatrices,KSPlinearfluids,kspF,Fp,MatrixLinearFluids,kspFp)
Soltime = time.time()- stime
NSits += it1
Mits +=it2
SolutionTime = SolutionTime +Soltime
u1, p1, b1, r1, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter)
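        # the pressure is only determined up to a constant, so enforce a zero mean before the next iteration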
p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx)
u_k.assign(u1)
p_k.assign(p1)
b_k.assign(b1)
r_k.assign(r1)
# if eps > 100 and iter > 3:
# print 22222
# break
uOld= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
x = IO.arrayToVec(uOld)
# iter = 10000
# u_k,b_k,epsu,epsb=Iter.PicardTolerance(x,u_k,b_k,FSpaces,dim,"inf",iter)
SolTime[xx-1] = SolutionTime/iter
NSave[xx-1] = (float(NSits)/iter)
Mave[xx-1] = (float(Mits)/iter)
iterations[xx-1] = iter
TotalTime[xx-1] = time.time() - TotalStart
ue =u0
pe = p0
be = b0
re = r0
# ExactSolution = [ue,pe,be,re]
# errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(x,mesh,FSpaces,ExactSolution,order,dim, "DG")
# if xx > 1:
# l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
# H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1]))
# l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
# l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
# Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
# l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1]))
# H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1]))
import pandas as pd
# LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
# LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
# LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
# pd.set_option('precision',3)
# LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
# print LatexTable
# print "\n\n Magnetic convergence"
# MagneticTitles = ["l","B DoF","R DoF","B-L2","L2-order","B-Curl","HCurl-order"]
# MagneticValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2b,l2border,errCurlb,Curlborder),axis=1)
# MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
# pd.set_option('precision',3)
# MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f")
# MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f")
# print MagneticTable
# print "\n\n Lagrange convergence"
# LagrangeTitles = ["l","SolTime","B DoF","R DoF","R-L2","L2-order","R-H1","H1-order"]
# LagrangeValues = np.concatenate((level,SolTime,Magneticdim,Lagrangedim,errL2r,l2rorder,errH1r,H1rorder),axis=1)
# LagrangeTable= pd.DataFrame(LagrangeValues, columns = LagrangeTitles)
# pd.set_option('precision',3)
# LagrangeTable = MO.PandasFormat(LagrangeTable,"R-L2","%2.4e")
# LagrangeTable = MO.PandasFormat(LagrangeTable,'R-H1',"%2.4e")
# LagrangeTable = MO.PandasFormat(LagrangeTable,"H1-order","%1.2f")
# LagrangeTable = MO.PandasFormat(LagrangeTable,'L2-order',"%1.2f")
# print LagrangeTable
print "\n\n Iteration table"
if IterType == "Full":
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
else:
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,NSave,Mave),axis=1)
IterTable= pd.DataFrame(IterValues, columns = IterTitles)
if IterType == "Full":
IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
else:
IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
print IterTable.to_latex()
print " \n Outer Tol: ",OuterTol, "Inner Tol: ", InnerTol
# # # if (ShowResultPlots == 'yes'):
# plot(u_k)
# plot(interpolate(ue,Velocity))
# plot(p_k)
# plot(interpolate(pe,Pressure))
# plot(b_k)
# plot(interpolate(be,Magnetic))
# plot(r_k)
# plot(interpolate(re,Lagrange))
# interactive()
interactive()
| mit |
NunoEdgarGub1/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/finance.py | 69 | 20558 | """
A collection of modules for collecting, analyzing and plotting
financial data. User contributions welcome!
"""
#from __future__ import division
import os, time, warnings
from urllib import urlopen
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
try: import datetime
except ImportError:
raise ImportError('The finance module requires datetime support (python2.3)')
import numpy as np
from matplotlib import verbose, get_configdir
from dates import date2num
from matplotlib.cbook import Bunch
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.colors import colorConverter
from lines import Line2D, TICKLEFT, TICKRIGHT
from patches import Rectangle
from matplotlib.transforms import Affine2D
configdir = get_configdir()
cachedir = os.path.join(configdir, 'finance.cache')
def parse_yahoo_historical(fh, asobject=False, adjusted=True):
"""
Parse the historical data in file handle fh from yahoo finance and return
results as a list of
d, open, close, high, low, volume
    where d is a floating point representation of date, as returned by date2num
if adjust=True, use adjusted prices
"""
results = []
lines = fh.readlines()
datefmt = None
for line in lines[1:]:
vals = line.split(',')
if len(vals)!=7: continue
datestr = vals[0]
if datefmt is None:
try:
datefmt = '%Y-%m-%d'
dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
except ValueError:
datefmt = '%d-%b-%y' # Old Yahoo--cached file?
dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
d = date2num(dt)
open, high, low, close = [float(val) for val in vals[1:5]]
volume = int(vals[5])
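        # rescale open/high/low by adjusted_close/close so the whole bar reflects splits and dividends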
if adjusted:
aclose = float(vals[6])
m = aclose/close
open *= m
high *= m
low *= m
close = aclose
results.append((d, open, close, high, low, volume))
results.reverse()
if asobject:
if len(results)==0: return None
else:
date, open, close, high, low, volume = map(np.asarray, zip(*results))
return Bunch(date=date, open=open, close=close, high=high, low=low, volume=volume)
else:
return results
def fetch_historical_yahoo(ticker, date1, date2, cachename=None):
"""
Fetch historical data for ticker between date1 and date2. date1 and
date2 are datetime instances
Ex:
fh = fetch_historical_yahoo('^GSPC', d1, d2)
cachename is the name of the local file cache. If None, will
    default to the md5 hash of the url (which incorporates the ticker
    and date range)
a file handle is returned
"""
ticker = ticker.upper()
d1 = (date1.month-1, date1.day, date1.year)
d2 = (date2.month-1, date2.day, date2.year)
urlFmt = 'http://table.finance.yahoo.com/table.csv?a=%d&b=%d&c=%d&d=%d&e=%d&f=%d&s=%s&y=0&g=d&ignore=.csv'
url = urlFmt % (d1[0], d1[1], d1[2],
d2[0], d2[1], d2[2], ticker)
if cachename is None:
cachename = os.path.join(cachedir, md5(url).hexdigest())
if os.path.exists(cachename):
fh = file(cachename)
verbose.report('Using cachefile %s for %s'%(cachename, ticker))
else:
if not os.path.isdir(cachedir): os.mkdir(cachedir)
fh = file(cachename, 'w')
fh.write(urlopen(url).read())
fh.close()
verbose.report('Saved %s data to cache file %s'%(ticker, cachename))
fh = file(cachename, 'r')
return fh
def quotes_historical_yahoo(ticker, date1, date2, asobject=False, adjusted=True, cachename=None):
"""
Get historical data for ticker between date1 and date2. date1 and
date2 are datetime instances
results are a list of tuples
(d, open, close, high, low, volume)
    where d is a floating point representation of date, as returned by date2num
if asobject is True, the return val is an object with attrs date,
open, close, high, low, volume, which are equal length arrays
if adjust=True, use adjusted prices
Ex:
sp = f.quotes_historical_yahoo('^GSPC', d1, d2, asobject=True, adjusted=True)
returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
[n,bins,patches] = hist(returns, 100)
mu = mean(returns)
sigma = std(returns)
x = normpdf(bins, mu, sigma)
plot(bins, x, color='red', lw=2)
cachename is the name of the local file cache. If None, will
    default to the md5 hash of the url (which incorporates the ticker
    and date range)
"""
fh = fetch_historical_yahoo(ticker, date1, date2, cachename)
try: ret = parse_yahoo_historical(fh, asobject, adjusted)
except IOError, exc:
        warnings.warn('urlopen() failure\n' + str(exc))
return None
return ret
def plot_day_summary(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""
quotes is a list of (time, open, close, high, low, ...) tuples
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
time must be in float date format - see date2num
ax : an Axes instance to plot to
ticksize : open/close tick marker in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
lines = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open : color = colorup
else : color = colordown
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color=color,
antialiased=False, # no need to antialias vert lines
)
oline = Line2D(
xdata=(t, t), ydata=(open, open),
color=color,
antialiased=False,
marker=TICKLEFT,
markersize=ticksize,
)
cline = Line2D(
xdata=(t, t), ydata=(close, close),
color=color,
antialiased=False,
markersize=ticksize,
marker=TICKRIGHT)
lines.extend((vline, oline, cline))
ax.add_line(vline)
ax.add_line(oline)
ax.add_line(cline)
ax.autoscale_view()
return lines
def candlestick(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
quotes is a list of (time, open, close, high, low, ...) tuples.
As long as the first 5 elements of the tuples are these values,
the tuple can be as long as you want (eg it may store volume).
time must be in float days format - see date2num
Plot the time, open, close, high, low as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
ax : an Axes instance to plot to
width : fraction of a day for the rectangle width
colorup : the color of the rectangle where close >= open
colordown : the color of the rectangle where close < open
alpha : the rectangle alpha level
return value is lines, patches where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
OFFSET = width/2.0
lines = []
patches = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open :
color = colorup
lower = open
height = close-open
else :
color = colordown
lower = close
height = open-close
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color='k',
linewidth=0.5,
antialiased=True,
)
rect = Rectangle(
xy = (t-OFFSET, lower),
width = width,
height = height,
facecolor = color,
edgecolor = color,
)
rect.set_alpha(alpha)
lines.append(vline)
patches.append(rect)
ax.add_line(vline)
ax.add_patch(rect)
ax.autoscale_view()
return lines, patches
def plot_day_summary2(ax, opens, closes, highs, lows, ticksize=4,
colorup='k', colordown='r',
):
"""
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
ax : an Axes instance to plot to
ticksize : size of open and close ticks in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
# the ticks will be from ticksize to 0 in points at the origin and
# we'll translate these to the i, close location
openSegments = [ ((-ticksize, 0), (0, 0)) ]
# the ticks will be from 0 to ticksize in points at the origin and
# we'll translate these to the i, close location
closeSegments = [ ((0, 0), (ticksize, 0)) ]
offsetsOpen = [ (i, open) for i, open in zip(xrange(len(opens)), opens) if open != -1 ]
offsetsClose = [ (i, close) for i, close in zip(xrange(len(closes)), closes) if close != -1 ]
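    # tick segments are defined in points: scale x by dpi/72 to convert points to pixels and collapse y so the ticks stay horizontal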
scale = ax.figure.dpi * (1.0/72.0)
tickTransform = Affine2D().scale(scale, 0.0)
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,1
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,1
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(rangeSegments)==len(offsetsOpen))
assert(len(offsetsOpen)==len(offsetsClose))
assert(len(offsetsClose)==len(colors))
useAA = 0, # use tuple here
lw = 1, # and here
rangeCollection = LineCollection(rangeSegments,
colors = colors,
linewidths = lw,
antialiaseds = useAA,
)
openCollection = LineCollection(openSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsOpen,
transOffset = ax.transData,
)
openCollection.set_transform(tickTransform)
closeCollection = LineCollection(closeSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsClose,
transOffset = ax.transData,
)
closeCollection.set_transform(tickTransform)
minpy, maxx = (0, len(rangeSegments))
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(openCollection)
ax.add_collection(closeCollection)
return rangeCollection, openCollection, closeCollection
def candlestick2(ax, opens, closes, highs, lows, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""
Represent the open, close as a bar line and high low range as a
vertical line.
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
return value is lineCollection, barCollection
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
delta = width/2.
    barVerts = [ ( (i-delta, open), (i-delta, close), (i+delta, close), (i+delta, open) )
                 for i, open, close in zip(xrange(len(opens)), opens, closes)
                 if open != -1 and close != -1 ]
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(barVerts)==len(rangeSegments))
useAA = 0, # use tuple here
lw = 0.5, # and here
rangeCollection = LineCollection(rangeSegments,
colors = ( (0,0,0,1), ),
linewidths = lw,
antialiaseds = useAA,
)
barCollection = PolyCollection(barVerts,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
)
minx, maxx = 0, len(rangeSegments)
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
ax.add_collection(rangeCollection)
return rangeCollection, barCollection
def volume_overlay(ax, opens, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The opens and closes
are used to determine the color of the bar. -1 is missing. If a
value is missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
delta = width/2.
bars = [ ( (i-delta, 0), (i-delta, v), (i+delta, v), (i+delta, 0)) for i, v in enumerate(volumes) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = (0,),
linewidths = (0.5,),
)
corners = (0, 0), (len(bars), max(volumes))
ax.update_datalim(corners)
ax.autoscale_view()
    # add these last
    ax.add_collection(barCollection)
    return barCollection
def volume_overlay2(ax, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The closes are used to
determine the color of the bar. -1 is missing. If a value is
missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
nb: first point is not displayed - it is used only for choosing the
right color
"""
return volume_overlay(ax,closes[:-1],closes[1:],volumes[1:],colorup,colordown,width,alpha)
def volume_overlay3(ax, quotes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. quotes is a list of (d,
open, close, high, low, volume) and close-open is used to
determine the color of the bar
kwarg
width : the bar width in points
colorup : the color of the lines where close1 >= close0
colordown : the color of the lines where close1 < close0
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
dates, opens, closes, highs, lows, volumes = zip(*quotes)
colors = [colord[close1>=close0] for close0, close1 in zip(closes[:-1], closes[1:]) if close0!=-1 and close1 !=-1]
colors.insert(0,colord[closes[0]>=opens[0]])
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, volume), (right, volume), (right, 0)) for d, open, close, high, low, volume in quotes]
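    # bar polygons are defined around x=0 in points-space; barTransform (below) scales
    # them to pixels while the per-bar offsets place each bar at its date in data coords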
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
dates = [d for d, open, close, high, low, volume in quotes]
offsetsBars = [(d, 0) for d in dates]
useAA = 0, # use tuple here
lw = 0.5, # and here
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (min(dates), max(dates))
miny = 0
maxy = max([volume for d, open, close, high, low, volume in quotes])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
#print 'datalim', ax.dataLim.get_bounds()
#print 'viewlim', ax.viewLim.get_bounds()
ax.add_collection(barCollection)
ax.autoscale_view()
return barCollection
def index_bar(ax, vals,
              facecolor='b', edgecolor='k',
width=4, alpha=1.0, ):
"""
Add a bar collection graph with height vals (-1 is missing).
ax : an Axes instance to plot to
width : the bar width in points
alpha : bar transparency
"""
facecolors = (colorConverter.to_rgba(facecolor, alpha),)
edgecolors = (colorConverter.to_rgba(edgecolor, alpha),)
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, v), (right, v), (right, 0)) for v in vals if v != -1 ]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
offsetsBars = [ (i, 0) for i,v in enumerate(vals) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = facecolors,
edgecolors = edgecolors,
antialiaseds = (0,),
linewidths = (0.5,),
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (0, len(offsetsBars))
miny = 0
maxy = max([v for v in vals if v!=-1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
return barCollection
| gpl-3.0 |
IndraVikas/scikit-learn | examples/ensemble/plot_partial_dependence.py | 249 | 4456 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AveOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; neither does the average
number of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print('')
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print('')
fig = plt.figure()
target_feature = (1, 5)
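# partial_dependence returns the predictions averaged over a grid of the two
# target features, together with the grid axis values used for each feature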
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
shyamalschandra/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
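# every point beyond the first 10 starts out unlabeled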
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
    delete_indices = np.array([], dtype=int)
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
        # labeling 5 points: remove them from the unlabeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
krez13/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
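# (labels only take values 0..n_clusters-1, so comparing with 4 yields all False)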
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
DougFirErickson/neon | examples/conv_autoencoder.py | 3 | 2945 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Convolutional autoencoder example network for MNIST data set
"""
import numpy as np
from neon.data import ArrayIterator, load_mnist
from neon.initializers import Uniform
from neon.layers import Conv, Pooling, GeneralizedCost, Deconv
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, SumSquared
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
# Load dataset
(X_train, y_train), (X_test, y_test), nclass = load_mnist(path=args.data_dir)
# Set input and target to X_train
train = ArrayIterator(X_train, lshape=(1, 28, 28))
# Initialize the weights and the learning rule
init_uni = Uniform(low=-0.1, high=0.1)
opt_gdm = GradientDescentMomentum(learning_rate=0.001, momentum_coef=0.9)
# Define the layers
layers = [Conv((4, 4, 8), init=init_uni, activation=Rectlin()),
Pooling(2),
Conv((4, 4, 32), init=init_uni, activation=Rectlin()),
Pooling(2),
Deconv(fshape=(4, 4, 8), init=init_uni, activation=Rectlin()),
Deconv(fshape=(3, 3, 8), init=init_uni, activation=Rectlin(), strides=2),
Deconv(fshape=(2, 2, 1), init=init_uni, strides=2, padding=1)]
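# the conv/pool pairs downsample the input; the deconv stack upsamples it back to a
# 28x28 reconstruction that the squared-error cost compares against the input image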
# Define the cost
cost = GeneralizedCost(costfunc=SumSquared())
model = Model(layers=layers)
# configure callbacks
callbacks = Callbacks(model, **args.callback_args)
# Fit the model
model.fit(train, optimizer=opt_gdm, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
# Plot the reconstructed digits
try:
from matplotlib import pyplot, cm
fi = 0
nrows = 10
ncols = 12
test = np.zeros((28*nrows, 28*ncols))
idxs = [(row, col) for row in range(nrows) for col in range(ncols)]
for row, col in idxs:
im = model.layers.layers[-1].outputs.get()[:, fi].reshape((28, 28))
test[28*row:28*(row+1):, 28*col:28*(col+1)] = im
fi = fi + 1
pyplot.matshow(test, cmap=cm.gray)
pyplot.savefig('Reconstructed.png')
except ImportError:
print 'matplotlib needs to be manually installed to generate plots'
| apache-2.0 |
neutrons/Licorne-Py | UI-playground/layerplot.py | 1 | 2806 | from __future__ import (absolute_import, division, print_function)
from PyQt5 import QtCore, QtWidgets
import sys
import numpy as np
from layer import Layer, MSLD
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import matplotlib.cm
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
class layerplot(QtWidgets.QWidget):
def __init__(self, *args):
QtWidgets.QWidget.__init__(self, *args)
sample=[Layer(nsld=5),Layer(thickness=2.,nsld=3),Layer(nsld=5),Layer(nsld=4.,thickness=np.inf)]
self.m = PlotCanvas(sample, self)
self.m.move(0,0)
def resizeEvent(self, event):
self.m.setGeometry(self.rect())
class PlotCanvas(FigureCanvas):
def __init__(self, layers, parent=None):
self.fig = Figure()
self.axes = self.fig.add_subplot(111)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
self.data=layers
self.variable='nsld'
self.plot()
self.fig.canvas.mpl_connect('pick_event', self.onpick)
def onpick(self,event):
ind=event.ind[0]
if ind==len(self.data)-1:
ind='substrate'
print('picked layer {0}'.format(ind))
return True
def plot(self):
layer_thick_array=np.array([l.thickness for l in self.data])
layer_nsld_array =np.array([l.nsld for l in self.data])
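        # depth[i] is the left edge of layer i: the cumulative thickness of the preceding layers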
depth=np.zeros(len(layer_thick_array))
depth[1:]=layer_thick_array.cumsum()[:-1]
patches=[]
N=len(self.data)
for i in range(N-1):
polygon=Polygon([[depth[i],0.],[depth[i],layer_nsld_array[i]],[depth[i+1],layer_nsld_array[i]],[depth[i+1],0]],True)
patches.append(polygon)
polygon=Polygon([[depth[N-1],0.],[depth[N-1],layer_nsld_array[N-1]],[depth[N-1]+1,layer_nsld_array[N-1]],[depth[N-1]+1,0]],True)
patches.append(polygon)
p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4, picker=True)
colors = 100*np.random.rand(len(patches))
p.set_array(np.array(colors))
ax = self.figure.add_subplot(111)
ax.add_collection(p)
ax.set_title('NSLD')
ax.set_xlim(np.array([0,depth[-1]])*1.2)
ax.set_ylim(np.array([0,layer_nsld_array.max()])*1.2) #TODO allow negative
ax.set_xlabel('Thickness')
ax.set_ylabel('NSLD')
self.draw()
if __name__=='__main__':
app=QtWidgets.QApplication(sys.argv)
mainForm=layerplot()
mainForm.show()
sys.exit(app.exec_())
| gpl-3.0 |
ashhher3/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
aetilley/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model is to generalize correctly
from the training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
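# degree 1 underfits, degree 4 approximates the true function well, degree 15 overfits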
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
andyh616/mne-python | mne/viz/tests/test_ica.py | 7 | 6812 | # Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: Simplified BSD
import os.path as op
import warnings
from numpy.testing import assert_raises
from mne import io, read_events, Epochs, read_cov
from mne import pick_types
from mne.utils import run_tests_if_main, requires_sklearn
from mne.viz.utils import _fake_click
from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 0.2
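# epochs span 100 ms before to 200 ms after each event of type 1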
def _get_raw(preload=False):
return io.Raw(raw_fname, preload=preload)
def _get_events():
return read_events(event_name)
def _get_picks(raw):
return [0, 1, 2, 6, 7, 8, 12, 13, 14] # take a only few channels
def _get_epochs():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
return epochs
@requires_sklearn
def test_plot_ica_components():
"""Test plotting of ICA solutions
"""
import matplotlib.pyplot as plt
raw = _get_raw()
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
max_pca_components=3, n_pca_components=3)
ica_picks = _get_picks(raw)
ica.fit(raw, picks=ica_picks)
warnings.simplefilter('always', UserWarning)
with warnings.catch_warnings(record=True):
for components in [0, [0], [0, 1], [0, 1] * 2, None]:
ica.plot_components(components, image_interp='bilinear', res=16)
ica.info = None
assert_raises(RuntimeError, ica.plot_components, 1)
plt.close('all')
@requires_sklearn
def test_plot_ica_sources():
"""Test plotting of ICA panel
"""
import matplotlib.pyplot as plt
raw = io.Raw(raw_fname, preload=False)
raw.crop(0, 1, copy=False)
raw.preload_data()
picks = _get_picks(raw)
epochs = _get_epochs()
raw.pick_channels([raw.ch_names[k] for k in picks])
ica_picks = pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False, exclude='bads')
ica = ICA(n_components=2, max_pca_components=3, n_pca_components=3)
ica.fit(raw, picks=ica_picks)
raw.info['bads'] = ['MEG 0113']
assert_raises(RuntimeError, ica.plot_sources, inst=raw)
ica.plot_sources(epochs)
epochs.info['bads'] = ['MEG 0113']
assert_raises(RuntimeError, ica.plot_sources, inst=epochs)
epochs.info['bads'] = []
with warnings.catch_warnings(record=True): # no labeled objects mpl
ica.plot_sources(epochs.average())
evoked = epochs.average()
fig = ica.plot_sources(evoked)
# Test a click
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax,
[line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax,
[ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
# plot with bad channels excluded
ica.plot_sources(evoked, exclude=[0])
ica.exclude = [0]
ica.plot_sources(evoked) # does the same thing
assert_raises(ValueError, ica.plot_sources, 'meeow')
plt.close('all')
@requires_sklearn
def test_plot_ica_overlay():
"""Test plotting of ICA cleaning
"""
import matplotlib.pyplot as plt
raw = _get_raw(preload=True)
picks = _get_picks(raw)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
max_pca_components=3, n_pca_components=3)
ica.fit(raw, picks=picks)
# don't test raw, needs preload ...
ecg_epochs = create_ecg_epochs(raw, picks=picks)
ica.plot_overlay(ecg_epochs.average())
eog_epochs = create_eog_epochs(raw, picks=picks)
ica.plot_overlay(eog_epochs.average())
assert_raises(ValueError, ica.plot_overlay, raw[:2, :3][0])
ica.plot_overlay(raw)
plt.close('all')
@requires_sklearn
def test_plot_ica_scores():
"""Test plotting of ICA scores
"""
import matplotlib.pyplot as plt
raw = _get_raw()
picks = _get_picks(raw)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
max_pca_components=3, n_pca_components=3)
ica.fit(raw, picks=picks)
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1])
assert_raises(ValueError, ica.plot_scores, [0.2])
plt.close('all')
@requires_sklearn
def test_plot_instance_components():
"""Test plotting of components as instances of raw and epochs."""
import matplotlib.pyplot as plt
raw = _get_raw()
picks = _get_picks(raw)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
max_pca_components=3, n_pca_components=3)
ica.fit(raw, picks=picks)
fig = ica.plot_sources(raw, exclude=[0], title='Components')
fig.canvas.key_press_event('down')
fig.canvas.key_press_event('up')
fig.canvas.key_press_event('right')
fig.canvas.key_press_event('left')
fig.canvas.key_press_event('o')
fig.canvas.key_press_event('-')
fig.canvas.key_press_event('+')
fig.canvas.key_press_event('=')
fig.canvas.key_press_event('pageup')
fig.canvas.key_press_event('pagedown')
fig.canvas.key_press_event('home')
fig.canvas.key_press_event('end')
fig.canvas.key_press_event('f11')
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax, [-0.1, 0.9]) # click on y-label
fig.canvas.key_press_event('escape')
plt.close('all')
epochs = _get_epochs()
fig = ica.plot_sources(epochs, exclude=[0], title='Components')
fig.canvas.key_press_event('down')
fig.canvas.key_press_event('up')
fig.canvas.key_press_event('right')
fig.canvas.key_press_event('left')
fig.canvas.key_press_event('o')
fig.canvas.key_press_event('-')
fig.canvas.key_press_event('+')
fig.canvas.key_press_event('=')
fig.canvas.key_press_event('pageup')
fig.canvas.key_press_event('pagedown')
fig.canvas.key_press_event('home')
fig.canvas.key_press_event('end')
fig.canvas.key_press_event('f11')
# Test a click
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax, [-0.1, 0.9]) # click on y-label
fig.canvas.key_press_event('escape')
plt.close('all')
run_tests_if_main()
| bsd-3-clause |
niketanpansare/systemml | scripts/perftest/python/google_docs/stats.py | 15 | 3540 | #!/usr/bin/env python3
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import argparse
import os
import pprint
from os.path import join
import matplotlib.pyplot as plt
from gdocs_utils import auth
# Dict
# {algo_name: [{'algo_1.0': t1}, {'algo_2.0': t2}]}
def get_formatted_data(sheet_data):
"""
    Read all the data from Google Sheets and transform it into a dictionary that can be
    used for plotting later
"""
algo_dict = {}
for i in sheet_data:
inn_count = 0
data = []
for key, val in i.items():
inn_count += 1
if inn_count < 3:
data.append(key)
data.append(val)
if inn_count == 2:
t1, v1, _, v2 = data
if len(str(v2)) > 0:
if v1 not in algo_dict:
algo_dict[v1] = [{t1: v2}]
else:
algo_dict[v1].append({t1: v2})
inn_count = 0
data = []
return algo_dict
def plot(x, y, xlab, ylab, title):
"""
Save plots to the current folder based on the arguments
"""
CWD = os.getcwd()
PATH = join(CWD, title)
width = .35
plt.bar(x, y, color="red", width=width)
plt.xticks(x)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
plt.savefig(PATH + '.png')
print('Plot {} generated'.format(title))
return plt
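# Example (hypothetical values):
#     plot([1.0, 2.0], [3.2, 2.8], 'Version', 'Time in sec', 'MultiLogReg')
# saves 'MultiLogReg.png' in the current working directory and returns pyplot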
# Example Usage
# ./stats.py --auth ../key/client_json.json --exec-type singlenode
if __name__ == '__main__':
execution_mode = ['hybrid_spark', 'singlenode']
cparser = argparse.ArgumentParser(description='System-ML Statistics Script')
cparser.add_argument('--auth', help='Location to read auth file',
required=True, metavar='')
cparser.add_argument('--exec-type', help='Execution mode', choices=execution_mode,
required=True, metavar='')
cparser.add_argument('--plot', help='Algorithm to plot', metavar='')
args = cparser.parse_args()
sheet = auth(args.auth, args.exec_type)
all_data = sheet.get_all_records()
plot_data = get_formatted_data(all_data)
if args.plot is not None:
print(plot_data[args.plot])
title = args.plot
ylab = 'Time in sec'
xlab = 'Version'
x = []
y = []
for i in plot_data[args.plot]:
version = list(i.keys())[0]
time = list(i.values())[0]
y.append(time)
x.append(version)
x = list(map(lambda x: float(x.split('_')[1]), x))
plot(x, y, xlab, ylab, title)
else:
pprint.pprint(plot_data, width=1) | apache-2.0 |
srinathv/vispy | examples/basics/plotting/mpl_plot.py | 14 | 1579 | # -*- coding: utf-8 -*-
# vispy: testskip
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Example demonstrating how to use vispy.mpl_plot, which uses mplexporter
to convert matplotlib commands to vispy draw commands.
Requires matplotlib.
"""
import numpy as np
# You can use either matplotlib or vispy to render this example:
# import matplotlib.pyplot as plt
import vispy.mpl_plot as plt
from vispy.io import read_png, load_data_file
n = 200
freq = 10
fs = 100.
t = np.arange(n) / fs
tone = np.sin(2*np.pi*freq*t)
noise = np.random.RandomState(0).randn(n)
signal = tone + noise
magnitude = np.abs(np.fft.fft(signal))
freqs = np.fft.fftfreq(n, 1. / fs)
flim = n // 2
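# only the first half of the spectrum (positive frequencies) is plotted below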
# Signal
fig = plt.figure()
ax = plt.subplot(311)
ax.imshow(read_png(load_data_file('pyplot/logo.png')))
ax = plt.subplot(312)
ax.plot(t, signal, 'k-')
# Frequency content
ax = plt.subplot(313)
idx = np.argmax(magnitude[:flim])
ax.text(freqs[idx], magnitude[idx], 'Max: %s Hz' % freqs[idx],
verticalalignment='top')
ax.plot(freqs[:flim], magnitude[:flim], 'k-o')
plt.draw()
# NOTE: show() has currently been overwritten to convert to vispy format, so:
# 1. It must be called to show the results, and
# 2. Any plotting commands executed after this will not take effect.
# We are working to remove this limitation.
if __name__ == '__main__':
plt.show(True)
| bsd-3-clause |
ChinaQuants/bokeh | bokeh/compat/bokeh_renderer.py | 2 | 17762 | "Supporting objects and functions to convert Matplotlib objects into Bokeh."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import itertools
import warnings
import matplotlib as mpl
import numpy as np
import pandas as pd
from six import string_types
from ..models import (ColumnDataSource, FactorRange, DataRange1d, DatetimeAxis, GlyphRenderer,
Grid, GridPlot, LinearAxis, Plot, CategoricalAxis, Legend)
from ..models.glyphs import (Asterisk, Circle, Cross, Diamond, InvertedTriangle,
Line, MultiLine, Patches, Square, Text, Triangle, X)
from ..plotting import DEFAULT_TOOLS
from ..plotting_helpers import _process_tools_arg
from .mplexporter.renderers import Renderer
from .mpl_helpers import convert_dashes, get_props_cycled, is_ax_end, xkcd_line
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class BokehRenderer(Renderer):
def __init__(self, pd_obj, xkcd):
"Initial setup."
self.fig = None
self.pd_obj = pd_obj
self.xkcd = xkcd
self.zorder = {}
self.handles = {}
def open_figure(self, fig, props):
"Get the main plot properties and create the plot."
self.width = int(props['figwidth'] * props['dpi'])
self.height = int(props['figheight'] * props['dpi'])
self.plot = Plot(x_range=DataRange1d(),
y_range=DataRange1d(),
plot_width=self.width,
plot_height=self.height)
def close_figure(self, fig):
"Complete the plot: add tools."
# Add tools
tool_objs = _process_tools_arg(self.plot, DEFAULT_TOOLS)
self.plot.add_tools(*tool_objs)
# Simple or Grid plot setup
if len(fig.axes) <= 1:
self.fig = self.plot
self.plot.renderers.sort(key=lambda x: self.zorder.get(x._id, 0))
else:
            # This list comprehension splits the plot.renderers list at the "marker"
            # points, returning small sublists corresponding to each subplot.
subrends = [list(x[1]) for x in itertools.groupby(
self.plot.renderers, lambda x: is_ax_end(x)) if not x[0]]
plots = []
for i, axes in enumerate(fig.axes):
# create a new plot for each subplot
_plot = Plot(x_range=self.plot.x_range,
y_range=self.plot.y_range,
plot_width=self.width,
plot_height=self.height)
_plot.title = ""
# and add new tools
_tool_objs = _process_tools_arg(_plot, DEFAULT_TOOLS)
_plot.add_tools(*_tool_objs)
# clean the plot ref from axis and grids
_plot_rends = subrends[i]
for r in _plot_rends:
if not isinstance(r, GlyphRenderer):
r.plot = None
# add all the renderers into the new subplot
for r in _plot_rends:
if isinstance(r, GlyphRenderer):
_plot.renderers.append(r)
elif isinstance(r, Grid):
_plot.add_layout(r)
else:
if r in self.plot.below:
_plot.add_layout(r, 'below')
elif r in self.plot.above:
_plot.add_layout(r, 'above')
elif r in self.plot.left:
_plot.add_layout(r, 'left')
elif r in self.plot.right:
_plot.add_layout(r, 'right')
_plot.renderers.sort(key=lambda x: self.zorder.get(x._id, 0))
plots.append(_plot)
(a, b, c) = fig.axes[0].get_geometry()
p = np.array(plots)
n = np.resize(p, (a, b))
grid = GridPlot(children=n.tolist())
self.fig = grid
def open_axes(self, ax, props):
"Get axes data and create the axes and grids"
# Get axes, title and grid into class attributes.
self.ax = ax
self.plot.title = ax.get_title()
# to avoid title conversion by draw_text later
        # Make sure that all information about the axes is passed to the properties
if props.get('xscale', False):
props['axes'][0]['scale'] = props['xscale']
if props.get('yscale', False):
props['axes'][1]['scale'] = props['yscale']
# Add axis
for props in props['axes']:
if props['position'] == "bottom" : location, dim, thing = "below", 0, ax.xaxis
elif props['position'] == "top" : location, dim, thing = "above", 0, ax.xaxis
else: location, dim, thing = props['position'], 1, ax.yaxis
baxis = self.make_axis(thing, location, props)
if dim==0:
gridlines = ax.get_xgridlines()
else:
gridlines = ax.get_ygridlines()
if gridlines:
self.make_grid(baxis, dim, gridlines[0])
def close_axes(self, ax):
"Complete the axes adding axes-dependent plot props"
background_fill = ax.get_axis_bgcolor()
if background_fill == 'w':
background_fill = 'white'
self.plot.background_fill = background_fill
if self.xkcd:
self.plot.title_text_font = "Comic Sans MS, Textile, cursive"
self.plot.title_text_font_style = "bold"
self.plot.title_text_color = "black"
# Add a "marker" Glyph to help the plot.renderers splitting in the GridPlot build
dummy_source = ColumnDataSource(data=dict(name="ax_end"))
self.plot.renderers.append(GlyphRenderer(data_source=dummy_source, glyph=X()))
def open_legend(self, legend, props):
lgnd = Legend(orientation="top_right")
try:
for label, obj in zip(props['labels'], props['handles']):
lgnd.legends.append((label, [self.handles[id(obj)]]))
self.plot.add_layout(lgnd)
except KeyError:
pass
def close_legend(self, legend):
pass
def draw_line(self, data, coordinates, style, label, mplobj=None):
"Given a mpl line2d instance create a Bokeh Line glyph."
_x = data[:, 0]
if self.pd_obj is True:
try:
x = [pd.Period(ordinal=int(i), freq=self.ax.xaxis.freq).to_timestamp() for i in _x]
except AttributeError: # we probably can make this one more intelligent later
x = _x
else:
x = _x
y = data[:, 1]
if self.xkcd:
x, y = xkcd_line(x, y)
line = Line()
source = ColumnDataSource()
line.x = source.add(x)
line.y = source.add(y)
line.line_color = style['color']
line.line_width = style['linewidth']
line.line_alpha = style['alpha']
        line.line_dash = [] if style['dasharray'] == "none" else [int(i) for i in style['dasharray'].split(",")] # str2list(int)
# line.line_join = line2d.get_solid_joinstyle() # not in mplexporter
# line.line_cap = cap_style_map[line2d.get_solid_capstyle()] # not in mplexporter
if self.xkcd:
line.line_width = 3
r = self.plot.add_glyph(source, line)
self.zorder[r._id] = style['zorder']
self.handles[id(mplobj)] = r
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"Given a mpl line2d instance create a Bokeh Marker glyph."
x = data[:, 0]
y = data[:, 1]
marker_map = {
"o": Circle,
"s": Square,
"+": Cross,
"^": Triangle,
"v": InvertedTriangle,
"x": X,
"d": Diamond,
"D": Diamond,
"*": Asterisk,
}
# Not all matplotlib markers are currently handled; fall back to Circle if we encounter an
# unhandled marker. See http://matplotlib.org/api/markers_api.html for a list of markers.
try:
marker = marker_map[style['marker']]()
except KeyError:
warnings.warn("Unable to handle marker: %s; defaulting to Circle" % style['marker'])
marker = Circle()
source = ColumnDataSource()
marker.x = source.add(x)
marker.y = source.add(y)
marker.line_color = style['edgecolor']
marker.fill_color = style['facecolor']
marker.line_width = style['edgewidth']
marker.size = style['markersize']
marker.fill_alpha = marker.line_alpha = style['alpha']
r = self.plot.add_glyph(source, marker)
self.zorder[r._id] = style['zorder']
self.handles[id(mplobj)] = r
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
pass
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"Given a mpl text instance create a Bokeh Text glyph."
        # mpl gives you the title and axes names as text objects (with specific locations)
        # inside the plot itself. That does not make sense inside Bokeh, so we
        # just skip the title and axes names from the conversion and convert any other text.
if text_type in ['xlabel', 'ylabel', 'title']:
return
if coordinates != 'data':
return
x, y = position
text = Text(x=x, y=y, text=[text])
alignment_map = {"center": "middle", "top": "top", "bottom": "bottom", "baseline": "bottom"}
        # baseline not implemented in Bokeh, defaulting to bottom.
text.text_alpha = style['alpha']
text.text_font_size = "%dpx" % style['fontsize']
text.text_color = style['color']
text.text_align = style['halign']
text.text_baseline = alignment_map[style['valign']]
text.angle = style['rotation']
## Using get_fontname() works, but it's oftentimes not available in the browser,
## so it's better to just use the font family here.
#text.text_font = mplText.get_fontname()) not in mplexporter
#text.text_font = mplText.get_fontfamily()[0] # not in mplexporter
#text.text_font_style = fontstyle_map[mplText.get_fontstyle()] # not in mplexporter
## we don't really have the full range of font weights, but at least handle bold
#if mplText.get_weight() in ("bold", "heavy"):
#text.text_font_style = bold
source = ColumnDataSource()
r = self.plot.add_glyph(source, text)
self.zorder[r._id] = style['zorder']
self.handles[id(mplobj)] = r
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
pass
def make_axis(self, ax, location, props):
"Given a mpl axes instance, returns a Bokeh LinearAxis object."
# TODO:
# * handle log scaling
# * map `labelpad` to `major_label_standoff`
# * deal with minor ticks once BokehJS supports them
# * handle custom tick locations once that is added to bokehJS
tf = props['tickformat']
if tf and any(isinstance(x, string_types) for x in tf):
laxis = CategoricalAxis(axis_label=ax.get_label_text())
rng = FactorRange(factors=[str(x) for x in tf], offset=-1.0)
if location in ["above", "below"]:
self.plot.x_range = rng
else:
self.plot.y_range = rng
else:
if props['scale'] == "linear":
laxis = LinearAxis(axis_label=ax.get_label_text())
elif props['scale'] == "date":
laxis = DatetimeAxis(axis_label=ax.get_label_text())
self.plot.add_layout(laxis, location)
# First get the label properties by getting an mpl.Text object
label = ax.get_label()
self.text_props(label, laxis, prefix="axis_label_")
# Set the tick properties (for now just turn off if necessary)
# TODO: mirror tick properties
if props['nticks'] == 0:
laxis.major_tick_line_color = None
laxis.minor_tick_line_color = None
laxis.major_label_text_color = None
# To get the tick label format, we look at the first of the tick labels
# and assume the rest are formatted similarly.
ticklabels = ax.get_ticklabels()
if ticklabels:
self.text_props(ticklabels[0], laxis, prefix="major_label_")
#newaxis.bounds = axis.get_data_interval() # I think this is the right func...
if self.xkcd:
laxis.axis_line_width = 3
laxis.axis_label_text_font = "Comic Sans MS, Textile, cursive"
laxis.axis_label_text_font_style = "bold"
laxis.axis_label_text_color = "black"
laxis.major_label_text_font = "Comic Sans MS, Textile, cursive"
laxis.major_label_text_font_style = "bold"
laxis.major_label_text_color = "black"
return laxis
def make_grid(self, baxis, dimension, gridline):
"Given a mpl axes instance, returns a Bokeh Grid object."
lgrid = Grid(dimension=dimension,
ticker=baxis.ticker,
grid_line_color=gridline.get_color(),
grid_line_width=gridline.get_linewidth())
self.plot.add_layout(lgrid)
def make_line_collection(self, col):
"Given a mpl collection instance create a Bokeh MultiLine glyph."
xydata = col.get_segments()
t_xydata = [np.transpose(seg) for seg in xydata]
xs = [t_xydata[x][0] for x in range(len(t_xydata))]
ys = [t_xydata[x][1] for x in range(len(t_xydata))]
if self.xkcd:
xkcd_xs = [xkcd_line(xs[i], ys[i])[0] for i in range(len(xs))]
xkcd_ys = [xkcd_line(xs[i], ys[i])[1] for i in range(len(ys))]
xs = xkcd_xs
ys = xkcd_ys
multiline = MultiLine()
source = ColumnDataSource()
multiline.xs = source.add(xs)
multiline.ys = source.add(ys)
self.multiline_props(source, multiline, col)
r = self.plot.add_glyph(source, multiline)
self.zorder[r._id] = col.zorder
self.handles[id(col)] = r
def make_poly_collection(self, col):
"Given a mpl collection instance create a Bokeh Patches glyph."
xs = []
ys = []
for path in col.get_paths():
for sub_poly in path.to_polygons():
xx, yy = sub_poly.transpose()
xs.append(xx)
ys.append(yy)
patches = Patches()
source = ColumnDataSource()
patches.xs = source.add(xs)
patches.ys = source.add(ys)
self.patches_props(source, patches, col)
r = self.plot.add_glyph(source, patches)
self.zorder[r._id] = col.zorder
self.handles[id(col)] = r
def multiline_props(self, source, multiline, col):
"Takes a mpl collection object to extract and set up some Bokeh multiline properties."
colors = get_props_cycled(col, col.get_colors(), fx=lambda x: mpl.colors.rgb2hex(x))
widths = get_props_cycled(col, col.get_linewidth())
multiline.line_color = source.add(colors)
multiline.line_width = source.add(widths)
multiline.line_alpha = col.get_alpha()
offset = col.get_linestyle()[0][0]
if not col.get_linestyle()[0][1]:
on_off = []
else:
on_off = map(int,col.get_linestyle()[0][1])
multiline.line_dash_offset = convert_dashes(offset)
multiline.line_dash = list(convert_dashes(tuple(on_off)))
def patches_props(self, source, patches, col):
"Takes a mpl collection object to extract and set up some Bokeh patches properties."
face_colors = get_props_cycled(col, col.get_facecolors(), fx=lambda x: mpl.colors.rgb2hex(x))
patches.fill_color = source.add(face_colors)
edge_colors = get_props_cycled(col, col.get_edgecolors(), fx=lambda x: mpl.colors.rgb2hex(x))
patches.line_color = source.add(edge_colors)
widths = get_props_cycled(col, col.get_linewidth())
patches.line_width = source.add(widths)
patches.line_alpha = col.get_alpha()
patches.fill_alpha = col.get_alpha()
offset = col.get_linestyle()[0][0]
if not col.get_linestyle()[0][1]:
on_off = []
else:
on_off = map(int,col.get_linestyle()[0][1])
patches.line_dash_offset = convert_dashes(offset)
patches.line_dash = list(convert_dashes(tuple(on_off)))
def text_props(self, text, obj, prefix=""):
fp = text.get_font_properties()
setattr(obj, prefix+"text_font", fp.get_family()[0])
setattr(obj, prefix+"text_font_size", "%fpt" % fp.get_size_in_points())
setattr(obj, prefix+"text_font_style", fp.get_style())
| bsd-3-clause |
hurdlea/SimpleCV | SimpleCV/Shell/Shell.py | 10 | 7838 | #!/usr/bin/python
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# SimpleCV
# a kinder, gentler machine vision python library
#-----------------------------------------------------------------------
# SimpleCV is an interface for Open Source machine
# vision libraries in Python.
# It provides a concise, readable interface for cameras,
# image manipulation, feature extraction, and format conversion.
# Our mission is to give casual users a comprehensive interface
# for basic machine vision functions and an
# elegant programming interface for advanced users.
#
# more info:
# http://www.simplecv.org
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#load system libraries
from subprocess import call
import platform
import webbrowser
import sys
from SimpleCV.__init__ import *
#Load simpleCV libraries
from SimpleCV.Shell.Tutorial import *
from SimpleCV.Shell.Example import *
try:
from SimpleCV import __version__ as SIMPLECV_VERSION
except ImportError:
SIMPLECV_VERSION = ''
#Command to clear the shell screen
def shellclear():
if platform.system() == "Windows":
return
call("clear")
#method to get magic_* methods working in bpython
def make_magic(method):
def wrapper(*args, **kwargs):
if not args:
return method('', '')
return method('', *args, **kwargs)
return wrapper
def plot(arg):
try:
import matplotlib.pyplot as plt
except ImportError:
logger.warning("Matplotlib is not installed and required")
return
print "args", arg
print "type", type(arg)
plt.plot(arg)
plt.show()
def hist(arg):
try:
import pylab
except ImportError:
logger.warning("pylab is not installed and required")
return
plot(pylab.hist(arg)[1])
def magic_clear(self, arg):
shellclear()
def magic_forums(self, arg):
webbrowser.open('http://help.simplecv.org/questions/')
def magic_walkthrough(self, arg):
webbrowser.open('http://examples.simplecv.org/en/latest/')
def magic_docs(self, arg):
webbrowser.open('http://www.simplecv.org/docs/')
banner = '+-----------------------------------------------------------+\n'
banner += ' SimpleCV '
banner += SIMPLECV_VERSION
banner += ' [interactive shell] - http://simplecv.org\n'
banner += '+-----------------------------------------------------------+\n'
banner += '\n'
banner += 'Commands: \n'
banner += '\t"exit()" or press "Ctrl+ D" to exit the shell\n'
banner += '\t"clear()" to clear the shell screen\n'
banner += '\t"tutorial()" to begin the SimpleCV interactive tutorial\n'
banner += '\t"example()" gives a list of examples you can run\n'
banner += '\t"forums()" will launch a web browser for the help forums\n'
banner += '\t"walkthrough()" will launch a web browser with a walkthrough\n'
banner += '\n'
banner += 'Usage:\n'
banner += '\tdot complete works to show library\n'
banner += '\tfor example: Image().save("/tmp/test.jpg") will dot complete\n'
banner += '\tjust by touching TAB after typing Image().\n'
banner += '\n'
banner += 'Documentation:\n'
banner += '\thelp(Image), ?Image, Image?, or Image()? all do the same\n'
banner += '\t"docs()" will launch webbrowser showing documentation'
banner += '\n'
exit_msg = '\n... [Exiting the SimpleCV interactive shell] ...\n'
def setup_ipython():
try:
import IPython
from IPython.config.loader import Config
from IPython.frontend.terminal.embed import InteractiveShellEmbed
cfg = Config()
cfg.PromptManager.in_template = "SimpleCV:\\#> "
cfg.PromptManager.out_template = "SimpleCV:\\#: "
#~ cfg.InteractiveShellEmbed.prompt_in1 = "SimpleCV:\\#> "
#~ cfg.InteractiveShellEmbed.prompt_out="SimpleCV:\\#: "
scvShell = InteractiveShellEmbed(config=cfg, banner1=banner,
exit_msg=exit_msg)
scvShell.define_magic("tutorial", magic_tutorial)
scvShell.define_magic("clear", magic_clear)
scvShell.define_magic("example", magic_examples)
scvShell.define_magic("forums", magic_forums)
scvShell.define_magic("walkthrough", magic_walkthrough)
scvShell.define_magic("docs", magic_docs)
except ImportError:
try:
from IPython.Shell import IPShellEmbed
argsv = ['-pi1', 'SimpleCV:\\#>', '-pi2', ' .\\D.:', '-po',
'SimpleCV:\\#>', '-nosep']
scvShell = IPShellEmbed(argsv)
scvShell.set_banner(banner)
scvShell.set_exit_msg(exit_msg)
scvShell.IP.api.expose_magic("tutorial", magic_tutorial)
scvShell.IP.api.expose_magic("clear", magic_clear)
scvShell.IP.api.expose_magic("example", magic_examples)
scvShell.IP.api.expose_magic("forums", magic_forums)
scvShell.IP.api.expose_magic("walkthrough", magic_walkthrough)
scvShell.IP.api.expose_magic("docs", magic_docs)
except ImportError:
raise
return scvShell()
def setup_bpython():
import bpython
example = make_magic(magic_examples)
clear = make_magic(magic_clear)
docs = make_magic(magic_docs)
tutorial = make_magic(magic_tutorial)
walkthrough = make_magic(magic_walkthrough)
forums = make_magic(magic_forums)
temp = locals().copy()
temp.update(globals())
return bpython.embed(locals_=temp, banner=banner)
def setup_plain():
import code
return code.interact(banner=banner, local=globals())
def run_notebook(mainArgs):
if IPython.__version__.startswith('1.'):
"""Run the ipython notebook server"""
from IPython.html import notebookapp
from IPython.html.services.kernels import kernelmanager
else:
from IPython.frontend.html.notebook import notebookapp
from IPython.frontend.html.notebook import kernelmanager
code = ""
code += "from SimpleCV import *;"
code += "init_options_handler.enable_notebook();"
kernelmanager.MappingKernelManager.first_beat = 30.0
app = notebookapp.NotebookApp.instance()
mainArgs += [
'--port', '5050',
'--c', code,
]
app.initialize(mainArgs)
app.start()
sys.exit()
def self_update():
URL = "https://github.com/sightmachine/SimpleCV/zipball/master"
command = "pip install -U %s" % URL
if os.getuid() == 0:
command = "sudo " + command
returncode = call(command, shell=True)
sys.exit()
def run_shell(shell=None):
shells = ['setup_ipython', 'setup_bpython', 'setup_plain']
available_shells = [shell] if shell else shells
for shell in available_shells:
try:
return globals()[shell]()
except ImportError:
pass
raise ImportError
def main(*args):
log_level = logging.WARNING
interface = None
if len(sys.argv) > 1 and len(sys.argv[1]) > 1:
flag = sys.argv[1]
if flag == 'notebook':
run_notebook(sys.argv[1:])
sys.exit()
elif flag == 'update':
print "Updating SimpleCV....."
self_update()
if flag in ['--headless', 'headless']:
# set SDL to use the dummy NULL video driver,
# so it doesn't need a windowing system.
os.environ["SDL_VIDEODRIVER"] = "dummy"
elif flag in ['--nowarnings', 'nowarnings']:
log_level = logging.INFO
elif flag in ['--debug', 'debug']:
log_level = logging.DEBUG
if flag in ['--ipython', 'ipython']:
interface = 'setup_ipython'
elif flag in ['--bpython', 'bpython']:
interface = 'setup_bpython'
else:
interface = 'setup_plain'
init_logging(log_level)
shellclear()
scvShell = run_shell(interface)
| bsd-3-clause |
huobaowangxi/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of each point
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
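# Illustrative aside (a minimal sketch, not part of the upstream example): the
# per-sample weights are simply passed at fit time, where they rescale C for
# each sample. The toy data below is an assumption used only for illustration.
_X_toy = np.array([[0.0, 0.0], [1.0, 1.0], [0.9, 0.9]])
_y_toy = [0, 1, 0]
svm.SVC(C=1.0).fit(_X_toy, _y_toy, sample_weight=[1.0, 1.0, 10.0])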
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# fit the models: one emphasizing the weighted samples and, for reference,
# one without sample weights
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
jandom/rdkit | rdkit/Chem/Draw/UnitTestSimilarityMaps.py | 1 | 7019 | # $Id$
#
# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Sereina Riniker, Aug 2013
""" unit testing code for molecule drawing
"""
from __future__ import print_function
from rdkit import RDConfig
import unittest, os, tempfile
from rdkit import Chem
from rdkit.Chem import Draw
import platform
if platform.system() == "Linux":
import os, sys
if not os.environ.get("DISPLAY", None):
try:
# Force matplotlib to not use any Xwindows backend.
import matplotlib
print("Forcing use of Agg renderer", file=sys.stderr)
matplotlib.use('Agg')
except ImportError:
pass
try:
from rdkit.Chem.Draw import SimilarityMaps as sm
except ImportError:
sm = None
from rdkit.RDLogger import logger
logger = logger()
class TestCase(unittest.TestCase):
def setUp(self):
self.mol1 = Chem.MolFromSmiles('c1ccccc1')
self.mol2 = Chem.MolFromSmiles('c1ccncc1')
def testSimilarityMap(self):
# Morgan2 BV
refWeights = [0.5, 0.5, 0.5, -0.5, 0.5, 0.5]
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetMorganFingerprint(m, i, radius=2, fpType='bv'))
for w, r in zip(weights, refWeights):
self.assertEqual(w, r)
fig, maxWeight = sm.GetSimilarityMapForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetMorganFingerprint(m, i, radius=2, fpType='bv'))
self.assertEqual(maxWeight, 0.5)
weights, maxWeight = sm.GetStandardizedWeights(weights)
self.assertEqual(maxWeight, 0.5)
refWeights = [1.0, 1.0, 1.0, -1.0, 1.0, 1.0]
for w, r in zip(weights, refWeights):
self.assertEqual(w, r)
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetMorganFingerprint(m, i, fpType='count'))
self.assertTrue(weights[3] < 0)
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2,
lambda m, i: sm.GetMorganFingerprint(m, i, fpType='bv', useFeatures=True))
self.assertTrue(weights[3] < 0)
# hashed AP BV
refWeights = [0.09523, 0.17366, 0.17366, -0.23809, 0.17366, 0.17366]
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetAPFingerprint(m, i, fpType='bv', nBits=1024))
for w, r in zip(weights, refWeights):
self.assertAlmostEqual(w, r, 4)
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetAPFingerprint(m, i, fpType='normal'))
self.assertTrue(weights[3] < 0)
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetAPFingerprint(m, i, fpType='hashed'))
self.assertTrue(weights[3] < 0)
# hashed TT BV
refWeights = [0.5, 0.5, -0.16666, -0.5, -0.16666, 0.5]
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2,
lambda m, i: sm.GetTTFingerprint(m, i, fpType='bv', nBits=1024, nBitsPerEntry=1))
for w, r in zip(weights, refWeights):
self.assertAlmostEqual(w, r, 4)
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetTTFingerprint(m, i, fpType='normal'))
self.assertTrue(weights[3] < 0)
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetTTFingerprint(m, i, fpType='hashed'))
self.assertTrue(weights[3] < 0)
# RDK fingerprint BV
refWeights = [0.42105, 0.42105, 0.42105, -0.32895, 0.42105, 0.42105]
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetRDKFingerprint(m, i, nBits=1024, nBitsPerHash=1))
for w, r in zip(weights, refWeights):
self.assertAlmostEqual(w, r, 4)
def testSimilarityMapKWArgs(self):
# Morgan2 BV
m1 = Chem.MolFromSmiles('CC[C@](F)(Cl)c1ccccc1')
m2 = Chem.MolFromSmiles('CC[C@@](F)(Cl)c1ccccc1')
weights = sm.GetAtomicWeightsForFingerprint(
m1, m2, lambda m, i: sm.GetAPFingerprint(m, atomId=i, includeChirality=False))
for w in weights:
self.assertAlmostEqual(w, 0.100, 4)
weights = sm.GetAtomicWeightsForFingerprint(
m1, m2, lambda m, i: sm.GetAPFingerprint(m, atomId=i, includeChirality=True))
for i, w in enumerate(weights):
if i != 2:
self.assertAlmostEqual(w, 0.098, 3)
else:
self.assertAlmostEqual(w, -0.082, 3)
weights = sm.GetAtomicWeightsForFingerprint(
m1, m2, lambda m, i: sm.GetTTFingerprint(m, atomId=i, includeChirality=False))
for w in weights:
self.assertTrue(w > 0.0)
weights = sm.GetAtomicWeightsForFingerprint(
m1, m2, lambda m, i: sm.GetTTFingerprint(m, atomId=i, includeChirality=True))
for i, w in enumerate(weights):
if i > 4:
self.assertTrue(w > 0.0)
else:
self.assertTrue(w < 0.0)
weights = sm.GetAtomicWeightsForFingerprint(
m1, m2, lambda m, i: sm.GetMorganFingerprint(m, radius=1, atomId=i, useChirality=False))
weights2 = sm.GetAtomicWeightsForFingerprint(
m1, m2, lambda m, i: sm.GetMorganFingerprint(m, radius=1, atomId=i, useChirality=True))
# testing explicit values here seems silly, just check that the contribution of the
# chiral center drops:
self.assertTrue(weights[2] > weights2[2])
if __name__ == '__main__':
try:
import matplotlib
from rdkit.Chem.Draw.mplCanvas import Canvas
except ImportError:
pass
except RuntimeError: # happens with GTK can't initialize
pass
else:
unittest.main()
| bsd-3-clause |
fengzhe29888/gnuradio-old | gr-digital/examples/example_costas.py | 49 | 5316 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_costas(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.cst = digital.costas_loop_cc(bw, 2)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_cst = blocks.vector_sink_c()
self.vsnk_frq = blocks.vector_sink_f()
self.connect(self.src, self.rrc, self.chn, self.cst, self.vsnk_cst)
self.connect(self.rrc, self.vsnk_src)
self.connect((self.cst,1), self.vsnk_frq)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.0,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.707,
help="Set the simulation's phase offset [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_costas(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset)
put.run()
data_src = scipy.array(put.vsnk_src.data())
    # Convert the Costas loop's LO frequency from rads/sec to Hz
data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)
# adjust this to align with the data.
data_cst = scipy.array(3*[0,]+list(put.vsnk_cst.data()))
# Plot the Costas loop's LO frequency
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
s1 = f1.add_subplot(2,2,1)
s1.plot(data_frq)
s1.set_title("Costas LO")
s1.set_xlabel("Samples")
s1.set_ylabel("Frequency (normalized Hz)")
# Plot the IQ symbols
s3 = f1.add_subplot(2,2,2)
s3.plot(data_src.real, data_src.imag, "o")
s3.plot(data_cst.real, data_cst.imag, "rx")
s3.set_title("IQ")
s3.set_xlabel("Real part")
s3.set_ylabel("Imag part")
s3.set_xlim([-2, 2])
s3.set_ylim([-2, 2])
# Plot the symbols in time
s4 = f1.add_subplot(2,2,3)
s4.set_position([0.125, 0.05, 0.775, 0.4])
s4.plot(data_src.real, "o-")
s4.plot(data_cst.real, "rx-")
s4.set_title("Symbols")
s4.set_xlabel("Samples")
s4.set_ylabel("Real Part of Signals")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
bnaul/scikit-learn | examples/preprocessing/plot_all_scaling.py | 14 | 13721 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
=============================================================
Compare the effect of different scalers on data with outliers
=============================================================
Feature 0 (median income in a block) and feature 5 (number of households) of
the :ref:`california_housing_dataset` have very
different scales and contain some very large outliers. These two
characteristics lead to difficulties to visualize the data and, more
importantly, they can degrade the predictive performance of many machine
learning algorithms. Unscaled data can also slow down or even prevent the
convergence of many gradient-based estimators.
Indeed many estimators are designed with the assumption that each feature takes
values close to zero or more importantly that all features vary on comparable
scales. In particular, metric-based and gradient-based estimators often assume
approximately standardized data (centered features with unit variances). A
notable exception are decision tree-based estimators that are robust to
arbitrary scaling of the data.
This example uses different scalers, transformers, and normalizers to bring the
data within a pre-defined range.
Scalers are linear (or more precisely affine) transformers and differ from each
other in the way they estimate the parameters used to shift and scale each
feature.
:class:`~sklearn.preprocessing.QuantileTransformer` provides non-linear
transformations in which distances
between marginal outliers and inliers are shrunk.
:class:`~sklearn.preprocessing.PowerTransformer` provides
non-linear transformations in which data is mapped to a normal distribution to
stabilize variance and minimize skewness.
Unlike the previous transformations, normalization refers to a per sample
transformation instead of a per feature transformation.
The following code is a bit verbose, feel free to jump directly to the analysis
of the results_.
"""
# Author: Raghav RV <rvraghav93@gmail.com>
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Thomas Unterthiner
# License: BSD 3 clause
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib import cm
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import PowerTransformer
from sklearn.datasets import fetch_california_housing
print(__doc__)
dataset = fetch_california_housing()
X_full, y_full = dataset.data, dataset.target
# Take only 2 features to make visualization easier
# Feature of 0 has a long tail distribution.
# Feature 5 has a few but very large outliers.
X = X_full[:, [0, 5]]
distributions = [
('Unscaled data', X),
('Data after standard scaling',
StandardScaler().fit_transform(X)),
('Data after min-max scaling',
MinMaxScaler().fit_transform(X)),
('Data after max-abs scaling',
MaxAbsScaler().fit_transform(X)),
('Data after robust scaling',
RobustScaler(quantile_range=(25, 75)).fit_transform(X)),
('Data after power transformation (Yeo-Johnson)',
PowerTransformer(method='yeo-johnson').fit_transform(X)),
('Data after power transformation (Box-Cox)',
PowerTransformer(method='box-cox').fit_transform(X)),
('Data after quantile transformation (uniform pdf)',
QuantileTransformer(output_distribution='uniform')
.fit_transform(X)),
('Data after quantile transformation (gaussian pdf)',
QuantileTransformer(output_distribution='normal')
.fit_transform(X)),
('Data after sample-wise L2 normalizing',
Normalizer().fit_transform(X)),
]
# scale the output between 0 and 1 for the colorbar
y = minmax_scale(y_full)
# plasma does not exist in matplotlib < 1.5
cmap = getattr(cm, 'plasma_r', cm.hot_r)
def create_axes(title, figsize=(16, 6)):
fig = plt.figure(figsize=figsize)
fig.suptitle(title)
# define the axis for the first plot
left, width = 0.1, 0.22
bottom, height = 0.1, 0.7
bottom_h = height + 0.15
left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.1]
rect_histy = [left_h, bottom, 0.05, height]
ax_scatter = plt.axes(rect_scatter)
ax_histx = plt.axes(rect_histx)
ax_histy = plt.axes(rect_histy)
# define the axis for the zoomed-in plot
left = width + left + 0.2
left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.1]
rect_histy = [left_h, bottom, 0.05, height]
ax_scatter_zoom = plt.axes(rect_scatter)
ax_histx_zoom = plt.axes(rect_histx)
ax_histy_zoom = plt.axes(rect_histy)
# define the axis for the colorbar
left, width = width + left + 0.13, 0.01
rect_colorbar = [left, bottom, width, height]
ax_colorbar = plt.axes(rect_colorbar)
return ((ax_scatter, ax_histy, ax_histx),
(ax_scatter_zoom, ax_histy_zoom, ax_histx_zoom),
ax_colorbar)
def plot_distribution(axes, X, y, hist_nbins=50, title="",
x0_label="", x1_label=""):
ax, hist_X1, hist_X0 = axes
ax.set_title(title)
ax.set_xlabel(x0_label)
ax.set_ylabel(x1_label)
# The scatter plot
colors = cmap(y)
ax.scatter(X[:, 0], X[:, 1], alpha=0.5, marker='o', s=5, lw=0, c=colors)
# Removing the top and the right spine for aesthetics
# make nice axis layout
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
# Histogram for axis X1 (feature 5)
hist_X1.set_ylim(ax.get_ylim())
hist_X1.hist(X[:, 1], bins=hist_nbins, orientation='horizontal',
color='grey', ec='grey')
hist_X1.axis('off')
# Histogram for axis X0 (feature 0)
hist_X0.set_xlim(ax.get_xlim())
hist_X0.hist(X[:, 0], bins=hist_nbins, orientation='vertical',
color='grey', ec='grey')
hist_X0.axis('off')
# %%
# Two plots will be shown for each scaler/normalizer/transformer. The left
# figure will show a scatter plot of the full data set while the right figure
# will exclude the extreme values considering only 99 % of the data set,
# excluding marginal outliers. In addition, the marginal distributions for each
# feature will be shown on the sides of the scatter plot.
def make_plot(item_idx):
title, X = distributions[item_idx]
ax_zoom_out, ax_zoom_in, ax_colorbar = create_axes(title)
axarr = (ax_zoom_out, ax_zoom_in)
plot_distribution(axarr[0], X, y, hist_nbins=200,
x0_label="Median Income",
x1_label="Number of households",
title="Full data")
# zoom-in
zoom_in_percentile_range = (0, 99)
cutoffs_X0 = np.percentile(X[:, 0], zoom_in_percentile_range)
cutoffs_X1 = np.percentile(X[:, 1], zoom_in_percentile_range)
non_outliers_mask = (
np.all(X > [cutoffs_X0[0], cutoffs_X1[0]], axis=1) &
np.all(X < [cutoffs_X0[1], cutoffs_X1[1]], axis=1))
plot_distribution(axarr[1], X[non_outliers_mask], y[non_outliers_mask],
hist_nbins=50,
x0_label="Median Income",
x1_label="Number of households",
title="Zoom-in")
norm = mpl.colors.Normalize(y_full.min(), y_full.max())
mpl.colorbar.ColorbarBase(ax_colorbar, cmap=cmap,
norm=norm, orientation='vertical',
label='Color mapping for values of y')
# %%
# .. _results:
#
# Original data
# -------------
#
# Each transformation is plotted showing two transformed features, with the
# left plot showing the entire dataset, and the right zoomed-in to show the
# dataset without the marginal outliers. A large majority of the samples are
# compacted to a specific range, [0, 10] for the median income and [0, 6] for
# the number of households. Note that there are some marginal outliers (some
# blocks have more than 1200 households). Therefore, a specific pre-processing
# can be very beneficial depending of the application. In the following, we
# present some insights and behaviors of those pre-processing methods in the
# presence of marginal outliers.
make_plot(0)
# %%
# StandardScaler
# --------------
#
# :class:`~sklearn.preprocessing.StandardScaler` removes the mean and scales
# the data to unit variance. The scaling shrinks the range of the feature
# values as shown in the left figure below.
# However, the outliers have an influence when computing the empirical mean and
# standard deviation. Note in particular that because the outliers on each
# feature have different magnitudes, the spread of the transformed data on
# each feature is very different: most of the data lie in the [-2, 4] range for
# the transformed median income feature while the same data is squeezed in the
# smaller [-0.2, 0.2] range for the transformed number of households.
#
# :class:`~sklearn.preprocessing.StandardScaler` therefore cannot guarantee
# balanced feature scales in the
# presence of outliers.
make_plot(1)
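# Illustrative aside (a minimal sketch, not part of the upstream example): a
# single extreme value inflates the mean and standard deviation used by
# StandardScaler, squeezing the inliers; the toy column below is an assumption.
_toy_std = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9., 1000.]).reshape(-1, 1)
print("StandardScaler:", StandardScaler().fit_transform(_toy_std).ravel())
print("RobustScaler:  ", RobustScaler().fit_transform(_toy_std).ravel())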
# %%
# MinMaxScaler
# ------------
#
# :class:`~sklearn.preprocessing.MinMaxScaler` rescales the data set such that
# all feature values are in
# the range [0, 1] as shown in the right panel below. However, this scaling
# compresses all inliers into the narrow range [0, 0.005] for the transformed
# number of households.
#
# Both :class:`~sklearn.preprocessing.StandardScaler` and
# :class:`~sklearn.preprocessing.MinMaxScaler` are very sensitive to the
# presence of outliers.
make_plot(2)
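# Illustrative aside (a minimal sketch, not part of the upstream example): with
# one large outlier, MinMaxScaler squeezes all inliers next to 0; the toy column
# below is an assumption used only for illustration.
_toy_mm = np.array([[1.0], [2.0], [3.0], [1000.0]])
print(MinMaxScaler().fit_transform(_toy_mm).ravel())  # ~[0., 0.001, 0.002, 1.]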
# %%
# MaxAbsScaler
# ------------
#
# :class:`~sklearn.preprocessing.MaxAbsScaler` is similar to
# :class:`~sklearn.preprocessing.MinMaxScaler` except that the
# values are mapped to the range [-1, 1] by dividing each feature by its
# maximum absolute value. On positive-only data, both scalers
# behave similarly.
# :class:`~sklearn.preprocessing.MaxAbsScaler` therefore also suffers from
# the presence of large outliers.
make_plot(3)
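# Illustrative aside (a minimal sketch, not part of the upstream example):
# MaxAbsScaler divides each feature by its largest absolute value, so an
# outlier again compresses the inliers; the toy column is an assumption.
_toy_ma = np.array([[-2.0], [1.0], [400.0]])
print(MaxAbsScaler().fit_transform(_toy_ma).ravel())  # [-0.005, 0.0025, 1.]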
# %%
# RobustScaler
# ------------
#
# Unlike the previous scalers, the centering and scaling statistics of
# :class:`~sklearn.preprocessing.RobustScaler`
# is based on percentiles and are therefore not influenced by a few
# number of very large marginal outliers. Consequently, the resulting range of
# the transformed feature values is larger than for the previous scalers and,
# more importantly, are approximately similar: for both features most of the
# transformed values lie in a [-2, 3] range as seen in the zoomed-in figure.
# Note that the outliers themselves are still present in the transformed data.
# If a separate outlier clipping is desirable, a non-linear transformation is
# required (see below).
make_plot(4)
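# Illustrative aside (a minimal sketch, not part of the upstream example): the
# fitted statistics of RobustScaler are the per-feature median and IQR, so a
# single outlier barely moves them; the toy column is an assumption.
_toy_rs = np.array([[1.0], [2.0], [3.0], [4.0], [100.0]])
_rs = RobustScaler(quantile_range=(25, 75)).fit(_toy_rs)
print(_rs.center_, _rs.scale_)  # median [3.] and interquartile range [2.]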
# %%
# PowerTransformer
# ----------------
#
# :class:`~sklearn.preprocessing.PowerTransformer` applies a power
# transformation to each feature to make the data more Gaussian-like in order
# to stabilize variance and minimize skewness. Currently the Yeo-Johnson
# and Box-Cox transforms are supported and the optimal
# scaling factor is determined via maximum likelihood estimation in both
# methods. By default, :class:`~sklearn.preprocessing.PowerTransformer` applies
# zero-mean, unit variance normalization. Note that
# Box-Cox can only be applied to strictly positive data. Income and number of
# households happen to be strictly positive, but if negative values are present
# the Yeo-Johnson transform is preferred.
make_plot(5)
make_plot(6)
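# Illustrative aside (a minimal sketch, not part of the upstream example):
# Yeo-Johnson accepts negative values while Box-Cox rejects them; the toy
# column below is an assumption used only for illustration.
_toy_pt = np.array([[-1.0], [0.1], [0.5], [1.0], [2.0]])
print(PowerTransformer(method='yeo-johnson').fit_transform(_toy_pt).ravel())
try:
    PowerTransformer(method='box-cox').fit_transform(_toy_pt)
except ValueError as err:
    print("Box-Cox needs strictly positive data:", err)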
# %%
# QuantileTransformer (uniform output)
# ------------------------------------
#
# :class:`~sklearn.preprocessing.QuantileTransformer` applies a non-linear
# transformation such that the
# probability density function of each feature will be mapped to a uniform
# or Gaussian distribution. In this case, all the data, including outliers,
# will be mapped to a uniform distribution with the range [0, 1], making
# outliers indistinguishable from inliers.
#
# :class:`~sklearn.preprocessing.RobustScaler` and
# :class:`~sklearn.preprocessing.QuantileTransformer` are robust to outliers in
# the sense that adding or removing outliers in the training set will yield
# approximately the same transformation. But contrary to
# :class:`~sklearn.preprocessing.RobustScaler`,
# :class:`~sklearn.preprocessing.QuantileTransformer` will also automatically
# collapse outliers by setting them to the a priori defined range boundaries
# (0 and 1). This can result in saturation artifacts for extreme values.
make_plot(7)
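# Illustrative aside (a minimal sketch, not part of the upstream example):
# values outside the fitted range are collapsed onto the boundaries 0 and 1;
# the toy column and query points below are assumptions.
_toy_qt = np.arange(20, dtype=np.float64).reshape(-1, 1)
_qt = QuantileTransformer(n_quantiles=10, output_distribution='uniform').fit(_toy_qt)
print(_qt.transform(np.array([[-100.0], [1000.0]])).ravel())  # [0., 1.]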
##############################################################################
# QuantileTransformer (Gaussian output)
# -------------------------------------
#
# To map to a Gaussian distribution, set the parameter
# ``output_distribution='normal'``.
make_plot(8)
# %%
# Normalizer
# ----------
#
# The :class:`~sklearn.preprocessing.Normalizer` rescales the vector for each
# sample to have unit norm,
# independently of the distribution of the samples. It can be seen on both
# figures below where all samples are mapped onto the unit circle. In our
# example the two selected features have only positive values; therefore the
# transformed data only lie in the positive quadrant. This would not be the
# case if some original features had a mix of positive and negative values.
make_plot(9)
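# Illustrative aside (a minimal sketch, not part of the upstream example): each
# sample (row) is rescaled to unit L2 norm, independently of the other rows;
# the toy matrix below is an assumption used only for illustration.
_toy_nm = np.array([[3.0, 4.0], [1.0, 0.0]])
_unit_rows = Normalizer(norm='l2').fit_transform(_toy_nm)
print(_unit_rows, np.linalg.norm(_unit_rows, axis=1))  # rows [0.6, 0.8], [1., 0.]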
plt.show()
| bsd-3-clause |
tsgit/invenio | modules/bibauthorid/lib/bibauthorid_tortoise.py | 5 | 16153 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import os
# import cPickle as SER
import msgpack as SER
import gzip as filehandler
# This is supposed to defeat a bit of the python vm performance losses:
import sys
sys.setcheckinterval(1000000)
try:
from collections import defaultdict
except:
from invenio.containerutils import defaultdict
from invenio.bibauthorid_logutils import Logger
from invenio.bibauthorid_cluster_set import delayed_cluster_sets_from_marktables
from invenio.bibauthorid_cluster_set import delayed_cluster_sets_from_personid
from invenio.bibauthorid_wedge import wedge
from invenio.bibauthorid_name_utils import generate_last_name_cluster_str
from invenio.bibauthorid_backinterface import empty_tortoise_results_table
from invenio.bibauthorid_backinterface import remove_clusters_by_name
from invenio.bibauthorid_prob_matrix import prepare_matrix
# Scheduler is [temporarily] deprecated in favour of the much simpler schedule_workers
# from invenio.bibauthorid_scheduler import schedule, matrix_coefs
from invenio.bibauthorid_general_utils import schedule_workers
logger = Logger("tortoise")
'''
There are three main entry points to tortoise
i) tortoise
    Performs a disambiguation iteration.
    The argument pure indicates whether to use
the claims and the rejections or not.
Use pure=True only to test the accuracy of tortoise.
ii) tortoise_from_scratch
NOT RECOMMENDED!
Use this function only if you have just
installed invenio and this is your first
disambiguation or if personid is broken.
iii) tortoise_last_name
Computes the clusters for only one last name
    group. It is primarily used for testing. It
may also be used to fix a broken last name
cluster. It does not involve multiprocessing
    so it is convenient to debug with pdb.
'''
# Exit codes:
# The standard ones are not well documented
# so we are using random numbers.
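# A minimal usage sketch of the three entry points described in the module
# docstring (illustrative only: a real run needs a configured Invenio/personid
# database, and the example last name below is an arbitrary assumption).
def _tortoise_usage_sketch():
    tortoise_from_scratch()                  # (ii) first-ever disambiguation
    tortoise(pure=False, last_run=None)      # (i) regular disambiguation iteration
    tortoise_last_name('Ellis, J.')          # (iii) single last-name cluster, easy to pdb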
def tortoise_from_scratch():
logger.log("Preparing cluster sets.")
cluster_sets, _lnames, sizes = delayed_cluster_sets_from_marktables()
logger.log("Building all matrices.")
cluster_sets = [(s,) for s in cluster_sets]
schedule_workers(lambda x: force_create_matrix(x, force=True), cluster_sets)
empty_tortoise_results_table()
logger.log("Preparing cluster sets.")
cluster_sets, _lnames, sizes = delayed_cluster_sets_from_marktables()
cluster_sets = [(s(),) for s in cluster_sets]
logger.log("Starting disambiguation.")
schedule_workers(wedge_and_store, cluster_sets)
def tortoise(pure=False,
force_matrix_creation=False,
skip_matrix_creation=False,
last_run=None):
assert not force_matrix_creation or not skip_matrix_creation
# The computation must be forced in case we want
# to compute pure results
force_matrix_creation = force_matrix_creation or pure
if not skip_matrix_creation:
logger.log("Preparing cluster sets.")
clusters, _lnames, sizes = delayed_cluster_sets_from_personid(pure, last_run)
logger.log("Building all matrices.")
clusters = [(s,) for s in clusters]
schedule_workers(lambda x: force_create_matrix(x, force=force_matrix_creation), clusters)
logger.log("Preparing cluster sets.")
clusters, _lnames, sizes = delayed_cluster_sets_from_personid(pure, last_run)
clusters = [(s(),) for s in clusters]
logger.log("Starting disambiguation.")
schedule_workers(wedge_and_store, clusters)
def tortoise_last_name(name, wedge_threshold=None, from_mark=True, pure=False):
logger.log('Start working on %s' % name)
assert not(from_mark and pure)
lname = generate_last_name_cluster_str(name)
if from_mark:
logger.log(' ... from mark!')
clusters, lnames, sizes = delayed_cluster_sets_from_marktables([lname])
logger.log(' ... delayed done')
else:
logger.log(' ... from pid, pure=%s' % str(pure))
clusters, lnames, sizes = delayed_cluster_sets_from_personid(pure)
logger.log(' ... delayed pure done!')
try:
idx = lnames.index(lname)
cluster = clusters[idx]
size = sizes[idx]
cluster_set = cluster()
logger.log("Found, %s(%s). Total number of bibs: %d." % (name, lname, size))
create_matrix(cluster_set, False)
wedge_and_store(cluster_set)
except (IndexError, ValueError):
logger.log("Sorry, %s not found in the last name clusters" % (lname))
def tortoise_last_names(names_args_list):
schedule_workers(tortoise_last_name, names_args_list, with_kwargs=True)
def _collect_statistics_lname_coeff(params):
lname = params[0]
coeff = params[1]
clusters, lnames, sizes = delayed_cluster_sets_from_marktables([lname])
try:
idx = lnames.index(lname)
cluster = clusters[idx]
size = sizes[idx]
logger.log("Found, %s. Total number of bibs: %d." % (lname, size))
cluster_set = cluster()
create_matrix(cluster_set, False)
bibs = cluster_set.num_all_bibs
expected = bibs * (bibs - 1) / 2
logger.log("Start working on %s. Total number of bibs: %d, "
"maximum number of comparisons: %d"
% (cluster_set.last_name, bibs, expected))
wedge(cluster_set, True, coeff)
remove_clusters_by_name(cluster_set.last_name)
except (IndexError, ValueError):
logger.log("Sorry, %s not found in the last name clusters," % (lname))
def _create_matrix(lname):
clusters, lnames, sizes = delayed_cluster_sets_from_marktables([lname])
try:
idx = lnames.index(lname)
cluster = clusters[idx]
size = sizes[idx]
logger.log("Found, %s. Total number of bibs: %d." % (lname, size))
cluster_set = cluster()
create_matrix(cluster_set, False)
bibs = cluster_set.num_all_bibs
expected = bibs * (bibs - 1) / 2
logger.log("Start working on %s. Total number of bibs: %d, "
"maximum number of comparisons: %d"
% (cluster_set.last_name, bibs, expected))
cluster_set.store()
except (IndexError, ValueError):
logger.log("Sorry, %s not found in the last name clusters, not creating matrix" % (lname))
def tortoise_tweak_coefficient(lastnames, min_coef, max_coef, stepping, build_matrix=True):
logger.log('Coefficient tweaking!')
logger.log('Cluster sets from mark...')
lnames = set([generate_last_name_cluster_str(n) for n in lastnames])
coefficients = [x / 100. for x in range(int(min_coef * 100), int(max_coef * 100), int(stepping * 100))]
if build_matrix:
schedule_workers(_create_matrix, lnames)
schedule_workers(_collect_statistics_lname_coeff, ((x, y) for x in lnames for y in coefficients))
def tortoise_coefficient_statistics(pickle_output=None, generate_graphs=True):
import matplotlib.pyplot as plt
plt.ioff()
def _gen_plot(data, filename):
plt.clf()
ax = plt.subplot(111)
ax.grid(visible=True)
x = sorted(data.keys())
w = [data[k][0] for k in x]
try:
wscf = max(w)
except:
wscf = 0
w = [float(i) / wscf for i in w]
y = [data[k][1] for k in x]
maxi = [data[k][3] for k in x]
mini = [data[k][2] for k in x]
lengs = [data[k][4] for k in x]
try:
ml = float(max(lengs))
except:
ml = 1
lengs = [k / ml for k in lengs]
normalengs = [data[k][5] for k in x]
ax.plot(x, y, '-o', label='avg')
ax.plot(x, maxi, '-o', label='max')
ax.plot(x, mini, '-o', label='min')
ax.plot(x, w, '-x', label='norm %s' % str(wscf))
ax.plot(x, lengs, '-o', label='acl %s' % str(int(ml)))
ax.plot(x, normalengs, '-o', label='ncl')
plt.ylim(ymax=1., ymin=-0.01)
plt.xlim(xmax=1., xmin=-0.01)
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=6, mode="expand", borderaxespad=0.)
plt.savefig(filename)
files = ['/tmp/baistats/' + x for x in os.listdir('/tmp/baistats/') if x.startswith('cluster_status_report_pid')]
fnum = float(len(files))
quanta = .1 / fnum
total_stats = 0
used_coeffs = set()
used_clusters = set()
# av_counter, avg, min, max, nclus, normalized_avg
cluster_stats = defaultdict(lambda: defaultdict(lambda: [0., 0., 0., 0., 0., 0.]))
coeff_stats = defaultdict(lambda: [0., 0., 0., 0., 0., 0.])
def gen_graphs(only_synthetic=False):
logger.update_status(0, 'Generating coefficients graph...')
_gen_plot(coeff_stats, '/tmp/graphs/AAAAA-coefficients.svg')
if not only_synthetic:
cn = cluster_stats.keys()
l = float(len(cn))
for i, c in enumerate(cn):
logger.update_status(i / l, 'Generating name graphs... %s' % str(c))
_gen_plot(cluster_stats[c], '/tmp/graphs/CS-%s.png' % str(c))
for i, fi in enumerate(files):
if generate_graphs:
if i % 1000 == 0:
gen_graphs(True)
f = filehandler.open(fi, 'r')
status = i / fnum
logger.update_status(status, 'Loading ' + fi[fi.find('lastname') + 9:])
contents = SER.load(f)
f.close()
cur_coef = contents[0]
cur_clust = contents[1]
cur_maxlen = float(contents[3])
if cur_coef:
total_stats += 1
used_coeffs.add(cur_coef)
used_clusters.add(cur_clust)
logger.update_status(status + 0.2 * quanta, ' Computing averages...')
cur_clen = len(contents[2])
cur_coeffs = [x[2] for x in contents[2]]
cur_clustnumber = float(len(set([x[0] for x in contents[2]])))
assert cur_clustnumber > 0 and cur_clustnumber < cur_maxlen, "Error, found log with strange clustnumber! %s %s %s %s" % (str(cur_clust), str(cur_coef), str(cur_maxlen),
str(cur_clustnumber))
if cur_coeffs:
assert len(cur_coeffs) == cur_clen and cur_coeffs, "Error, there is a cluster witohut stuff? %s %s %s" % (
str(cur_clust), str(cur_coef), str(cur_coeffs))
assert all([x >= 0 and x <= 1 for x in cur_coeffs]), "Error, a coefficient is wrong here! Check me! %s %s %s" % (
str(cur_clust), str(cur_coef), str(cur_coeffs))
cur_min = min(cur_coeffs)
cur_max = max(cur_coeffs)
cur_avg = sum(cur_coeffs) / cur_clen
logger.update_status(status + 0.4 * quanta, ' comulative per coeff...')
avi = coeff_stats[cur_coef][0]
# number of points
coeff_stats[cur_coef][0] = avi + 1
# average of coefficients
coeff_stats[cur_coef][1] = (coeff_stats[cur_coef][1] * avi + cur_avg) / (avi + 1)
# min coeff
coeff_stats[cur_coef][2] = min(coeff_stats[cur_coef][2], cur_min)
# max coeff
coeff_stats[cur_coef][3] = max(coeff_stats[cur_coef][3], cur_max)
# avg number of clusters
coeff_stats[cur_coef][4] = (coeff_stats[cur_coef][4] * avi + cur_clustnumber) / (avi + 1)
# normalized avg number of clusters
coeff_stats[cur_coef][5] = (coeff_stats[cur_coef][5] * avi + cur_clustnumber / cur_maxlen) / (avi + 1)
logger.update_status(status + 0.6 * quanta, ' comulative per cluster per coeff...')
avi = cluster_stats[cur_clust][cur_coef][0]
cluster_stats[cur_clust][cur_coef][0] = avi + 1
cluster_stats[cur_clust][cur_coef][1] = (
cluster_stats[cur_clust][cur_coef][1] * avi + cur_avg) / (avi + 1)
cluster_stats[cur_clust][cur_coef][2] = min(cluster_stats[cur_clust][cur_coef][2], cur_min)
cluster_stats[cur_clust][cur_coef][3] = max(cluster_stats[cur_clust][cur_coef][3], cur_max)
cluster_stats[cur_clust][cur_coef][4] = (
cluster_stats[cur_clust][cur_coef][4] * avi + cur_clustnumber) / (avi + 1)
cluster_stats[cur_clust][cur_coef][5] = (
cluster_stats[cur_clust][cur_coef][5] * avi + cur_clustnumber / cur_maxlen) / (avi + 1)
logger.update_status_final('Done!')
if generate_graphs:
gen_graphs()
if pickle_output:
logger.update_status(0, 'Dumping to file...')
f = open(pickle_output, 'w')
SER.dump(
{'cluster_stats': dict((x,
dict(cluster_stats[x])) for x in cluster_stats.iterkeys()),
'coeff_stats': dict((coeff_stats))},
f)
f.close()
def create_matrix(cluster_set, force):
bibs = cluster_set.num_all_bibs
expected = bibs * (bibs - 1) / 2
logger.log("Start building matrix for %s. Total number of bibs: %d, "
"maximum number of comparisons: %d"
% (cluster_set.last_name, bibs, expected))
return prepare_matrix(cluster_set, force)
def force_create_matrix(cluster_set, force):
logger.log("Building a cluster set.")
return create_matrix(cluster_set(), force)
def wedge_and_store(cluster_set, wedge_threshold=None):
bibs = cluster_set.num_all_bibs
expected = bibs * (bibs - 1) / 2
logger.log("Start working on %s. Total number of bibs: %d, "
"maximum number of comparisons: %d"
% (cluster_set.last_name, bibs, expected))
wedge(cluster_set, force_wedge_thrsh=wedge_threshold)
remove_clusters_by_name(cluster_set.last_name)
cluster_set.store()
return True
def force_wedge_and_store(cluster_set):
logger.log("Building a cluster set.")
return wedge_and_store(cluster_set())
#[temporarily] deprecated
# def schedule_create_matrix(cluster_sets, sizes, force):
# def create_job(cluster):
# def ret():
# return force_create_matrix(cluster, force)
# return ret
#
# memfile_path = None
# if bconfig.DEBUG_PROCESS_PEAK_MEMORY:
# tt = datetime.now()
# tt = (tt.hour, tt.minute, tt.day, tt.month, tt.year)
# memfile_path = ('%smatrix_memory_%d:%d_%d-%d-%d.log' %
# ((bconfig.TORTOISE_FILES_PATH,) + tt))
#
# return schedule(map(create_job, cluster_sets),
# sizes,
# create_approx_func(matrix_coefs),
# memfile_path)
#
#
# def schedule_wedge_and_store(cluster_sets, sizes):
# def create_job(cluster):
# def ret():
# return force_wedge_and_store(cluster)
# return ret
#
# memfile_path = None
# if bconfig.DEBUG_PROCESS_PEAK_MEMORY:
# tt = datetime.now()
# tt = (tt.hour, tt.minute, tt.day, tt.month, tt.year)
# memfile_path = ('%swedge_memory_%d:%d_%d-%d-%d.log' %
# ((bconfig.TORTOISE_FILES_PATH,) + tt))
#
# return schedule(map(create_job, cluster_sets),
# sizes,
# create_approx_func(matrix_coefs),
# memfile_path)
| gpl-2.0 |
poryfly/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 244 | 6011 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed up of LSHForest queries compared to the brute force
method of exact nearest neighbors is measured for the
aforementioned settings. In general, the speed up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
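# Illustrative aside (a minimal sketch, not part of the benchmark): the accuracy
# above is the mean overlap between approximate and exact neighbor index sets;
# the toy index arrays below are assumptions used only for illustration.
_exact_toy = np.array([[0, 1, 2], [3, 4, 5]])
_approx_toy = np.array([[0, 2, 9], [3, 4, 5]])
print(np.mean([np.in1d(a, e).mean()
               for a, e in zip(_approx_toy, _exact_toy)]))  # (2/3 + 3/3) / 2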
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
PMBio/limix | limix/scripts/iSet_postprocess.py | 1 | 2451 | #! /usr/bin/env python
# Copyright(c) 2014, The mtSet developers (Francesco Paolo Casale, Barbara Rakitsch, Oliver Stegle)
# All rights reserved.
from optparse import OptionParser
from limix.mtSet.core.iset_utils import calc_emp_pv_eff
import pandas as pd
import glob
import os
import time
import sys
def entry_point():
parser = OptionParser()
parser.add_option("--resdir", dest='resdir', type=str, default='./')
parser.add_option("--outfile", dest='outfile', type=str, default=None)
#parser.add_option("--manhattan_plot", dest='manhattan',action="store_true",default=False)
parser.add_option("--tol", dest='tol', type=float, default=4e-3)
(options, args) = parser.parse_args()
resdir = options.resdir
out_file = options.outfile
tol = options.tol
print('.. load permutation results')
file_name = os.path.join(resdir, '*.iSet.perm')
files = glob.glob(file_name)
df0 = pd.DataFrame()
for _file in files:
print(_file)
df0 = df0.append(pd.read_csv(_file, index_col=0))
print('.. load real results')
file_name = os.path.join(resdir, '*.iSet.real')
files = glob.glob(file_name)
df = pd.DataFrame()
for _file in files:
print(_file)
df = df.append(pd.read_csv(_file, index_col=0))
#calculate P values for the three tests
for test in ['mtSet', 'iSet', 'iSet-het']:
df[test+' pv'] = calc_emp_pv_eff(df[test+' LLR'].values,
df0[test+' LLR0'].values)
print(('.. saving %s' % out_file+'.res'))
df.to_csv(out_file+'.res')
if 0:
if options.manhattan:
import limix.utils.plot as plot
if not os.path.exists(options.outfile):
os.makedirs(options.outfile)
def plot_manhattan(pv, out_file):
import matplotlib.pylab as PLT
import scipy as SP
posCum = SP.arange(pv.shape[0])
idx=~SP.isnan(pv)
plot.plot_manhattan(posCum[idx],pv[idx],alphaNS=1.0,alphaS=1.0)
PLT.savefig(out_file)
for test in ['mtSet', 'iSet', 'iSet-het']:
out_file = os.path.join(options.outfile,
'iSet.%s_pv.manhattan.png'\
% (test,))
print((".. saving " + out_file))
plot_manhattan(df['%s pv' % test].values, out_file)
| apache-2.0 |
jcnelson/syndicate | papers/paper-nsdi2013/data/tools/analysis/Nr1w.py | 2 | 9504 | #!/usr/bin/python
import analysis
import os
import sys
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
def eval_dict( s ):
ret = None
try:
exec("ret = " + s)
except:
return None
return ret
def cdf_compare( dists, title, xl, xr, yl, yr, labels ):
mm = min(dists[0])
ma = max(dists[0])
cnt = len(dists[0])
for i in xrange(1,len(dists)):
mm = min( mm, min(dists[i]) )
ma = max( ma, max(dists[i]) )
cnt = min( cnt, len(dists[i]) )
print "cnt = " + str(cnt)
x = np.linspace( mm, ma, cnt )
i = 0
for dist in dists:
ecdf = sm.distributions.ECDF( dist )
plt.step( x, ecdf(x), label=labels[i] )
i += 1
dist.sort()
#print dist
plt.title( title )
plt.xticks( xr )
plt.yticks( yr )
plt.xlabel( xl )
plt.ylabel( yl )
plt.legend( labels, loc=4 )
plt.show()
if __name__ == "__main__":
syndicate_data_1k = {}
syndicate_data_1M = {}
syndicate_data_50M = {}
s3_data_20k = {}
s3_data_50M = {}
s3_data_100blk = {}
s3_data_100blk_nocache = {}
plc_data_100blk = {}
syndicate_data_100blk = {}
intersection = []
for expfile in os.listdir( sys.argv[1] ):
expfd = open( os.path.join( sys.argv[1], expfile ), "r" )
expdata = analysis.parse_experiments( expfd )
expfd.close()
if len(expdata['fcdistro']) > 0 and "12" not in expdata['fcdistro']:
print >> sys.stderr, "%s: wrong distro '%s'" % (expfile, expdata['fcdistro'])
continue
syndicate_exp_1k = analysis.read_experiment_data( expdata, "Nr1w-x5-small-syndicate.py" )
syndicate_exp_1M = analysis.read_experiment_data( expdata, "Nr1w-x5-1M-syndicate.py" )
syndicate_exp_50M = analysis.read_experiment_data( expdata, "Nr1w-x5-50M-syndicate-4.py" )
syndicate_exp_100blk = analysis.read_experiment_data( expdata, "Nr1w-syndicate-3.py" )
s3_exp_20k = analysis.read_experiment_data( expdata, "Nr1w-x5.py" )
s3_exp_100blk = analysis.read_experiment_data( expdata, "Nr1w-x5-100blk-s3-cache-chunked.py" )
plc_exp_100blk = analysis.read_experiment_data( expdata, "Nr1w-x5-100blk-planetlab-cache-chunked.py" )
s3_exp_50M = analysis.read_experiment_data( expdata, "Nr1w-x5-50M.py" )
s3_exp_100blk_nocache = analysis.read_experiment_data( expdata, "Nr1w-x5-100blk-s3-chunked.py" )
intersect = True
"""
if syndicate_exp_1k != None and len(syndicate_exp_1k) > 0 and syndicate_exp_1k[0] != None:
syndicate_data_1k[expfile] = eval_dict( syndicate_exp_1k[0][0] )
else:
intersect = False
if syndicate_exp_1M != None and len(syndicate_exp_1M) > 0 and syndicate_exp_1M[0] != None:
syndicate_data_1M[expfile] = eval_dict( syndicate_exp_1M[0][0] )
else:
intersect = False
if syndicate_exp_50M != None and len(syndicate_exp_50M) > 0 and syndicate_exp_50M[0] != None:
syndicate_data_50M[expfile] = eval_dict( syndicate_exp_50M[0][0] )
else:
intersect = False
if s3_exp_20k != None and len(s3_exp_20k) > 0 and s3_exp_20k[0] != None:
s3_data_20k[expfile] = eval_dict( s3_exp_20k[0][0] )
else:
intersect = False
if s3_exp_50M != None and len(s3_exp_50M) > 0 and s3_exp_50M[0] != None:
s3_data_50M[expfile] = eval_dict( s3_exp_50M[0][0] )
else:
intersect = False
"""
if s3_exp_100blk != None and len(s3_exp_100blk) > 0 and s3_exp_100blk[0] != None:
s3_data_100blk[expfile] = eval_dict( s3_exp_100blk[0][0] )
else:
intersect = False
if plc_exp_100blk != None and len(plc_exp_100blk) > 0 and plc_exp_100blk[-1] != None:
plc_data_100blk[expfile] = eval_dict( plc_exp_100blk[-1][0] )
else:
intersect = False
if s3_exp_100blk_nocache != None and len(s3_exp_100blk_nocache) > 0 and s3_exp_100blk_nocache[-1] != None:
s3_data_100blk_nocache[expfile] = eval_dict( s3_exp_100blk_nocache[-1][0] )
else:
intersect = False
if syndicate_exp_100blk != None and len(syndicate_exp_100blk) > 0 and syndicate_exp_100blk[-1] != None:
syndicate_data_100blk[expfile] = eval_dict( syndicate_exp_100blk[-1][0] )
else:
intersect = False
if intersect:
intersection.append( expfile )
for expfile in os.listdir( sys.argv[1] ):
if expfile not in intersection:
print >> sys.stderr, "Node %s did not pass all tests" % expfile
print >> sys.stderr, "%s nodes have data" % len(intersection)
syndicate = { 'first_1k': [], 'last_1k': [], 'first_1m': [], 'last_1m': [], 'first_50m': [], 'last_50m': [], 'first_100blk': [], 'last_100blk': [] }
s3 = { 'first_20k': [], 'last_20k': [], 'first_50m': [], 'last_50m': [], 'first_100blk': [], 'last_100blk': [], 'first_100blk_nocache': [], 'last_100blk_nocache': [] }
plc = {'first_100blk' : [], 'last_100blk': [] }
num_valid = 0
slow = []
for node in intersection:
valid = True
#data_list = [("syndicate 1k", syndicate_data_1k), ("syndicate 1M", syndicate_data_1M), ("syndicate 50M", syndicate_data_50M), ("S3 20k", s3_data_20k), ("S3 50M", s3_data_50M), ("S3 100blk", s3_data_100blk), ("PLC 100blk", plc_data_100blk)]
data_list = [("S3 100blk", s3_data_100blk), ("PLC 100blk", plc_data_100blk), ("S3 nocache 100blk", s3_data_100blk_nocache), ("Syndicate 100blk", syndicate_data_100blk)]
for (data_name, data) in data_list:
if data.get(node) == None:
print >> sys.stderr, "%s: no data for %s" % (node, data_name)
valid = False
elif data[node] == None:
print >> sys.stderr, "%s: unparseable data" % (node, data_name)
valid = False
elif len(data[node]['exception']) > 0:
print >> sys.stderr, "%s: exceptions on %s" % (node, data_name)
valid = False
if not valid:
continue;
"""
syndicate['first_1k'].append( syndicate_data_1k[node]['end_recv'][0] - syndicate_data_1k[node]['start_recv'][0] )
syndicate['last_1k'].append( syndicate_data_1k[node]['end_recv'][-1] - syndicate_data_1k[node]['start_recv'][-1] )
syndicate['first_1m'].append( syndicate_data_1M[node]['end_recv'][0] - syndicate_data_1M[node]['start_recv'][0] )
syndicate['last_1m'].append( syndicate_data_1M[node]['end_recv'][-1] - syndicate_data_1M[node]['start_recv'][-1] )
syndicate['first_50m'].append( syndicate_data_50M[node]['end_recv'][0] - syndicate_data_50M[node]['start_recv'][0] )
syndicate['last_50m'].append( syndicate_data_50M[node]['end_recv'][-1] - syndicate_data_50M[node]['start_recv'][-1] )
s3['first_20k'].append( s3_data_20k[node]['end_recv'][0] - s3_data_20k[node]['start_recv'][0] )
s3['last_20k'].append( s3_data_20k[node]['end_recv'][-1] - s3_data_20k[node]['start_recv'][-1] )
s3['first_50m'].append( s3_data_50M[node]['end_recv'][0] - s3_data_50M[node]['start_recv'][0] )
s3['last_50m'].append( s3_data_50M[node]['end_recv'][-1] - s3_data_50M[node]['start_recv'][-1] )
"""
s3['first_100blk'].append( s3_data_100blk[node]['end_recv'][0] - s3_data_100blk[node]['start_recv'][0])
s3['last_100blk'].append( s3_data_100blk[node]['end_recv'][-1] - s3_data_100blk[node]['start_recv'][-1])
s3['first_100blk_nocache'].append( s3_data_100blk_nocache[node]['end_recv'][0] - s3_data_100blk_nocache[node]['start_recv'][0] )
plc['first_100blk'].append( plc_data_100blk[node]['end_recv'][0] - plc_data_100blk[node]['start_recv'][0])
plc['last_100blk'].append( plc_data_100blk[node]['end_recv'][-1] - plc_data_100blk[node]['start_recv'][-1])
syndicate['first_100blk'].append( syndicate_data_100blk[node]['end_recv'][0] - syndicate_data_100blk[node]['start_recv'][0] )
syndicate['last_100blk'].append( syndicate_data_100blk[node]['end_recv'][-1] - syndicate_data_100blk[node]['start_recv'][-1] )
if syndicate['first_100blk'][-1] > 150:
slow.append( node )
num_valid += 1
#print "s3_first_100blk = " + str(s3['first_100blk'])
#print "s3_last_100blk = " + str(s3['last_100blk'])
print "valid: " + str(num_valid)
print "slow: \n" + "\n".join(slow)
# first 1K vs last 1K
cdf_compare( [syndicate['first_100blk'], syndicate['last_100blk'], plc['first_100blk'] ], "Syndicate One-Writer-Many-Reader Download Times", "Seconds", np.arange(0, 1000, 100), "CDF(x)", np.arange(0, 1.05, 0.05), ["Syndicate 0% Cache Hit", "Syndicate 100% Cache Hit", "Python HTTP Server and Clients"] )
#cdf_compare( [plc['first_100blk'], s3['first_100blk']], "Amazon S3 vs PLC Cache Miss Download Times", "Seconds", np.arange(0, 425, 30), "CDF(x)", np.arange(0, 1.05, 0.05) )
cdf_compare( [s3['first_100blk'], s3['first_100blk_nocache']], "Amazon S3 Cache and Direct Download Times", "Seconds", np.arange(0, 1200, 100), "CDF(x)", np.arange(0, 1.05, 0.05), ["0% hit cache hit rate", "Direct Download"] )
cdf_compare( [s3['first_100blk'], s3['last_100blk']], "Amazon S3 Cache Miss and Cache Hit Download Times", "Seconds", np.arange(0, 425, 30), "CDF(x)", np.arange(0, 1.05, 0.05) )
    # The comparisons below rely on measurements (first_1k, first_50m, last_50m)
    # that are only collected in the commented-out block above, and they do not
    # pass the full cdf_compare() argument list, so they are left disabled here.
    #cdf_compare( [syndicate['first_1k'], syndicate['last_1k']] )
    #cdf_compare( [syndicate['first_50m'], s3['first_50m']] )
    #cdf_compare( [syndicate['last_50m'], s3['last_50m']] )
#cdf_compare( [syndicate['last_1m'], s3['last_20k']] )
| apache-2.0 |
iulian787/spack | var/spack/repos/builtin/packages/py-sncosmo/package.py | 5 | 1133 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySncosmo(PythonPackage):
"""SNCosmo is a Python library for high-level supernova cosmology
analysis."""
homepage = "http://sncosmo.readthedocs.io/"
url = "https://pypi.io/packages/source/s/sncosmo/sncosmo-1.2.0.tar.gz"
version('1.2.0', sha256='f3969eec5b25f60c70418dbd64765a2b4735bb53c210c61d0aab68916daea588')
# Required dependencies
# py-sncosmo binaries are duplicates of those from py-astropy
extends('python', ignore=r'bin/.*')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-astropy', type=('build', 'run'))
# Recommended dependencies
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-iminuit', type=('build', 'run'))
depends_on('py-emcee', type=('build', 'run'))
depends_on('py-nestle', type=('build', 'run'))
| lgpl-2.1 |
margulies/topography | utils_py/paths_find_similar.py | 4 | 4083 | #get_ipython().magic(u'matplotlib inline')
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
import scipy.io as sio
import h5py
import networkx as nx
import numpy as np
import gdist
def calcPaths(num):
length = nx.all_pairs_dijkstra_path(G, num)
length_paths = []
for node in length:
for target in length[node]:
if len(length[node][target]) == num:
length_paths.append(length[node][target])
labeled_paths = labels[length_paths]
same_labels = (squareform(pdist(labeled_paths)) < 1e-10).sum(axis=1)
return length_paths, labeled_paths, same_labels
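# Note: calcPaths uses the module-level graph G and label vector `labels` that
# are loaded further below. Each collected path with exactly `num` nodes becomes
# one row of labeled_paths, and same_labels[i] counts how many rows share the
# exact label sequence of row i (pairwise distance below 1e-10).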
def uniqueRows(labeled_paths, same_labels, cutoff):
a = labeled_paths[same_labels == cutoff]
uRows = np.unique(a.view(np.dtype((np.void, a.dtype.itemsize*a.shape[1])))).view(a.dtype).reshape(-1, a.shape[1])
return uRows
# print uRows  # uRows is local to uniqueRows()
def removePaths(labeled_paths, same_labels, vals, row):
ind = np.in1d(labeled_paths[:,row], vals).reshape(labeled_paths[:,row].shape)
labeled_paths_new = labeled_paths[ind]
same_labels_new = same_labels[ind]
return ind, labeled_paths_new, same_labels_new
def removePathsInverse(labeled_paths, same_labels, vals, row):
ind = np.in1d(labeled_paths[:,row], vals, invert=True).reshape(labeled_paths[:,row].shape)
labeled_paths_new = labeled_paths[ind]
same_labels_new = same_labels[ind]
return ind, labeled_paths_new, same_labels_new
def printAll(labeled_paths,same_labels):
print (('1 times:\n %s \n' % uniqueRows(labeled_paths,same_labels,1)))
print (('2 times:\n %s \n' % uniqueRows(labeled_paths,same_labels,2)))
print (('3 times:\n %s \n' % uniqueRows(labeled_paths,same_labels,3)))
print (('4 times:\n %s \n' % uniqueRows(labeled_paths,same_labels,4)))
print (('5 times:\n %s \n' % uniqueRows(labeled_paths,same_labels,5)))
print (('6 times:\n %s \n' % uniqueRows(labeled_paths,same_labels,6)))
def labelHist(num,labeled_paths):
a = np.zeros([num,18])
for i in xrange(0,num):
a[i] = np.histogram(labeled_paths[:,i],18, range=(1,18))[0]
return a.transpose()
def calcPlace(b):
c = np.zeros([len(b),1])
for i in xrange(0,len(b)):
if np.sum(b[i]) == 0:
c[i] = 0
else:
c[i] = np.average(np.array(xrange(0,num)) + 1, weights=b[i])
return c
'''load data:'''
fp = h5py.File('data/clus.mat')
fp.keys()
adj = fp['clus']['edge'][:]
labels = fp['clus']['edgeNet'][:].flatten()
G = nx.from_numpy_matrix(adj)
num=6
length_paths, labeled_paths, same_labels = calcPaths(num)
ind1, labeled_paths1, same_labels1 = removePaths(labeled_paths, same_labels, [7,12,14,15,16], 0)
ind2, labeled_paths2, same_labels2 = removePathsInverse(labeled_paths1, same_labels1, [7,12,14,15,16], 1)
ind3, labeled_paths3, same_labels3 = removePathsInverse(labeled_paths2, same_labels2, [7,12,14,15,16], 2)
ind4, labeled_paths4, same_labels4 = removePaths(labeled_paths3, same_labels3, [17,3,8], num-2)
ind5, labeled_paths5, same_labels5 = removePaths(labeled_paths4, same_labels4, [13], num-1)
print '\nNum = %s' % num
printAll(labeled_paths1, same_labels1)
'''histograms'''
indX, labeled_pathsX, same_labelsX = removePathsInverse(labeled_paths3, same_labels3, [7,12,14,15,16], 3)
num=5
length_paths, labeled_paths, same_labels = calcPaths(num)
ind1, labeled_paths1, same_labels1 = removePaths(labeled_paths, same_labels, [7,12,14,15,16], 0)
a = labelHist(num,labeled_paths1)
print a
order = np.argsort(calcPlace(a), axis=0) + 1
fl = np.floor(np.sort(calcPlace(a), axis=0))
print '\nNum = %s' % num
print np.hstack((order, fl))
num=6
length_paths, labeled_paths, same_labels = calcPaths(num)
ind1, labeled_paths1, same_labels1 = removePaths(labeled_paths, same_labels, [7,12,14,15,16], 0)
a = labelHist(num,labeled_paths1)
order = np.argsort(calcPlace(a), axis=0) + 1
fl = np.floor(np.sort(calcPlace(a), axis=0))
print '\nNum = %s' % num
print np.hstack((order, fl))
np.array(fl[np.argsort(order, axis=0)]).transpose()
np.array(length_paths)[same_labels == same_labels.max()]
| mit |
xguse/ggplot | setup.py | 13 | 2169 | import os
from setuptools import find_packages, setup
def extract_version():
"""
Extracts version values from the main matplotlib __init__.py and
returns them as a dictionary.
"""
with open('ggplot/__init__.py') as fd:
for line in fd.readlines():
if (line.startswith('__version__')):
exec(line.strip())
return locals()["__version__"]
def get_package_data():
baseline_images = [
'tests/baseline_images/%s/*' % x
for x in os.listdir('ggplot/tests/baseline_images')]
return {
'ggplot':
baseline_images +
[
"exampledata/*.csv",
"geoms/*.png"
]}
setup(
name="ggplot",
# Increase the version in ggplot/__init__.py
version=extract_version(),
author="Greg Lamp",
author_email="greg@yhathq.com",
url="https://github.com/yhat/ggplot/",
license="BSD",
packages=find_packages(),
package_dir={"ggplot": "ggplot"},
package_data=get_package_data(),
description="ggplot for python",
# run pandoc --from=markdown --to=rst --output=README.rst README.md
long_description=open("README.rst").read(),
# numpy is here to make installing easier... Needs to be at the last position,
# as that's the first installed with "python setup.py install"
install_requires=["six", "statsmodels", "brewer2mpl", "matplotlib", "scipy",
"patsy", "pandas", "numpy"],
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3'],
zip_safe=False)
| bsd-2-clause |
AE4317group07/paparazzi | sw/tools/calibration/calibration_utils.py | 27 | 12769 |
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function, division
import re
import numpy as np
from numpy import sin, cos
from scipy import linalg, stats
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_ids_in_log(filename):
"""Returns available ac_id from a log."""
f = open(filename, 'r')
ids = []
pattern = re.compile("\S+ (\S+)")
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
ac_id = m.group(1)
if not ac_id in ids:
ids.append(ac_id)
return ids
def read_log(ac_id, filename, sensor):
"""Extracts raw sensor measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_RAW (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_meas)
def read_log_scaled(ac_id, filename, sensor, t_start, t_end):
"""Extracts scaled sensor measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_SCALED (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
if (float(m.group(1)) >= float(t_start)) and (float(m.group(1)) < (float(t_end)+1.0)):
list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_meas)
def read_log_mag_current(ac_id, filename):
"""Extracts raw magnetometer and current measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_MAG_CURRENT_CALIBRATION (\S+) (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5))])
return np.array(list_meas)
def filter_meas(meas, window_size, noise_threshold):
"""Select only non-noisy data."""
filtered_meas = []
filtered_idx = []
for i in range(window_size, len(meas)-window_size):
noise = meas[i-window_size:i+window_size, :].std(axis=0)
if linalg.norm(noise) < noise_threshold:
filtered_meas.append(meas[i, :])
filtered_idx.append(i)
return np.array(filtered_meas), filtered_idx
def get_min_max_guess(meas, scale):
"""Initial boundary based calibration."""
max_meas = meas[:, :].max(axis=0)
min_meas = meas[:, :].min(axis=0)
range = max_meas - min_meas
# check if we would get division by zero
if range.all():
n = (max_meas + min_meas) / 2
sf = 2*scale/range
return np.array([n[0], n[1], n[2], sf[0], sf[1], sf[2]])
else:
        # degenerate range: return a zero guess with the same length as above
        return np.array([0, 0, 0, 0, 0, 0])
def scale_measurements(meas, p):
"""Scale the set of measurements."""
l_comp = []
l_norm = []
for m in meas[:, ]:
sm = (m - p[0:3])*p[3:6]
l_comp.append(sm)
l_norm.append(linalg.norm(sm))
return np.array(l_comp), np.array(l_norm)
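# Hedged usage sketch (not part of the original tool): a typical accelerometer
# calibration pass chains the helpers above roughly as follows; the log name
# and ac_id are placeholders.
#
#   meas = read_log("42", "flight.log", "ACCEL")          # raw ADC triples
#   flt_meas, flt_idx = filter_meas(meas, 20, 40)         # keep low-noise windows
#   p0 = get_min_max_guess(flt_meas, 9.81)                # neutral + sensitivity guess
#   cp0, np0 = scale_measurements(flt_meas, p0)
#   # np0 should cluster around 9.81 if the initial guess is reasonable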
def estimate_mag_current_relation(meas):
"""Calculate linear coefficient of magnetometer-current relation."""
coefficient = []
for i in range(0, 3):
gradient, intercept, r_value, p_value, std_err = stats.linregress(meas[:, 3], meas[:, i])
coefficient.append(gradient)
return coefficient
def print_xml(p, sensor, res):
"""Print xml for airframe file."""
print("")
print("<define name=\""+sensor+"_X_NEUTRAL\" value=\""+str(int(round(p[0])))+"\"/>")
print("<define name=\""+sensor+"_Y_NEUTRAL\" value=\""+str(int(round(p[1])))+"\"/>")
print("<define name=\""+sensor+"_Z_NEUTRAL\" value=\""+str(int(round(p[2])))+"\"/>")
print("<define name=\""+sensor+"_X_SENS\" value=\""+str(p[3]*2**res)+"\" integer=\"16\"/>")
print("<define name=\""+sensor+"_Y_SENS\" value=\""+str(p[4]*2**res)+"\" integer=\"16\"/>")
print("<define name=\""+sensor+"_Z_SENS\" value=\""+str(p[5]*2**res)+"\" integer=\"16\"/>")
print("")
def print_imu_scaled(sensor, measurements, attrs):
print("")
print(sensor+" : Time Range("+str(measurements[:,0].min(axis=0))+" : "+str(measurements[:,0].max(axis=0))+")")
np.set_printoptions(formatter={'float': '{:-7.3f}'.format})
print(" " + attrs[2] + " " + attrs[3] + " " + attrs[4])
print("Min " + str(measurements[:,1:].min(axis=0)*attrs[0]) + " " + attrs[1])
print("Max " + str(measurements[:,1:].max(axis=0)*attrs[0]) + " " + attrs[1])
print("Mean " + str(measurements[:,1:].mean(axis=0)*attrs[0]) + " " + attrs[1])
print("StDev " + str(measurements[:,1:].std(axis=0)*attrs[0]) + " " + attrs[1])
def plot_measurements(sensor, measurements):
plt.plot(measurements[:, 0])
plt.plot(measurements[:, 1])
plt.plot(measurements[:, 2])
plt.ylabel('ADC')
plt.title("Raw %s measurements" % sensor)
plt.show()
def plot_results(sensor, measurements, flt_idx, flt_meas, cp0, np0, cp1, np1, sensor_ref, blocking=True):
"""Plot calibration results."""
# plot raw measurements with filtered ones marked as red circles
plt.subplot(3, 1, 1)
plt.plot(measurements[:, 0])
plt.plot(measurements[:, 1])
plt.plot(measurements[:, 2])
plt.plot(flt_idx, flt_meas[:, 0], 'ro')
plt.plot(flt_idx, flt_meas[:, 1], 'ro')
plt.plot(flt_idx, flt_meas[:, 2], 'ro')
plt.ylabel('ADC')
plt.title('Raw '+sensor+', red dots are actually used measurements')
plt.tight_layout()
# show scaled measurements with initial guess
plt.subplot(3, 2, 3)
plt.plot(cp0[:, 0])
plt.plot(cp0[:, 1])
plt.plot(cp0[:, 2])
plt.plot(-sensor_ref*np.ones(len(flt_meas)))
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('scaled '+sensor+' (initial guess)')
plt.xticks([])
plt.subplot(3, 2, 4)
plt.plot(np0)
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('norm of '+sensor+' (initial guess)')
plt.xticks([])
# show scaled measurements after optimization
plt.subplot(3, 2, 5)
plt.plot(cp1[:, 0])
plt.plot(cp1[:, 1])
plt.plot(cp1[:, 2])
plt.plot(-sensor_ref*np.ones(len(flt_meas)))
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('scaled '+sensor+' (optimized)')
plt.xticks([])
plt.subplot(3, 2, 6)
plt.plot(np1)
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('norm of '+sensor+' (optimized)')
plt.xticks([])
# if we want to have another plot we only draw the figure (non-blocking)
# also in matplotlib before 1.0.0 there is only one call to show possible
if blocking:
plt.show()
else:
plt.draw()
def plot_imu_scaled(sensor, measurements, attrs):
"""Plot imu scaled results."""
plt.figure("Sensor Scaled")
plt.subplot(4, 1, 1)
plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0])
plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0])
plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0])
#plt.xlabel('Time (s)')
plt.ylabel(attrs[1])
plt.title(sensor)
plt.subplot(4, 1, 2)
plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0], 'b')
#plt.xlabel('Time (s)')
plt.ylabel(attrs[2])
plt.subplot(4, 1, 3)
plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0], 'g')
#plt.xlabel('Time (s)')
plt.ylabel(attrs[3])
plt.subplot(4, 1, 4)
plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0], 'r')
plt.xlabel('Time (s)')
plt.ylabel(attrs[4])
plt.show()
def plot_imu_scaled_fft(sensor, measurements, attrs):
"""Plot imu scaled fft results."""
#dt = 0.0769
#Fs = 1/dt
Fs = 26.0
plt.figure("Sensor Scaled - FFT")
plt.subplot(3, 1, 1)
plt.magnitude_spectrum(measurements[:, 1]*attrs[0], Fs=Fs, scale='linear')
plt.ylabel(attrs[2])
plt.title(sensor)
plt.subplot(3, 1, 2)
plt.magnitude_spectrum(measurements[:, 2]*attrs[0], Fs=Fs, scale='linear')
plt.ylabel(attrs[3])
plt.subplot(3, 1, 3)
plt.magnitude_spectrum(measurements[:, 3]*attrs[0], Fs=Fs, scale='linear')
plt.xlabel('Frequency')
plt.ylabel(attrs[4])
plt.show()
def plot_mag_3d(measured, calibrated, p):
"""Plot magnetometer measurements on 3D sphere."""
# set up points for sphere and ellipsoid wireframes
u = np.r_[0:2 * np.pi:20j]
v = np.r_[0:np.pi:20j]
wx = np.outer(cos(u), sin(v))
wy = np.outer(sin(u), sin(v))
wz = np.outer(np.ones(np.size(u)), cos(v))
ex = p[0] * np.ones(np.size(u)) + np.outer(cos(u), sin(v)) / p[3]
ey = p[1] * np.ones(np.size(u)) + np.outer(sin(u), sin(v)) / p[4]
ez = p[2] * np.ones(np.size(u)) + np.outer(np.ones(np.size(u)), cos(v)) / p[5]
# measurements
mx = measured[:, 0]
my = measured[:, 1]
mz = measured[:, 2]
# calibrated values
cx = calibrated[:, 0]
cy = calibrated[:, 1]
cz = calibrated[:, 2]
# axes size
left = 0.02
bottom = 0.05
width = 0.46
height = 0.9
rect_l = [left, bottom, width, height]
rect_r = [left/2+0.5, bottom, width, height]
fig = plt.figure(figsize=plt.figaspect(0.5))
if matplotlib.__version__.startswith('0'):
ax = Axes3D(fig, rect=rect_l)
else:
ax = fig.add_subplot(1, 2, 1, position=rect_l, projection='3d')
# plot measurements
ax.scatter(mx, my, mz)
plt.hold(True)
# plot line from center to ellipsoid center
ax.plot([0.0, p[0]], [0.0, p[1]], [0.0, p[2]], color='black', marker='+', markersize=10)
# plot ellipsoid
ax.plot_wireframe(ex, ey, ez, color='grey', alpha=0.5)
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([mx.max() - mx.min(), my.max() - my.min(), mz.max() - mz.min()]).max()
Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (mx.max() + mx.min())
Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (my.max() + my.min())
Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (mz.max() + mz.min())
# add the fake bounding box:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
ax.set_title('MAG raw with fitted ellipsoid and center offset')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
if matplotlib.__version__.startswith('0'):
ax = Axes3D(fig, rect=rect_r)
else:
ax = fig.add_subplot(1, 2, 2, position=rect_r, projection='3d')
ax.plot_wireframe(wx, wy, wz, color='grey', alpha=0.5)
plt.hold(True)
ax.scatter(cx, cy, cz)
ax.set_title('MAG calibrated on unit sphere')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim3d(-1, 1)
ax.set_ylim3d(-1, 1)
ax.set_zlim3d(-1, 1)
plt.show()
def read_turntable_log(ac_id, tt_id, filename, _min, _max):
""" Read a turntable log.
    Return an array whose first column is the turntable measurement and whose next three columns are the gyro measurements.
"""
f = open(filename, 'r')
    pattern_g = re.compile(r"(\S+) "+str(ac_id)+r" IMU_GYRO_RAW (\S+) (\S+) (\S+)")
    pattern_t = re.compile(r"(\S+) "+str(tt_id)+r" IMU_TURNTABLE (\S+)")
last_tt = None
list_tt = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern_t, line)
if m:
last_tt = float(m.group(2))
m = re.match(pattern_g, line)
if m and last_tt and _min < last_tt < _max:
list_tt.append([last_tt, float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_tt)
| gpl-2.0 |
SvichkarevAnatoly/Course-Python-Bioinformatics | semester2/task8/exercise2.py | 1 | 3387 | import numpy
import random
import matplotlib.pyplot as plot
from sklearn.tree import DecisionTreeRegressor
from sklearn import tree
a = 1
b = 2
# Build a simple data set with y = a + b*x^2 + gaussian noise
nPoints = 1000
# x values for plotting
xPlot = [(float(i) / float(nPoints) - 0.5) for i in range(nPoints + 1)]
# x needs to be list of lists.
x = [[s] for s in xPlot]
# y (labels) has random noise added to x-value
# set seed
random.seed(1)
numpy.random.seed(1)
y = [a + b * s * s + numpy.random.normal(scale=0.1) for s in xPlot]
# take fixed test set 30% of sample
nSample = int(nPoints * 0.30)
idxTest = random.sample(range(nPoints), nSample)
idxTest.sort()
idxTrain = [idx for idx in range(nPoints) if not (idx in idxTest)]
# Define test and training attribute and label sets
xTrain = [x[r] for r in idxTrain]
xTest = [x[r] for r in idxTest]
yTrain = [y[r] for r in idxTrain]
yTest = [y[r] for r in idxTest]
# train a series of models on random subsets of the training data
# collect the models in a list and check error of composite as list grows
# maximum number of models to generate
numTreesMax = 30
# tree depth - typically at the high end
treeDepth = 5
# initialize a list to hold models
modelList = []
predList = []
eps = 0.3
# initialize residuals to be the labels y
residuals = list(yTrain)
for iTrees in range(numTreesMax):
modelList.append(DecisionTreeRegressor(max_depth=treeDepth))
modelList[-1].fit(xTrain, residuals)
# make prediction with latest model and add to list of predictions
latestInSamplePrediction = modelList[-1].predict(xTrain)
# use new predictions to update residuals
residuals = [residuals[i] - eps * latestInSamplePrediction[i] \
for i in range(len(residuals))]
latestOutSamplePrediction = modelList[-1].predict(xTest)
predList.append(list(latestOutSamplePrediction))
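# The loop above is gradient boosting with shrinkage: every tree is fit to the
# current residuals and only a fraction eps of its prediction is subtracted, so
# the ensemble predicts eps * sum_k tree_k(x); the cumulative sums below rebuild
# that prediction on the test set one tree at a time.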
# build cumulative prediction from first "n" models
mse = []
allPredictions = []
for iModels in range(len(modelList)):
# add the first "iModels" of the predictions and multiply by eps
prediction = []
for iPred in range(len(xTest)):
prediction.append(
sum([predList[i][iPred] for i in range(iModels + 1)]) * eps)
allPredictions.append(prediction)
errors = [(yTest[i] - prediction[i]) for i in range(len(yTest))]
mse.append(sum([e * e for e in errors]) / len(yTest))
nModels = [i + 1 for i in range(len(modelList))]
# mse plot
plot.plot(nModels, mse)
plot.axis('tight')
plot.xlabel('Number of Models in Ensemble')
plot.ylabel('Mean Squared Error')
plot.ylim((0.0, max(mse)))
# plot.show()
plot.savefig("mseEx2.png")
plot.close()
print min(mse)
# predictions plot
plotList = [0, 14, 29]
lineType = [':', '-.', '--']
plot.figure()
for i in range(len(plotList)):
iPlot = plotList[i]
textLegend = 'Prediction with ' + str(iPlot) + ' Trees'
plot.plot(xTest, allPredictions[iPlot],
label=textLegend, linestyle=lineType[i])
plot.plot(xTest, yTest, label='True y Value', alpha=0.25)
plot.legend(bbox_to_anchor=(1, 0.3))
plot.axis('tight')
plot.xlabel('x value')
plot.ylabel('Predictions')
# plot.show()
plot.savefig("predictionsEx2.png")
plot.close()
# save first 2 tree
with open("tree1Ex2.dot", 'w') as f1:
f1 = tree.export_graphviz(modelList[0], out_file=f1)
with open("tree2Ex2.dot", 'w') as f2:
f2 = tree.export_graphviz(modelList[1], out_file=f2)
| gpl-2.0 |
aleju/imgaug | imgaug/augmentables/heatmaps.py | 2 | 25136 | """Classes to represent heatmaps, i.e. float arrays of ``[0.0, 1.0]``."""
from __future__ import print_function, division, absolute_import
import numpy as np
import six.moves as sm
from .. import imgaug as ia
from .base import IAugmentable
class HeatmapsOnImage(IAugmentable):
"""Object representing heatmaps on a single image.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Array representing the heatmap(s) on a single image.
Multiple heatmaps may be provided, in which case ``C`` is expected to
denote the heatmap index.
The array must be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the heatmap(s) is/are placed.
**Not** the shape of the heatmap(s) array, unless it is identical
to the image shape (note the likely difference between the arrays
in the number of channels).
This is expected to be ``(H, W)`` or ``(H, W, C)`` with ``C`` usually
being ``3``.
If there is no corresponding image, use ``(H_arr, W_arr)`` instead,
where ``H_arr`` is the height of the heatmap(s) array
(analogous ``W_arr``).
min_value : float, optional
Minimum value for the heatmaps that `arr` represents. This will
usually be ``0.0``.
max_value : float, optional
Maximum value for the heatmaps that `arr` represents. This will
usually be ``1.0``.
"""
def __init__(self, arr, shape, min_value=0.0, max_value=1.0):
"""Construct a new HeatmapsOnImage object."""
assert ia.is_np_array(arr), (
"Expected numpy array as heatmap input array, "
"got type %s" % (type(arr),))
# TODO maybe allow 0-sized heatmaps? in that case the min() and max()
# must be adjusted
assert arr.shape[0] > 0 and arr.shape[1] > 0, (
"Expected numpy array as heatmap with height and width greater "
"than 0, got shape %s." % (arr.shape,))
assert arr.dtype.name in ["float32"], (
"Heatmap input array expected to be of dtype float32, "
"got dtype %s." % (arr.dtype,))
assert arr.ndim in [2, 3], (
"Heatmap input array must be 2d or 3d, got shape %s." % (
arr.shape,))
assert len(shape) in [2, 3], (
"Argument 'shape' in HeatmapsOnImage expected to be 2d or 3d, "
"got shape %s." % (shape,))
assert min_value < max_value, (
"Expected min_value to be lower than max_value, "
"got %.4f and %.4f" % (min_value, max_value))
eps = np.finfo(arr.dtype).eps
components = arr.flat[0:50]
beyond_min = np.min(components) < min_value - eps
beyond_max = np.max(components) > max_value + eps
if beyond_min or beyond_max:
ia.warn(
"Value range of heatmap was chosen to be (%.8f, %.8f), but "
"found actual min/max of (%.8f, %.8f). Array will be "
"clipped to chosen value range." % (
min_value, max_value, np.min(arr), np.max(arr)))
arr = np.clip(arr, min_value, max_value)
if arr.ndim == 2:
arr = arr[..., np.newaxis]
self.arr_was_2d = True
else:
self.arr_was_2d = False
min_is_zero = 0.0 - eps < min_value < 0.0 + eps
max_is_one = 1.0 - eps < max_value < 1.0 + eps
if min_is_zero and max_is_one:
self.arr_0to1 = arr
else:
self.arr_0to1 = (arr - min_value) / (max_value - min_value)
self.shape = shape
self.min_value = min_value
self.max_value = max_value
def get_arr(self):
"""Get the heatmap's array in value range provided to ``__init__()``.
The :class:`HeatmapsOnImage` object saves heatmaps internally in the
value range ``[0.0, 1.0]``. This function converts the internal
representation to ``[min, max]``, where ``min`` and ``max`` are
provided to :func:`HeatmapsOnImage.__init__` upon instantiation of
the object.
Returns
-------
(H,W) ndarray or (H,W,C) ndarray
Heatmap array of dtype ``float32``.
"""
if self.arr_was_2d and self.arr_0to1.shape[2] == 1:
arr = self.arr_0to1[:, :, 0]
else:
arr = self.arr_0to1
eps = np.finfo(np.float32).eps
min_is_zero = 0.0 - eps < self.min_value < 0.0 + eps
max_is_one = 1.0 - eps < self.max_value < 1.0 + eps
if min_is_zero and max_is_one:
return np.copy(arr)
diff = self.max_value - self.min_value
return self.min_value + diff * arr
# TODO
# def find_global_maxima(self):
# raise NotImplementedError()
def draw(self, size=None, cmap="jet"):
"""Render the heatmaps as RGB images.
Parameters
----------
size : None or float or iterable of int or iterable of float, optional
Size of the rendered RGB image as ``(height, width)``.
See :func:`~imgaug.imgaug.imresize_single_image` for details.
If set to ``None``, no resizing is performed and the size of the
heatmaps array is used.
cmap : str or None, optional
Name of the ``matplotlib`` color map to use when convert the
heatmaps to RGB images.
If set to ``None``, no color map will be used and the heatmaps
will be converted to simple intensity maps.
Returns
-------
list of (H,W,3) ndarray
Rendered heatmaps as ``uint8`` arrays.
Always a **list** containing one RGB image per heatmap array
channel.
"""
heatmaps_uint8 = self.to_uint8()
heatmaps_drawn = []
for c in sm.xrange(heatmaps_uint8.shape[2]):
# We use c:c+1 here to get a (H,W,1) array. Otherwise imresize
# would have to re-attach an axis.
heatmap_c = heatmaps_uint8[..., c:c+1]
if size is not None:
heatmap_c_rs = ia.imresize_single_image(
heatmap_c, size, interpolation="nearest")
else:
heatmap_c_rs = heatmap_c
heatmap_c_rs = np.squeeze(heatmap_c_rs).astype(np.float32) / 255.0
if cmap is not None:
# import only when necessary (faster startup; optional
# dependency; less fragile -- see issue #225)
import matplotlib.pyplot as plt
cmap_func = plt.get_cmap(cmap)
heatmap_cmapped = cmap_func(heatmap_c_rs)
heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
else:
heatmap_cmapped = np.tile(
heatmap_c_rs[..., np.newaxis], (1, 1, 3))
heatmap_cmapped = np.clip(
heatmap_cmapped * 255, 0, 255).astype(np.uint8)
heatmaps_drawn.append(heatmap_cmapped)
return heatmaps_drawn
def draw_on_image(self, image, alpha=0.75, cmap="jet", resize="heatmaps"):
"""Draw the heatmaps as overlays over an image.
Parameters
----------
image : (H,W,3) ndarray
Image onto which to draw the heatmaps.
Expected to be of dtype ``uint8``.
alpha : float, optional
Alpha/opacity value to use for the mixing of image and heatmaps.
Larger values mean that the heatmaps will be more visible and the
image less visible.
cmap : str or None, optional
Name of the ``matplotlib`` color map to use.
See :func:`HeatmapsOnImage.draw` for details.
resize : {'heatmaps', 'image'}, optional
In case of size differences between the image and heatmaps,
either the image or the heatmaps can be resized. This parameter
controls which of the two will be resized to the other's size.
Returns
-------
list of (H,W,3) ndarray
Rendered overlays as ``uint8`` arrays.
Always a **list** containing one RGB image per heatmap array
channel.
"""
# assert RGB image
assert image.ndim == 3, (
"Expected to draw on three-dimensional image, "
"got %d dimensions with shape %s instead." % (
image.ndim, image.shape))
assert image.shape[2] == 3, (
"Expected RGB image, got %d channels instead." % (image.shape[2],))
assert image.dtype.name == "uint8", (
"Expected uint8 image, got dtype %s." % (image.dtype.name,))
assert 0 - 1e-8 <= alpha <= 1.0 + 1e-8, (
"Expected 'alpha' to be in the interval [0.0, 1.0], got %.4f" % (
alpha))
assert resize in ["heatmaps", "image"], (
"Expected resize to be \"heatmaps\" or \"image\", "
"got %s instead." % (resize,))
if resize == "image":
image = ia.imresize_single_image(
image, self.arr_0to1.shape[0:2], interpolation="cubic")
heatmaps_drawn = self.draw(
size=image.shape[0:2] if resize == "heatmaps" else None,
cmap=cmap)
# TODO use blend_alpha here
mix = [
np.clip(
(1-alpha) * image + alpha * heatmap_i,
0, 255
).astype(np.uint8)
for heatmap_i
in heatmaps_drawn]
return mix
def invert(self):
"""Invert each component in the heatmap.
This shifts low values towards high values and vice versa.
This changes each value to::
v' = max - (v - min)
where ``v`` is the value at a spatial location, ``min`` is the
minimum value in the heatmap and ``max`` is the maximum value.
As the heatmap uses internally a ``0.0`` to ``1.0`` representation,
this simply becomes ``v' = 1.0 - v``.
This function can be useful e.g. when working with depth maps, where
algorithms might have an easier time representing the furthest away
points with zeros, requiring an inverted depth map.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Inverted heatmap.
"""
arr_inv = HeatmapsOnImage.from_0to1(
1 - self.arr_0to1,
shape=self.shape,
min_value=self.min_value,
max_value=self.max_value)
arr_inv.arr_was_2d = self.arr_was_2d
return arr_inv
def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
"""Pad the heatmaps at their top/right/bottom/left side.
Parameters
----------
top : int, optional
Amount of pixels to add at the top side of the heatmaps.
Must be ``0`` or greater.
right : int, optional
Amount of pixels to add at the right side of the heatmaps.
Must be ``0`` or greater.
bottom : int, optional
Amount of pixels to add at the bottom side of the heatmaps.
Must be ``0`` or greater.
left : int, optional
Amount of pixels to add at the left side of the heatmaps.
Must be ``0`` or greater.
mode : string, optional
Padding mode to use. See :func:`~imgaug.imgaug.pad` for details.
cval : number, optional
Value to use for padding `mode` is ``constant``.
See :func:`~imgaug.imgaug.pad` for details.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Padded heatmaps of height ``H'=H+top+bottom`` and
width ``W'=W+left+right``.
"""
from ..augmenters import size as iasize
arr_0to1_padded = iasize.pad(
self.arr_0to1,
top=top,
right=right,
bottom=bottom,
left=left,
mode=mode,
cval=cval)
# TODO change to deepcopy()
return HeatmapsOnImage.from_0to1(
arr_0to1_padded,
shape=self.shape,
min_value=self.min_value,
max_value=self.max_value)
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0,
return_pad_amounts=False):
"""Pad the heatmaps until they match a target aspect ratio.
Depending on which dimension is smaller (height or width), only the
corresponding sides (left/right or top/bottom) will be padded. In
each case, both of the sides will be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. ``2.0`` denotes
the image having twice as much width as height.
mode : str, optional
Padding mode to use.
See :func:`~imgaug.imgaug.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``.
See :func:`~imgaug.imgaug.pad` for details.
return_pad_amounts : bool, optional
If ``False``, then only the padded instance will be returned.
If ``True``, a tuple with two entries will be returned, where
the first entry is the padded instance and the second entry are
the amounts by which each array side was padded. These amounts are
again a tuple of the form ``(top, right, bottom, left)``, with
each value being an integer.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Padded heatmaps as :class:`HeatmapsOnImage` instance.
tuple of int
Amounts by which the instance's array was padded on each side,
given as a tuple ``(top, right, bottom, left)``.
This tuple is only returned if `return_pad_amounts` was set to
``True``.
"""
from ..augmenters import size as iasize
arr_0to1_padded, pad_amounts = iasize.pad_to_aspect_ratio(
self.arr_0to1,
aspect_ratio=aspect_ratio,
mode=mode,
cval=cval,
return_pad_amounts=True)
# TODO change to deepcopy()
heatmaps = HeatmapsOnImage.from_0to1(
arr_0to1_padded,
shape=self.shape,
min_value=self.min_value,
max_value=self.max_value)
if return_pad_amounts:
return heatmaps, pad_amounts
return heatmaps
def avg_pool(self, block_size):
"""Average-pool the heatmap(s) array using a given block/kernel size.
Parameters
----------
block_size : int or tuple of int
Size of each block of values to pool, aka kernel size.
See :func:`~imgaug.imgaug.pool` for details.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Heatmaps after average pooling.
"""
arr_0to1_reduced = ia.avg_pool(self.arr_0to1, block_size, pad_cval=0.0)
return HeatmapsOnImage.from_0to1(
arr_0to1_reduced,
shape=self.shape,
min_value=self.min_value,
max_value=self.max_value)
def max_pool(self, block_size):
"""Max-pool the heatmap(s) array using a given block/kernel size.
Parameters
----------
block_size : int or tuple of int
Size of each block of values to pool, aka kernel size.
See :func:`~imgaug.imgaug.pool` for details.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Heatmaps after max-pooling.
"""
arr_0to1_reduced = ia.max_pool(self.arr_0to1, block_size)
return HeatmapsOnImage.from_0to1(
arr_0to1_reduced,
shape=self.shape,
min_value=self.min_value,
max_value=self.max_value)
@ia.deprecated(alt_func="HeatmapsOnImage.resize()",
comment="resize() has the exactly same interface.")
def scale(self, *args, **kwargs):
"""Resize the heatmap(s) array given a target size and interpolation."""
return self.resize(*args, **kwargs)
def resize(self, sizes, interpolation="cubic"):
"""Resize the heatmap(s) array given a target size and interpolation.
Parameters
----------
sizes : float or iterable of int or iterable of float
New size of the array in ``(height, width)``.
See :func:`~imgaug.imgaug.imresize_single_image` for details.
interpolation : None or str or int, optional
The interpolation to use during resize.
See :func:`~imgaug.imgaug.imresize_single_image` for details.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Resized heatmaps object.
"""
arr_0to1_resized = ia.imresize_single_image(
self.arr_0to1, sizes, interpolation=interpolation)
# cubic interpolation can lead to values outside of [0.0, 1.0],
# see https://github.com/opencv/opencv/issues/7195
# TODO area interpolation too?
arr_0to1_resized = np.clip(arr_0to1_resized, 0.0, 1.0)
return HeatmapsOnImage.from_0to1(
arr_0to1_resized,
shape=self.shape,
min_value=self.min_value,
max_value=self.max_value)
def to_uint8(self):
"""Convert this heatmaps object to an ``uint8`` array.
Returns
-------
(H,W,C) ndarray
Heatmap as an ``uint8`` array, i.e. with the discrete value
range ``[0, 255]``.
"""
# TODO this always returns (H,W,C), even if input ndarray was
# originally (H,W). Does it make sense here to also return
# (H,W) if self.arr_was_2d?
arr_0to255 = np.clip(np.round(self.arr_0to1 * 255), 0, 255)
arr_uint8 = arr_0to255.astype(np.uint8)
return arr_uint8
@staticmethod
def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
"""Create a ``float``-based heatmaps object from an ``uint8`` array.
Parameters
----------
arr_uint8 : (H,W) ndarray or (H,W,C) ndarray
Heatmap(s) array, where ``H`` is height, ``W`` is width
and ``C`` is the number of heatmap channels.
Expected dtype is ``uint8``.
shape : tuple of int
Shape of the image on which the heatmap(s) is/are placed.
**Not** the shape of the heatmap(s) array, unless it is identical
to the image shape (note the likely difference between the arrays
in the number of channels).
If there is not a corresponding image, use the shape of the
heatmaps array.
min_value : float, optional
Minimum value of the float heatmaps that the input array
represents. This will usually be 0.0. In most other cases it will
be close to the interval ``[0.0, 1.0]``.
            Calling :func:`~imgaug.HeatmapsOnImage.get_arr` will automatically
convert the interval ``[0.0, 1.0]`` float array to this
``[min, max]`` interval.
max_value : float, optional
            Maximum value of the float heatmaps that the input array
represents. This will usually be 1.0.
See parameter `min_value` for details.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Heatmaps object.
"""
arr_0to1 = arr_uint8.astype(np.float32) / 255.0
return HeatmapsOnImage.from_0to1(
arr_0to1, shape,
min_value=min_value,
max_value=max_value)
@staticmethod
def from_0to1(arr_0to1, shape, min_value=0.0, max_value=1.0):
"""Create a heatmaps object from a ``[0.0, 1.0]`` float array.
Parameters
----------
arr_0to1 : (H,W) or (H,W,C) ndarray
Heatmap(s) array, where ``H`` is the height, ``W`` is the width
and ``C`` is the number of heatmap channels.
Expected dtype is ``float32``.
shape : tuple of ints
Shape of the image on which the heatmap(s) is/are placed.
**Not** the shape of the heatmap(s) array, unless it is identical
to the image shape (note the likely difference between the arrays
in the number of channels).
If there is not a corresponding image, use the shape of the
heatmaps array.
min_value : float, optional
Minimum value of the float heatmaps that the input array
represents. This will usually be 0.0. In most other cases it will
be close to the interval ``[0.0, 1.0]``.
            Calling :func:`~imgaug.HeatmapsOnImage.get_arr` will automatically
convert the interval ``[0.0, 1.0]`` float array to this
``[min, max]`` interval.
max_value : float, optional
            Maximum value of the float heatmaps that the input array
represents. This will usually be 1.0.
See parameter `min_value` for details.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Heatmaps object.
"""
heatmaps = HeatmapsOnImage(arr_0to1, shape,
min_value=0.0, max_value=1.0)
heatmaps.min_value = min_value
heatmaps.max_value = max_value
return heatmaps
# TODO change name to change_value_range()?
@classmethod
def change_normalization(cls, arr, source, target):
"""Change the value range of a heatmap array.
E.g. the value range may be changed from the interval ``[0.0, 1.0]``
to ``[-1.0, 1.0]``.
Parameters
----------
arr : ndarray
Heatmap array to modify.
source : tuple of float
Current value range of the input array, given as a
tuple ``(min, max)``, where both are ``float`` values.
target : tuple of float
Desired output value range of the array, given as a
tuple ``(min, max)``, where both are ``float`` values.
Returns
-------
ndarray
Input array, with value range projected to the desired target
value range.
"""
assert ia.is_np_array(arr), (
"Expected 'arr' to be an ndarray, got type %s." % (type(arr),))
def _validate_tuple(arg_name, arg_value):
assert isinstance(arg_value, tuple), (
"'%s' was not a HeatmapsOnImage instance, "
"expected type tuple then. Got type %s." % (
arg_name, type(arg_value),))
assert len(arg_value) == 2, (
"Expected tuple '%s' to contain exactly two entries, "
"got %d." % (arg_name, len(arg_value),))
assert arg_value[0] < arg_value[1], (
"Expected tuple '%s' to have two entries with "
"entry 1 < entry 2, got values %.4f and %.4f." % (
arg_name, arg_value[0], arg_value[1]))
if isinstance(source, HeatmapsOnImage):
source = (source.min_value, source.max_value)
else:
_validate_tuple("source", source)
if isinstance(target, HeatmapsOnImage):
target = (target.min_value, target.max_value)
else:
_validate_tuple("target", target)
        # Check if source and target are the same (with a tiny bit of
        # tolerance). If so, skip the computation and just copy the array
        # instead.
# This is reasonable, as source and target will often both
# be (0.0, 1.0).
eps = np.finfo(arr.dtype).eps
mins_same = source[0] - 10*eps < target[0] < source[0] + 10*eps
maxs_same = source[1] - 10*eps < target[1] < source[1] + 10*eps
if mins_same and maxs_same:
return np.copy(arr)
min_source, max_source = source
min_target, max_target = target
diff_source = max_source - min_source
diff_target = max_target - min_target
arr_0to1 = (arr - min_source) / diff_source
arr_target = min_target + arr_0to1 * diff_target
return arr_target
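    # Illustrative call (a sketch, not taken from the original docs): project an
    # array whose values live in [0.0, 1.0] onto [-1.0, 1.0]:
    #
    #   arr_signed = HeatmapsOnImage.change_normalization(
    #       arr, source=(0.0, 1.0), target=(-1.0, 1.0))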
# TODO make this a proper shallow-copy
def copy(self):
"""Create a shallow copy of the heatmaps object.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Shallow copy.
"""
return self.deepcopy()
def deepcopy(self):
"""Create a deep copy of the heatmaps object.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Deep copy.
"""
return HeatmapsOnImage(
self.get_arr(),
shape=self.shape,
min_value=self.min_value,
max_value=self.max_value)
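# Minimal usage sketch (commented out; `image` is a placeholder for an RGB
# uint8 array of shape (H, W, 3)):
#
#   heat = np.zeros(image.shape[0:2], dtype=np.float32)
#   heat[30:70, 40:120] = 0.9
#   hm = HeatmapsOnImage(heat, shape=image.shape)
#   overlay = hm.draw_on_image(image, alpha=0.6)[0]   # list holds one RGB array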
| mit |
LaboratoireMecaniqueLille/crappy | crappy/blocks/grapher.py | 1 | 5641 | # coding: utf-8
import numpy as np
from .block import Block
from .._global import OptionalModule
try:
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
except (ModuleNotFoundError, ImportError):
plt = OptionalModule("matplotlib")
Button = OptionalModule("matplotlib")
class Grapher(Block):
"""The grapher receive data from a block (via a :ref:`Link`) and plots it."""
def __init__(self,
*labels,
length=0,
freq=2,
maxpt=20000,
window_size=(8, 8),
window_pos=None,
interp=True,
backend="TkAgg"):
"""Sets the args and initializes the parent class.
Args:
      *labels (:obj:`tuple`): Tuples of the column labels of the input data to
        plot. You can add as many as you want, depending on your machine's
        performance. The first value is the `x` label, the second is the `y`
        label.
length (:obj:`int`, optional): If `0` the graph is static and displays
all data from the start of the assay. Else only displays the last
``length`` received chunks, and drops the previous ones.
freq (:obj:`float`, optional): The refresh rate of the graph. May cause
high memory consumption if set too high.
maxpt (:obj:`int`, optional): The maximum number of points displayed on
the graph. When reaching this limit, the block deletes one point out of
two but this is almost invisible to the user.
window_size (:obj:`tuple`, optional): The size of the graph, in inches.
window_pos (:obj:`tuple`, optional): The position of the graph in pixels.
The first value is for the `x` direction, the second for the `y`
direction. The origin is the top left corner. Works with multiple
screens.
interp (:obj:`bool`, optional): If :obj:`True`, the points of data will
        be linked to the following by straight lines. Else, each value will be
displayed as constant until the next update.
      backend (:obj:`str`, optional): The :mod:`matplotlib` backend to use.
Example:
::
graph = Grapher(('t(s)', 'F(N)'), ('t(s)', 'def(%)'))
will plot a dynamic graph with two lines plot (`F=f(t)` and `def=f(t)`).
::
graph = Grapher(('def(%)', 'F(N)'), length=0)
will plot a static graph.
::
graph = Grapher(('t(s)', 'F(N)'), length=30)
will plot a dynamic graph displaying the last 30 chunks of data.
"""
Block.__init__(self)
self.niceness = 10
self.length = length
self.freq = freq
self.maxpt = maxpt
self.window_size = window_size
self.window_pos = window_pos
self.interp = interp
self.backend = backend
self.labels = labels
def prepare(self):
if self.backend:
plt.switch_backend(self.backend)
self.f = plt.figure(figsize=self.window_size)
self.ax = self.f.add_subplot(111)
self.lines = []
for _ in self.labels:
if self.interp:
self.lines.append(self.ax.plot([], [])[0])
else:
self.lines.append(self.ax.step([], [])[0])
# Keep only 1/factor points on each line
self.factor = [1 for _ in self.labels]
# Count to drop exactly 1/factor points, no more and no less
self.counter = [0 for _ in self.labels]
legend = [y for x, y in self.labels]
plt.legend(legend, bbox_to_anchor=(-0.03, 1.02, 1.06, .102), loc=3,
ncol=len(legend), mode="expand", borderaxespad=1)
plt.xlabel(self.labels[0][0])
plt.ylabel(self.labels[0][1])
plt.grid()
self.axclear = plt.axes([.8, .02, .15, .05])
self.bclear = Button(self.axclear, 'Clear')
self.bclear.on_clicked(self.clear)
if self.window_pos:
mng = plt.get_current_fig_manager()
mng.window.wm_geometry("+%s+%s" % self.window_pos)
plt.draw()
plt.pause(.001)
def clear(self, event=None):
for line in self.lines:
line.set_xdata([])
line.set_ydata([])
self.factor = [1 for _ in self.labels]
self.counter = [0 for _ in self.labels]
def loop(self):
# We need to recv data from all the links, but keep
# ALL of the data, even with the same label (so not get_all_last)
data = self.recv_all_delay()
for i, (lx, ly) in enumerate(self.labels):
x, y = 0, 0 # So that if we don't find it, we do nothing
for d in data:
if lx in d and ly in d: # Find the first input with both labels
dx = d[lx][self.factor[i]-self.counter[i]-1::self.factor[i]]
dy = d[ly][self.factor[i]-self.counter[i]-1::self.factor[i]]
self.counter[i] = (self.counter[i]+len(d[lx])) % self.factor[i]
x = np.hstack((self.lines[i].get_xdata(), dx))
y = np.hstack((self.lines[i].get_ydata(), dy))
break
if isinstance(x, int):
break
if self.length and len(x) >= self.length:
# Remove the beginning if the graph is dynamic
x = x[-self.length:]
y = y[-self.length:]
elif len(x) > self.maxpt:
        # Reduce the number of points if we have too many to display
print("[Grapher] Too many points on the graph {} ({}>{})".format(
i, len(x), self.maxpt))
x, y = x[::2], y[::2]
self.factor[i] *= 2
print("[Grapher] Resampling factor is now {}".format(self.factor[i]))
self.lines[i].set_xdata(x)
self.lines[i].set_ydata(y)
self.ax.relim() # Update the window
self.ax.autoscale_view(True, True, True)
self.f.canvas.draw() # Update the graph
self.f.canvas.flush_events()
def finish(self):
plt.close("all")
| gpl-2.0 |
xi-studio/anime | newnet/show.py | 1 | 1080 | import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import csc_matrix
head = np.random.randint(low=0,high=10,size=20)
tail = np.random.randint(low=0,high=10,size=20)
row = np.arange(20)
data = np.ones(20)
a = csc_matrix((data, (row,head)),shape=(20,10)).toarray()
b = csc_matrix((data, (row,tail)),shape=(20,10)).toarray()
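# a and b are 20x10 one-hot matrices: row i of `a` has a single 1 in column
# head[i], and row i of `b` has a single 1 in column tail[i].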
def plotCM(cm,title,colorbarOn,givenAX):
ax = givenAX
idx = np.arange(10)
idy = np.arange(20)
plt.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=5.0)
ax.set_xticks(range(10))
ax.set_xticklabels(idx)
plt.title(title,size=12)
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j,i,int(cm[i,j]),va='center', ha='center')
#fig1=plt.subplot(1, 3, 1)
#plotCM(a,"Head Index","off",fig1.axes)
fig2=plt.subplot(1, 1, 1)
w = np.random.randn(20,1)
plt.matshow(w, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
for x in range(20):
fig2.axes.text(0,x,w[x,0],va='center', ha='center')
#fig3=plt.subplot(1, 3, 3)
#plotCM(b,"Tail Index","off",fig3.axes)
plt.show()
| mit |
cemonatk/tools | DSBSCModulation.py | 2 | 1699 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__date__ = '10.10.2016'
__author__ = 'cemonatk'
from numpy import cos,pi,sin,arange
from matplotlib.pyplot import plot,show,title,subplot,suptitle,xlabel,imshow
from matplotlib.image import imread
from Tkinter import *
def DSBSCModulate(fm,fc,aralik):
suptitle('Analog-Lab')
t = arange(1,aralik)
tc = cos(2*pi*fc*t)
ms = sin(2*pi*fm*t)
DSB = tc*ms
subplot(2,2,1)
plot(tc)
title('Carrier Wave')
subplot(2,2,2)
plot(ms)
    title('Message Signal')
subplot(2,1,2)
plot(DSB)
    title('DSB-SC Modulation')
show()
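# Example call with illustrative values (the frequencies are in cycles per
# sample, since t is just an integer index; the numbers are placeholders):
#   DSBSCModulate(fm=0.01, fc=0.1, aralik=360)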
def AmModulation():
pass
def Show():
img = imread('picture.jpg')
imshow(img)
    title('You really need to stop using MATLAB...')
show()
def StrToFloat():
    carrier = 1.0/float(carrierfreq.get())
    message = 1.0/float(messagefreq.get())
    try:
        span = int(aralik.get())
    except ValueError:
        span = 360
    DSBSCModulate(message, carrier, span)
kok = Tk()
kok.title("Analog-Lab")
carrierfreq = StringVar()
e = Entry(kok, textvariable=carrierfreq)
e.pack()
carrierfreq.set("Carrier Freq")
messagefreq = StringVar()
e = Entry(kok, textvariable=messagefreq)
e.pack()
messagefreq.set("message freq")
aralik = StringVar()
e = Entry(kok, textvariable=aralik)
e.pack()
aralik.set("Aralık(360 varsayılan)")
Button(kok, text='Draw DSB-SC', height=2, width=35, command=StrToFloat).pack()
Button(kok, text='Show Hidden Surprise', height=2, width=35, command=Show).pack()
Button(kok, text='Exit', command=kok.destroy).pack()
kok.mainloop()
# Button(kok, text='Draw DSB-SC', height=2, width=35, command=lambda: DSBSCModulate(messagefreq,carrier,range)).pack()  # unreachable after mainloop() and references undefined names
| mit |
goodfeli/pylearn2 | pylearn2/scripts/datasets/browse_small_norb.py | 44 | 6901 | #!/usr/bin/env python
import sys
import argparse
import pickle
import warnings
import exceptions
import numpy
try:
from matplotlib import pyplot
except ImportError as import_error:
warnings.warn("Can't use this script without matplotlib.")
pyplot = None
from pylearn2.datasets import norb
warnings.warn("This script is deprecated. Please use ./browse_norb.py "
"instead. It is kept around as a tester for deprecated class "
"datasets.norb.SmallNORB",
exceptions.DeprecationWarning)
def main():
def parse_args():
parser = argparse.ArgumentParser(
description="Browser for SmallNORB dataset.")
parser.add_argument('--which_set',
default='train',
help="'train', 'test', or the path to a .pkl file")
parser.add_argument('--zca',
default=None,
help=("if --which_set points to a .pkl "
"file storing a ZCA-preprocessed "
"NORB dataset, you can optionally "
"enter the preprocessor's .pkl "
"file path here to undo the "
"ZCA'ing for visualization "
"purposes."))
return parser.parse_args()
def get_data(args):
if args.which_set in ('train', 'test'):
dataset = norb.SmallNORB(args.which_set, True)
else:
with open(args.which_set) as norb_file:
dataset = pickle.load(norb_file)
if len(dataset.y.shape) < 2 or dataset.y.shape[1] == 1:
print("This viewer does not support NORB datasets that "
"only have classification labels.")
sys.exit(1)
if args.zca is not None:
with open(args.zca) as zca_file:
zca = pickle.load(zca_file)
dataset.X = zca.inverse(dataset.X)
num_examples = dataset.X.shape[0]
topo_shape = ((num_examples, ) +
tuple(dataset.view_converter.shape))
assert topo_shape[-1] == 1
topo_shape = topo_shape[:-1]
values = dataset.X.reshape(topo_shape)
labels = numpy.array(dataset.y, 'int')
return values, labels, dataset.which_set
args = parse_args()
values, labels, which_set = get_data(args)
# For programming convenience, internally remap the instance labels to be
# 0-4, and the azimuth labels to be 0-17. The user will still only see the
# original, unmodified label values.
instance_index = norb.SmallNORB.label_type_to_index['instance']
def remap_instances(which_set, labels):
if which_set == 'train':
new_to_old_instance = [4, 6, 7, 8, 9]
elif which_set == 'test':
new_to_old_instance = [0, 1, 2, 3, 5]
num_instances = len(new_to_old_instance)
old_to_new_instance = numpy.ndarray(10, 'int')
old_to_new_instance.fill(-1)
old_to_new_instance[new_to_old_instance] = numpy.arange(num_instances)
instance_slice = numpy.index_exp[:, instance_index]
old_instances = labels[instance_slice]
new_instances = old_to_new_instance[old_instances]
labels[instance_slice] = new_instances
azimuth_index = norb.SmallNORB.label_type_to_index['azimuth']
azimuth_slice = numpy.index_exp[:, azimuth_index]
labels[azimuth_slice] = labels[azimuth_slice] / 2
return new_to_old_instance
new_to_old_instance = remap_instances(which_set, labels)
def get_new_azimuth_degrees(scalar_label):
return 20 * scalar_label
# Maps a label vector to the corresponding index in <values>
num_labels_by_type = numpy.array(norb.SmallNORB.num_labels_by_type, 'int')
num_labels_by_type[instance_index] = len(new_to_old_instance)
label_to_index = numpy.ndarray(num_labels_by_type, 'int')
label_to_index.fill(-1)
for i, label in enumerate(labels):
label_to_index[tuple(label)] = i
assert not numpy.any(label_to_index == -1) # all elements have been set
figure, axes = pyplot.subplots(1, 2, squeeze=True)
figure.canvas.set_window_title('Small NORB dataset (%sing set)' %
which_set)
# shift subplots down to make more room for the text
figure.subplots_adjust(bottom=0.05)
num_label_types = len(norb.SmallNORB.num_labels_by_type)
current_labels = numpy.zeros(num_label_types, 'int')
current_label_type = [0, ]
label_text = figure.suptitle("title text",
x=0.1,
horizontalalignment="left")
def redraw(redraw_text, redraw_images):
if redraw_text:
cl = current_labels
lines = [
'category: %s' % norb.SmallNORB.get_category(cl[0]),
'instance: %d' % new_to_old_instance[cl[1]],
'elevation: %d' % norb.SmallNORB.get_elevation_degrees(cl[2]),
'azimuth: %d' % get_new_azimuth_degrees(cl[3]),
'lighting: %d' % cl[4]]
lt = current_label_type[0]
lines[lt] = '==> ' + lines[lt]
text = ('Up/down arrows choose label, left/right arrows change it'
'\n\n' +
'\n'.join(lines))
label_text.set_text(text)
if redraw_images:
index = label_to_index[tuple(current_labels)]
image_pair = values[index, :, :, :]
for i in range(2):
axes[i].imshow(image_pair[i, :, :], cmap='gray')
figure.canvas.draw()
def on_key_press(event):
def add_mod(arg, step, size):
return (arg + size + step) % size
def incr_label_type(step):
current_label_type[0] = add_mod(current_label_type[0],
step,
num_label_types)
def incr_label(step):
lt = current_label_type[0]
num_labels = num_labels_by_type[lt]
current_labels[lt] = add_mod(current_labels[lt], step, num_labels)
if event.key == 'up':
incr_label_type(-1)
redraw(True, False)
elif event.key == 'down':
incr_label_type(1)
redraw(True, False)
elif event.key == 'left':
incr_label(-1)
redraw(True, True)
elif event.key == 'right':
incr_label(1)
redraw(True, True)
elif event.key == 'q':
sys.exit(0)
figure.canvas.mpl_connect('key_press_event', on_key_press)
redraw(True, True)
pyplot.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
Ginkgo-Biloba/Misc-Python | numpy/SciPyInt.py | 1 | 3425 | # coding=utf-8
import numpy as np
from scipy import integrate as intgrt
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from math import sqrt
# Compute the volume of a half ball (hemisphere)
def ballVolume():
def halfBall(x, y):
return sqrt(1 - x**2 - y**2)
def halfCircle(x):
return sqrt(1 - x**2)
(vol, error) = intgrt.dblquad(halfBall, -1, 1, lambda x: -halfCircle(x), lambda x: halfCircle(x))
print ("vol =", vol)
# Integrate a system of ordinary differential equations
# Compute the trajectory of the Lorenz attractor
def LorenzAttactor():
	# Given the position vector w and the three parameters sigma, rho, beta, compute the velocity vector dx, dy, dz
def lorenz(w, t, sigma, rho, beta):
(x, y, z) = w.tolist()
return (sigma * (y - x), x * (rho - z), x * y - beta * z)
t = np.arange(0, 20, 0.01) # 创建时间点
	# Call odeint to solve the lorenz system with two different initial values
track1 = intgrt.odeint(lorenz, (0.0, 1.0, 0.0), t, args=(10.0, 28.0, 2.7))
track2 = intgrt.odeint(lorenz, (0.0, 1.01, 0.0), t, args=(10.0, 28.0, 2.7))
	# Plot the two trajectories
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(track1[:, 0], track1[:, 1], track1[:, 2], label="$y=1.0$")
ax.plot(track2[:, 0], track2[:, 1], track2[:, 2], label="$y=1.01$")
plt.legend(loc="best")
plt.show()
# Mass-spring-damper system
# Mx'' + bx' + kx = F
def msd(xu, t, M, k, b, F):
(x, u) = xu.tolist()
dx = u
du = (F - k * x - b * u) / M
return (dx, du)
def msdDemo():
	# The slider starts at displacement x = -1.0 with zero initial velocity; the external control force is a constant 1.0
initxu = (-1.0, 0.0)
(M, k, b, F) = (1.0, 0.5, 0.2, 1.0)
t = np.arange(0, 40, 0.02)
rst = intgrt.odeint(msd, initxu, t, args=(M, k, b, F))
(fig, (ax1, ax2)) = plt.subplots(2, 1)
ax1.plot(t, rst[:, 0], label=u"位移 x")
ax2.plot(t, rst[:, 1], label=u"速度 u")
ax1.legend(); ax2.legend()
plt.show()
# Mass-spring-damper system
class MassSpringDamper(object):
def __init__(self, M, k, b, F):
(self.M, self.k, self.b, self.F) = (M, k, b, F)
	# Derivative function
def dee(self, t, xu):
(x, u) = xu.tolist()
dx = u
du = (self.F - self.k * x - self.b * u) / self.M
return [dx, du] # 要求返回列表而不是元组
# Use a PID controller
class PID(object):
def __init__(self, kp, ki, kd, dt):
(self.kp, self.ki, self.kd, self.dt) = (kp, ki, kd, dt)
self.lastErr = None
self.x = 0.0
def update(self, err):
p = self.kp * err
i = self.ki * self.x
if self.lastErr is None:
d = 0.0
else:
d = self.kd * (err - self.lastErr) / self.dt
self.x += err * self.dt
self.lastErr = err
return p + i + d
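# Single-step illustration (assumed numbers): with kp=2, ki=0.5, kd=1, dt=0.1
# and a first error of 0.4, update() returns p + i + d = 0.8 + 0.0 + 0.0, since
# the integral is still empty and there is no previous error yet; the integral
# term only starts to contribute from the second call onward.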
# Control the external force F so that the slider stops more quickly at displacement 2.0
def msdPID(kp, ki, kd, dt):
stm = MassSpringDamper(M=1.0, k=0.5, b=0.2, F=1.0)
initxu = (-1.0, 0.0)
pid = PID(kp, ki, kd, dt)
r = intgrt.ode(stm.dee)
r.set_integrator("vode", method="bdf")
r.set_initial_value(initxu, 0)
t = list(); rst = list(); FArr = list()
while (r.successful() and (r.t + dt < 3)):
r.integrate(r.t + dt)
t.append(r.t)
rst.append(r.y)
err = 2.0 - r.y[0]
F = pid.update(err)
stm.F = F
FArr.append(F)
rst = np.array(rst)
t = np.array(t)
FArr = np.array(FArr)
(fig, (ax1, ax2, ax3)) = plt.subplots(3, 1)
ax1.plot(t, rst[:, 0], label=u"displacement x")
ax2.plot(t, rst[:, 1], label=u"velocity u")
ax3.plot(t, FArr, label=u"control force F")
ax1.legend(); ax2.legend(); ax3.legend()
plt.show()
if (__name__ == "__main__"):
# ballVolume()
LorenzAttractor()
# msdDemo()
# msdPID(19.29, 1.41, 6.25, 0.02) # the optimal parameter set
| gpl-3.0 |
zzcclp/spark | python/pyspark/pandas/tests/plot/test_frame_plot_matplotlib.py | 14 | 18666 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
from distutils.version import LooseVersion
from io import BytesIO
import unittest
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.testing.pandasutils import (
have_matplotlib,
matplotlib_requirement_message,
PandasOnSparkTestCase,
TestUtils,
)
if have_matplotlib:
import matplotlib
from matplotlib import pyplot as plt
matplotlib.use("agg")
@unittest.skipIf(not have_matplotlib, matplotlib_requirement_message)
class DataFramePlotMatplotlibTest(PandasOnSparkTestCase, TestUtils):
sample_ratio_default = None
@classmethod
def setUpClass(cls):
super().setUpClass()
if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
pd.set_option("plotting.backend", "matplotlib")
set_option("plotting.backend", "matplotlib")
set_option("plotting.max_rows", 2000)
set_option("plotting.sample_ratio", None)
@classmethod
def tearDownClass(cls):
if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
pd.reset_option("plotting.backend")
reset_option("plotting.backend")
reset_option("plotting.max_rows")
reset_option("plotting.sample_ratio")
super().tearDownClass()
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50], "b": [2, 3, 4, 5, 7, 9, 10, 15, 34, 45, 49]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10],
)
@property
def psdf1(self):
return ps.from_pandas(self.pdf1)
@staticmethod
def plot_to_base64(ax):
bytes_data = BytesIO()
ax.figure.savefig(bytes_data, format="png")
bytes_data.seek(0)
b64_data = base64.b64encode(bytes_data.read())
plt.close(ax.figure)
return b64_data
def test_line_plot(self):
def check_line_plot(pdf, psdf):
ax1 = pdf.plot(kind="line", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="line", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax3 = pdf.plot.line(colormap="Paired")
bin3 = self.plot_to_base64(ax3)
ax4 = psdf.plot.line(colormap="Paired")
bin4 = self.plot_to_base64(ax4)
self.assertEqual(bin3, bin4)
pdf1 = self.pdf1
psdf1 = self.psdf1
check_line_plot(pdf1, psdf1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf1.columns = columns
psdf1.columns = columns
check_line_plot(pdf1, psdf1)
def test_area_plot(self):
def check_area_plot(pdf, psdf):
ax1 = pdf.plot(kind="area", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="area", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax3 = pdf.plot.area(colormap="Paired")
bin3 = self.plot_to_base64(ax3)
ax4 = psdf.plot.area(colormap="Paired")
bin4 = self.plot_to_base64(ax4)
self.assertEqual(bin3, bin4)
pdf = self.pdf1
psdf = self.psdf1
check_area_plot(pdf, psdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
check_area_plot(pdf, psdf)
def test_area_plot_stacked_false(self):
def check_area_plot_stacked_false(pdf, psdf):
ax1 = pdf.plot.area(stacked=False)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot.area(stacked=False)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
# test if frame area plot is correct when stacked=False because default is True
pdf = pd.DataFrame(
{
"sales": [3, 2, 3, 9, 10, 6],
"signups": [5, 5, 6, 12, 14, 13],
"visits": [20, 42, 28, 62, 81, 50],
},
index=pd.date_range(start="2018/01/01", end="2018/07/01", freq="M"),
)
psdf = ps.from_pandas(pdf)
check_area_plot_stacked_false(pdf, psdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "sales"), ("x", "signups"), ("y", "visits")])
pdf.columns = columns
psdf.columns = columns
check_area_plot_stacked_false(pdf, psdf)
def test_area_plot_y(self):
def check_area_plot_y(pdf, psdf, y):
ax1 = pdf.plot.area(y=y)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot.area(y=y)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
# test if frame area plot is correct when y is specified
pdf = pd.DataFrame(
{
"sales": [3, 2, 3, 9, 10, 6],
"signups": [5, 5, 6, 12, 14, 13],
"visits": [20, 42, 28, 62, 81, 50],
},
index=pd.date_range(start="2018/01/01", end="2018/07/01", freq="M"),
)
psdf = ps.from_pandas(pdf)
check_area_plot_y(pdf, psdf, y="sales")
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "sales"), ("x", "signups"), ("y", "visits")])
pdf.columns = columns
psdf.columns = columns
check_area_plot_y(pdf, psdf, y=("x", "sales"))
def test_barh_plot_with_x_y(self):
def check_barh_plot_with_x_y(pdf, psdf, x, y):
ax1 = pdf.plot(kind="barh", x=x, y=y, colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="barh", x=x, y=y, colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax3 = pdf.plot.barh(x=x, y=y, colormap="Paired")
bin3 = self.plot_to_base64(ax3)
ax4 = psdf.plot.barh(x=x, y=y, colormap="Paired")
bin4 = self.plot_to_base64(ax4)
self.assertEqual(bin3, bin4)
# this is testing plot with specified x and y
pdf1 = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
psdf1 = ps.from_pandas(pdf1)
check_barh_plot_with_x_y(pdf1, psdf1, x="lab", y="val")
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "lab"), ("y", "val")])
pdf1.columns = columns
psdf1.columns = columns
check_barh_plot_with_x_y(pdf1, psdf1, x=("x", "lab"), y=("y", "val"))
def test_barh_plot(self):
def check_barh_plot(pdf, psdf):
ax1 = pdf.plot(kind="barh", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="barh", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax3 = pdf.plot.barh(colormap="Paired")
bin3 = self.plot_to_base64(ax3)
ax4 = psdf.plot.barh(colormap="Paired")
bin4 = self.plot_to_base64(ax4)
self.assertEqual(bin3, bin4)
# this is testing when x or y is not assigned
pdf1 = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
psdf1 = ps.from_pandas(pdf1)
check_barh_plot(pdf1, psdf1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "lab"), ("y", "val")])
pdf1.columns = columns
psdf1.columns = columns
check_barh_plot(pdf1, psdf1)
def test_bar_plot(self):
def check_bar_plot(pdf, psdf):
ax1 = pdf.plot(kind="bar", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="bar", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax3 = pdf.plot.bar(colormap="Paired")
bin3 = self.plot_to_base64(ax3)
ax4 = psdf.plot.bar(colormap="Paired")
bin4 = self.plot_to_base64(ax4)
self.assertEqual(bin3, bin4)
pdf1 = self.pdf1
psdf1 = self.psdf1
check_bar_plot(pdf1, psdf1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "lab"), ("y", "val")])
pdf1.columns = columns
psdf1.columns = columns
check_bar_plot(pdf1, psdf1)
def test_bar_with_x_y(self):
# this is testing plot with specified x and y
pdf = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
psdf = ps.from_pandas(pdf)
ax1 = pdf.plot(kind="bar", x="lab", y="val", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="bar", x="lab", y="val", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax3 = pdf.plot.bar(x="lab", y="val", colormap="Paired")
bin3 = self.plot_to_base64(ax3)
ax4 = psdf.plot.bar(x="lab", y="val", colormap="Paired")
bin4 = self.plot_to_base64(ax4)
self.assertEqual(bin3, bin4)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "lab"), ("y", "val")])
pdf.columns = columns
psdf.columns = columns
ax5 = pdf.plot(kind="bar", x=("x", "lab"), y=("y", "val"), colormap="Paired")
bin5 = self.plot_to_base64(ax5)
ax6 = psdf.plot(kind="bar", x=("x", "lab"), y=("y", "val"), colormap="Paired")
bin6 = self.plot_to_base64(ax6)
self.assertEqual(bin5, bin6)
ax7 = pdf.plot.bar(x=("x", "lab"), y=("y", "val"), colormap="Paired")
bin7 = self.plot_to_base64(ax7)
ax8 = psdf.plot.bar(x=("x", "lab"), y=("y", "val"), colormap="Paired")
bin8 = self.plot_to_base64(ax8)
self.assertEqual(bin7, bin8)
def test_pie_plot(self):
def check_pie_plot(pdf, psdf, y):
ax1 = pdf.plot.pie(y=y, figsize=(5, 5), colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot.pie(y=y, figsize=(5, 5), colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf.plot(kind="pie", y=y, figsize=(5, 5), colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="pie", y=y, figsize=(5, 5), colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax11, ax12 = pdf.plot.pie(figsize=(5, 5), subplots=True, colormap="Paired")
bin11 = self.plot_to_base64(ax11)
bin12 = self.plot_to_base64(ax12)
self.assertEqual(bin11, bin12)
ax21, ax22 = psdf.plot.pie(figsize=(5, 5), subplots=True, colormap="Paired")
bin21 = self.plot_to_base64(ax21)
bin22 = self.plot_to_base64(ax22)
self.assertEqual(bin21, bin22)
ax11, ax12 = pdf.plot(kind="pie", figsize=(5, 5), subplots=True, colormap="Paired")
bin11 = self.plot_to_base64(ax11)
bin12 = self.plot_to_base64(ax12)
self.assertEqual(bin11, bin12)
ax21, ax22 = psdf.plot(kind="pie", figsize=(5, 5), subplots=True, colormap="Paired")
bin21 = self.plot_to_base64(ax21)
bin22 = self.plot_to_base64(ax22)
self.assertEqual(bin21, bin22)
pdf1 = pd.DataFrame(
{"mass": [0.330, 4.87, 5.97], "radius": [2439.7, 6051.8, 6378.1]},
index=["Mercury", "Venus", "Earth"],
)
psdf1 = ps.from_pandas(pdf1)
check_pie_plot(pdf1, psdf1, y="mass")
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "mass"), ("y", "radius")])
pdf1.columns = columns
psdf1.columns = columns
check_pie_plot(pdf1, psdf1, y=("x", "mass"))
def test_pie_plot_error_message(self):
# this is to test if error is correctly raising when y is not specified
# and subplots is not set to True
pdf = pd.DataFrame(
{"mass": [0.330, 4.87, 5.97], "radius": [2439.7, 6051.8, 6378.1]},
index=["Mercury", "Venus", "Earth"],
)
psdf = ps.from_pandas(pdf)
with self.assertRaises(ValueError) as context:
psdf.plot.pie(figsize=(5, 5), colormap="Paired")
error_message = "pie requires either y column or 'subplots=True'"
self.assertTrue(error_message in str(context.exception))
def test_scatter_plot(self):
def check_scatter_plot(pdf, psdf, x, y, c):
ax1 = pdf.plot.scatter(x=x, y=y)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot.scatter(x=x, y=y)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf.plot(kind="scatter", x=x, y=y)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="scatter", x=x, y=y)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
# check when keyword c is given as name of a column
ax1 = pdf.plot.scatter(x=x, y=y, c=c, s=50)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot.scatter(x=x, y=y, c=c, s=50)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
# Use pandas scatter plot example
pdf1 = pd.DataFrame(np.random.rand(50, 4), columns=["a", "b", "c", "d"])
psdf1 = ps.from_pandas(pdf1)
check_scatter_plot(pdf1, psdf1, x="a", y="b", c="c")
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c"), ("z", "d")])
pdf1.columns = columns
psdf1.columns = columns
check_scatter_plot(pdf1, psdf1, x=("x", "a"), y=("x", "b"), c=("y", "c"))
def test_hist_plot(self):
def check_hist_plot(pdf, psdf):
_, ax1 = plt.subplots(1, 1)
ax1 = pdf.plot.hist()
bin1 = self.plot_to_base64(ax1)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf.plot.hist()
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf.plot.hist(bins=15)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot.hist(bins=15)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf.plot(kind="hist", bins=15)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="hist", bins=15)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf.plot.hist(bins=3, bottom=[2, 1, 3])
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot.hist(bins=3, bottom=[2, 1, 3])
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
non_numeric_pdf = self.pdf1.copy()
non_numeric_pdf.c = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"]
non_numeric_psdf = ps.from_pandas(non_numeric_pdf)
ax1 = non_numeric_pdf.plot.hist(
x=non_numeric_pdf.columns[0], y=non_numeric_pdf.columns[1], bins=3
)
bin1 = self.plot_to_base64(ax1)
ax2 = non_numeric_psdf.plot.hist(
x=non_numeric_pdf.columns[0], y=non_numeric_pdf.columns[1], bins=3
)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
pdf1 = self.pdf1
psdf1 = self.psdf1
check_hist_plot(pdf1, psdf1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf1.columns = columns
psdf1.columns = columns
check_hist_plot(pdf1, psdf1)
def test_kde_plot(self):
def moving_average(a, n=10):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1 :] / n
def check_kde_plot(pdf, psdf, *args, **kwargs):
_, ax1 = plt.subplots(1, 1)
ax1 = pdf.plot.kde(*args, **kwargs)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf.plot.kde(*args, **kwargs)
try:
for i, (line1, line2) in enumerate(zip(ax1.get_lines(), ax2.get_lines())):
expected = line1.get_xydata().ravel()
actual = line2.get_xydata().ravel()
# TODO: Due to an implementation difference, the output differs slightly from
# pandas'. We should identify the root cause of the difference and reduce
# the diff.
# Note: The data run from 1 to 50, so both curves are smoothed with a moving
# average before being compared.
self.assertTrue(
np.allclose(moving_average(actual), moving_average(expected), rtol=3.0)
)
finally:
ax1.cla()
ax2.cla()
pdf1 = self.pdf1
psdf1 = self.psdf1
check_kde_plot(pdf1, psdf1, bw_method=0.3)
check_kde_plot(pdf1, psdf1, ind=[1, 2, 3], bw_method=3.0)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf1.columns = columns
psdf1.columns = columns
check_kde_plot(pdf1, psdf1, bw_method=0.3)
check_kde_plot(pdf1, psdf1, ind=[1, 2, 3], bw_method=3.0)
if __name__ == "__main__":
from pyspark.pandas.tests.plot.test_frame_plot_matplotlib import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
zoranzhao/NoSSim | NoS_Vgraph/core_util_plot.py | 1 | 5568 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
def plot(srv_app, srv_lwip, cli_app, cli_lwip):
#srv_app = {0:[],1:[],2:[]}
#srv_lwip = {0:[],1:[],2:[]}
#cli_app = {0:[],1:[],2:[]}
#cli_lwip = {0:[],1:[],2:[]}
O2lwip=cli_lwip[2]
O2comp=cli_app[2]
O1lwip=cli_lwip[1]
O1comp=cli_app[1]
O0lwip=cli_lwip[0]
O0comp=cli_app[0]
colorsred = ['brown', 'red', 'tomato', 'lightsalmon']
colorsgreen = ['darkgreen', 'seagreen', 'limegreen', 'springgreen']
colorsblue =['navy', 'blue', 'steelblue', 'lightsteelblue']
hatches = ['//', '++', 'xxx', 'oo','\\\\\\', 'OO', '..' , '---', "**"]
label_size=15
font_size=15
#client
N = 3
width = 0.25 # the width of the bars
xtra_space = 0.02
ind = np.arange(N) + 2 - (width*3+xtra_space*2)/2 # the x locations for the groups
ind1 = np.arange(N) + 2 - (width*3+xtra_space*2)/2 # the x locations for the groups
ind2 = np.arange(N) + 2+(N+1) - (width*3+xtra_space*2)/2 # the x locations for the groups
ind3 = np.arange(N) + 2+N+1+N+1 - (width*3+xtra_space*2)/2 # the x locations for the groups
ind = np.append(ind1, ind2)
ind = np.append(ind, ind3)
#ind = np.append(ind, ind4)
#ind = np.append(ind, ind5)
fig, ax = plt.subplots(2)
a1 = ax[0].bar(ind, O2comp, width, color=[0,0.5,1])
a2 = ax[0].bar(ind, O2lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,0.5,1], bottom=O2comp)
b1 = ax[0].bar(ind+ width + xtra_space, O1comp, width, color=[0,1,0.5])
b2 = ax[0].bar(ind+ width + xtra_space, O1lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,1,0.5], bottom=O1comp)
c1 = ax[0].bar(ind+ 2*(width + xtra_space), O0comp, width, color=[1,0.5,0])
c2 = ax[0].bar(ind+ 2*(width + xtra_space), O0lwip, width, fill=False, hatch=hatches[0], edgecolor=[1,0.5,0], bottom=O0comp)
OLevel = ["O-0", "O-1", "O-2", "O-3"]
channels = ["b@11Mbps", "g@9Mbps", "g@54Mbps"]
duration_type = [" - lwIP", " - App."]
legend_size=16
plt.figlegend(
(
a1, a2,
b1, b2,
c1, c2
),
(
OLevel[2]+duration_type[1], OLevel[2]+duration_type[0],
OLevel[1]+duration_type[1], OLevel[1]+duration_type[0],
OLevel[0]+duration_type[1], OLevel[0]+duration_type[0]
),
scatterpoints=1,
loc='upper center',
ncol=3,
prop={'size':legend_size})
xticks = [ 2, 2.9, 3, 4, 6, 6.9, 7, 8, 10, 10.9, 11, 12]
xticks_minor = [ 1, 5, 9, 13 ]#longer
xlbls = [channels[0], '6-Cli.', channels[1], channels[2],
channels[0], '4-Cli.', channels[1], channels[2],
channels[0], '2-Cli.', channels[1], channels[2]]
ax[0].set_xticks( xticks )
ax[0].set_xticks( xticks_minor, minor=True )
ax[0].set_xticklabels( xlbls )
ax[0].set_xlim( 1, 13 )
ax[0].grid( 'off', axis='x' )
ax[0].grid( 'off', axis='x', which='minor' )
# vertical alignment of xtick labels
va = [ 0, -.1, 0, 0, 0, -.1, 0, 0, 0, -.1, 0, 0]
for t, y in zip( ax[0].get_xticklabels( ), va ):
t.set_y( y )
ax[0].tick_params( axis='x', which='minor', direction='out', length=40 , top='off')
#ax.tick_params( axis='x', which='major', direction='out', length=10 )
ax[0].tick_params( axis='x', which='major', bottom='off', top='off' )
vals = ax[0].get_yticks()
ax[0].set_yticklabels(['{:3.0f}%'.format(x*100) for x in vals])
#server
O2lwip=srv_lwip[2]
O2comp=srv_app[2]
O1lwip=srv_lwip[1]
O1comp=srv_app[1]
O0lwip=srv_lwip[0]
O0comp=srv_app[0]
a1 = ax[1].bar(ind, O2comp, width, color=[0,0.5,1])
a2 = ax[1].bar(ind, O2lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,0.5,1], bottom=O2comp)
b1 = ax[1].bar(ind+ width + xtra_space, O1comp, width, color=[0,1,0.5])
b2 = ax[1].bar(ind+ width + xtra_space, O1lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,1,0.5], bottom=O1comp)
c1 = ax[1].bar(ind+ 2*(width + xtra_space), O0comp, width, color=[1,0.5,0])
c2 = ax[1].bar(ind+ 2*(width + xtra_space), O0lwip, width, fill=False, hatch=hatches[0], edgecolor=[1,0.5,0], bottom=O0comp)
channels = ["b@11Mbps", "g@9Mbps", "g@54Mbps"]
duration_type = [" - Communication", " - Computation"]
xticks = [ 2, 2.9, 3, 4, 6, 6.9, 7, 8, 10, 10.9, 11, 12]
xticks_minor = [ 1, 5, 9, 13 ]#longer
xlbls = [channels[0], '6-Cli.', channels[1], channels[2],
channels[0], '4-Cli.', channels[1], channels[2],
channels[0], '2-Cli.', channels[1], channels[2]]
ax[1].set_xticks( xticks )
ax[1].set_xticks( xticks_minor, minor=True )
ax[1].set_xticklabels( xlbls )
ax[1].set_xlim( 1, 13 )
ax[1].grid( 'off', axis='x' )
ax[1].grid( 'off', axis='x', which='minor' )
va = [ 0, -.1, 0, 0, 0, -.1, 0, 0, 0, -.1, 0, 0]
for t, y in zip( ax[1].get_xticklabels( ), va ):
t.set_y( y )
ax[1].tick_params( axis='x', which='minor', direction='out', length=40 , top='off')
ax[1].tick_params( axis='x', which='major', bottom='off', top='off' )
vals = ax[1].get_yticks()
ax[1].set_yticklabels(['{:3.0f}%'.format(x*100) for x in vals])
# add some text for labels, title and axes ticks
ax[0].set_ylabel('Core Utilization', fontsize=label_size)
ax[0].set_xlabel('Client', fontsize=label_size)
ax[1].set_ylabel('Core Utilization', fontsize=label_size)
ax[1].set_xlabel('Server', fontsize=label_size)
ax[0].tick_params(axis='y', labelsize=font_size)
ax[1].tick_params(axis='y', labelsize=font_size)
ax[0].tick_params(axis='x', labelsize=font_size)
ax[1].tick_params(axis='x', labelsize=font_size)
plt.show()
| bsd-3-clause |
adammenges/statsmodels | statsmodels/examples/tsa/arma_plots.py | 33 | 2516 | '''Plot acf and pacf for some ARMA(1,1)
'''
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.tsa.arima_process as tsp
from statsmodels.sandbox.tsa.fftarma import ArmaFft as FftArmaProcess
import statsmodels.tsa.stattools as tss
from statsmodels.graphics.tsaplots import plotacf
np.set_printoptions(precision=2)
arcoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]
macoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]
nsample = 1000
nburnin = 1000
sig = 1
fig = plt.figure(figsize=(8, 13))
fig.suptitle('ARMA: Autocorrelation (left) and Partial Autocorrelation (right)')
subplotcount = 1
nrows = 4
for arcoef in arcoefs[:-1]:
for macoef in macoefs[:-1]:
ar = np.r_[1., -arcoef]
ma = np.r_[1., macoef]
#y = tsp.arma_generate_sample(ar,ma,nsample, sig, burnin)
#armaprocess = FftArmaProcess(ar, ma, nsample) #TODO: make n optional
#armaprocess.plot4()
armaprocess = tsp.ArmaProcess(ar, ma)
acf = armaprocess.acf(20)[:20]
pacf = armaprocess.pacf(20)[:20]
ax = fig.add_subplot(nrows, 2, subplotcount)
plotacf(acf, ax=ax)
## ax.set_title('Autocorrelation \nar=%s, ma=%rs' % (ar, ma),
## size='xx-small')
ax.text(0.7, 0.6, 'ar =%s \nma=%s' % (ar, ma),
transform=ax.transAxes,
horizontalalignment='left', #'right',
size='xx-small')
ax.set_xlim(-1,20)
subplotcount +=1
ax = fig.add_subplot(nrows, 2, subplotcount)
plotacf(pacf, ax=ax)
## ax.set_title('Partial Autocorrelation \nar=%s, ma=%rs' % (ar, ma),
## size='xx-small')
ax.text(0.7, 0.6, 'ar =%s \nma=%s' % (ar, ma),
transform=ax.transAxes,
horizontalalignment='left', #'right',
size='xx-small')
ax.set_xlim(-1,20)
subplotcount +=1
axs = fig.axes
### turn of the 2nd column y tick labels
##for ax in axs[1::2]:#[:,1].flat:
## for label in ax.get_yticklabels(): label.set_visible(False)
# turn off all but the bottom xtick labels
for ax in axs[:-2]:#[:-1,:].flat:
for label in ax.get_xticklabels(): label.set_visible(False)
# use a MaxNLocator on the first column y axis if you have a bunch of
# rows to avoid bunching; example below uses at most 3 ticks
import matplotlib.ticker as mticker
for ax in axs: #[::2]:#[:,1].flat:
ax.yaxis.set_major_locator( mticker.MaxNLocator(3 ))
plt.show()
| bsd-3-clause |
rgllm/uminho | 04/CN/TP3/src/src/parser/PsoTools.py | 1 | 4783 | import itertools
import json
import matplotlib.pyplot as plt
from matplotlib import style
import os
style.use('ggplot')
import numpy as np
from pprint import pprint
from os.path import basename
xrange=range
class PsoTools(object):
def __init__(self):
pass
# Convert a data raw file to a json file
def rawToJson(self, inputFilePath, outputFilePath):
inFile = open(inputFilePath, mode='r')
outFile = open(outputFilePath, mode='w')
meta_data = dict.fromkeys(['nb_customers', 'nb_depots',
'vehicle_cap', 'vehicle_cost', 'cost_type'])
cust_dict = dict.fromkeys(['x', 'y', 'demand'])
dep_dict = dict.fromkeys(['x', 'y', 'capacity'])
customers = {}
depots = {}
# Number of customers and available depots
nb_customers = int(inFile.readline())
nb_depots = int(inFile.readline())
meta_data['nb_customers'] = nb_customers
meta_data['nb_depots'] = nb_depots
inFile.readline() # Empty line
# Depot coordinates
for i, line in enumerate(inFile):
if i < nb_depots:
x = float(line.split()[0])
y = float(line.split()[1])
depots['d'+str(i)] = {}
depots['d'+str(i)]['x'] = x
depots['d'+str(i)]['y'] = y
else:
i=i-1
break
# Customer coordinates and vehicle capacity
for i, line in enumerate(inFile):
if i < nb_customers:
x = float(line.split()[0])
y = float(line.split()[1])
customers['c'+str(i)] = {}
customers['c'+str(i)]['x'] = x
customers['c'+str(i)]['y'] = y
else:
break
# Vehicle and depot capacities
for i, line in enumerate(inFile):
if i == 0:
vehicle_cap = float(line)
meta_data['vehicle_cap'] = vehicle_cap
elif i == 1:
pass
elif i < nb_depots+2:
depot_cap = float(line)
depots['d'+str(i-2)]['capacity'] = depot_cap
else:
break
# Customer demands
for i, line in enumerate(inFile):
if i < nb_customers:
demand = float(line)
customers['c'+str(i)]['demand'] = demand
else:
break
# Depot opening costs
for i, line in enumerate(inFile):
if i < nb_depots:
openning_cost = float(line)
depots['d'+str(i)]['opening_cost'] = openning_cost
elif i == nb_depots:
pass
elif i == nb_depots+1:
vehicle_cost = float(line)
meta_data['vehicle_cost'] = vehicle_cost
elif i == nb_depots+2:
pass
elif i == nb_depots+3:
cost_type = float(line)
meta_data['cost_type'] = cost_type
else:
break
final_output = {}
final_output['customers'] = customers
final_output['depots'] = depots
final_output['meta_data'] = meta_data
json.dump(final_output, outFile, indent=4)
inFile.close()
outFile.close()
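# A minimal usage sketch (the file names are placeholders, not files shipped with this
# project):
#
#   pso = PsoTools()
#   pso.rawToJson('instance.dat', 'instance.json')
#   pso.plotAll('instance.json')
#
# rawToJson() parses the raw depot/customer instance file and writes the customers,
# depots and meta data as JSON; the plotting helpers below read that JSON back.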
# Plot the customers on the map
def plotCustomers(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_customers = data['meta_data']['nb_customers']
coords_cust = np.zeros(shape=(nb_customers,2))
for i in xrange(nb_customers):
x = data['customers']['c{0}'.format(i)]['x']
y = data['customers']['c{0}'.format(i)]['y']
coords_cust[i] = [x,y]
plt.scatter(coords_cust[:,0], coords_cust[:,1], marker='P', s=10, linewidth=5)
plt.show()
# Plot the depots on the map
def plotDepots(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_depots = data['meta_data']['nb_depots']
coords_depot = np.zeros(shape=(nb_depots,2))
for i in xrange(nb_depots):
x = data['depots']['d{0}'.format(i)]['x']
y = data['depots']['d{0}'.format(i)]['y']
coords_depot[i] = [x,y]
plt.scatter(coords_depot[:,0], coords_depot[:,1], marker='P', s=10, linewidth=5)
plt.show()
# Plot both depots and customers on the map
def plotAll(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_customers = data['meta_data']['nb_customers']
nb_depots = data['meta_data']['nb_depots']
coords_cust = np.zeros(shape=(nb_customers,2))
coords_depot = np.zeros(shape=(nb_depots,2))
for i in xrange(nb_customers):
x = data['customers']['c{0}'.format(i)]['x']
y = data['customers']['c{0}'.format(i)]['y']
coords_cust[i] = [x,y]
for i in xrange(nb_depots):
x = data['depots']['d{0}'.format(i)]['x']
y = data['depots']['d{0}'.format(i)]['y']
coords_depot[i] = [x,y]
filename = str(basename(os.path.splitext(jsonInputFile)[0]) + '.pdf')
plt.scatter(coords_cust[:,0], coords_cust[:,1], marker='s', s=10, linewidth=5)
plt.scatter(coords_depot[:,0], coords_depot[:,1], marker='8', s=10, linewidth=5)
plt.savefig(filename, format='pdf')
#~ plt.show()
| mit |
jat255/seaborn | seaborn/timeseries.py | 4 | 15212 | """Timeseries plotting functions."""
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats, interpolate
import matplotlib as mpl
import matplotlib.pyplot as plt
from .external.six import string_types
from . import utils
from . import algorithms as algo
from .palettes import color_palette
def tsplot(data, time=None, unit=None, condition=None, value=None,
err_style="ci_band", ci=68, interpolate=True, color=None,
estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None,
legend=True, ax=None, **kwargs):
"""Plot one or more timeseries with flexible representation of uncertainty.
This function is intended to be used with data where observations are
nested within sampling units that were measured at multiple timepoints.
It can take data specified either as a long-form (tidy) DataFrame or as an
ndarray with dimensions (unit, time) The interpretation of some of the
other parameters changes depending on the type of object passed as data.
Parameters
----------
data : DataFrame or ndarray
Data for the plot. Should either be a "long form" dataframe or an
array with dimensions (unit, time, condition). In both cases, the
condition field/dimension is optional. The type of this argument
determines the interpretation of the next few parameters. When
using a DataFrame, the index has to be sequential.
time : string or series-like
Either the name of the field corresponding to time in the data
DataFrame or x values for a plot when data is an array. If a Series,
the name will be used to label the x axis.
unit : string
Field in the data DataFrame identifying the sampling unit (e.g.
subject, neuron, etc.). The error representation will collapse over
units at each time/condition observation. This has no role when data
is an array.
value : string
Either the name of the field corresponding to the data values in
the data DataFrame (i.e. the y coordinate) or a string that forms
the y axis label when data is an array.
condition : string or Series-like
Either the name of the field identifying the condition an observation
falls under in the data DataFrame, or a sequence of names with a length
equal to the size of the third dimension of data. There will be a
separate trace plotted for each condition. If condition is a Series
with a name attribute, the name will form the title for the plot
legend (unless legend is set to False).
err_style : string or list of strings or None
Names of ways to plot uncertainty across units from set of
{ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}.
Can use one or more than one method.
ci : float or list of floats in [0, 100]
Confidence interval size(s). If a list, it will stack the error
plots for each confidence interval. Only relevant for error styles
with "ci" in the name.
interpolate : boolean
Whether to do a linear interpolation between each timepoint when
plotting. The value of this parameter also determines the marker
used for the main plot traces, unless marker is specified as a keyword
argument.
color : seaborn palette or matplotlib color name or dictionary
Palette or color for the main plots and error representation (unless
plotting by unit, which can be separately controlled with err_palette).
If a dictionary, should map condition name to color spec.
estimator : callable
Function to determine the central tendency and to pass to the bootstrap;
it must take an ``axis`` argument.
n_boot : int
Number of bootstrap iterations.
err_palette : seaborn palette
Palette name or list of colors used when plotting data for each unit.
err_kws : dict, optional
Keyword argument dictionary passed through to matplotlib function
generating the error plot.
legend : bool, optional
If ``True`` and there is a ``condition`` variable, add a legend to
the plot.
ax : axis object, optional
Plot in given axis; if None creates a new figure
kwargs :
Other keyword arguments are passed to main plot() call
Returns
-------
ax : matplotlib axis
axis with plot data
Examples
--------
Plot a trace with translucent confidence bands:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(22)
>>> import seaborn as sns; sns.set(color_codes=True)
>>> x = np.linspace(0, 15, 31)
>>> data = np.sin(x) + np.random.rand(10, 31) + np.random.randn(10, 1)
>>> ax = sns.tsplot(data=data)
Plot a long-form dataframe with several conditions:
.. plot::
:context: close-figs
>>> gammas = sns.load_dataset("gammas")
>>> ax = sns.tsplot(time="timepoint", value="BOLD signal",
... unit="subject", condition="ROI",
... data=gammas)
Use error bars at the positions of the observations:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="ci_bars", color="g")
Don't interpolate between the observations:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> ax = sns.tsplot(data=data, err_style="ci_bars", interpolate=False)
Show multiple confidence bands:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, ci=[68, 95], color="m")
Use a different estimator:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, estimator=np.median)
Show each bootstrap resample:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="boot_traces", n_boot=500)
Show the trace from each sampling unit:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="unit_traces")
"""
# Sort out default values for the parameters
if ax is None:
ax = plt.gca()
if err_kws is None:
err_kws = {}
# Handle different types of input data
if isinstance(data, pd.DataFrame):
xlabel = time
ylabel = value
# Condition is optional
if condition is None:
condition = pd.Series(np.ones(len(data)))
legend = False
legend_name = None
n_cond = 1
else:
legend = True and legend
legend_name = condition
n_cond = len(data[condition].unique())
else:
data = np.asarray(data)
# Data can be a timecourse from a single unit or
# several observations in one condition
if data.ndim == 1:
data = data[np.newaxis, :, np.newaxis]
elif data.ndim == 2:
data = data[:, :, np.newaxis]
n_unit, n_time, n_cond = data.shape
# Units are experimental observations. Maybe subjects, or neurons
if unit is None:
units = np.arange(n_unit)
unit = "unit"
units = np.repeat(units, n_time * n_cond)
ylabel = None
# Time forms the xaxis of the plot
if time is None:
times = np.arange(n_time)
else:
times = np.asarray(time)
xlabel = None
if hasattr(time, "name"):
xlabel = time.name
time = "time"
times = np.tile(np.repeat(times, n_cond), n_unit)
# Conditions split the timeseries plots
if condition is None:
conds = range(n_cond)
legend = False
if isinstance(color, dict):
err = "Must have condition names if using color dict."
raise ValueError(err)
else:
conds = np.asarray(condition)
legend = True and legend
if hasattr(condition, "name"):
legend_name = condition.name
else:
legend_name = None
condition = "cond"
conds = np.tile(conds, n_unit * n_time)
# Value forms the y value in the plot
if value is None:
ylabel = None
else:
ylabel = value
value = "value"
# Convert to long-form DataFrame
data = pd.DataFrame(dict(value=data.ravel(),
time=times,
unit=units,
cond=conds))
# Set up the err_style and ci arguments for the loop below
if isinstance(err_style, string_types):
err_style = [err_style]
elif err_style is None:
err_style = []
if not hasattr(ci, "__iter__"):
ci = [ci]
# Set up the color palette
if color is None:
current_palette = utils.get_color_cycle()
if len(current_palette) < n_cond:
colors = color_palette("husl", n_cond)
else:
colors = color_palette(n_colors=n_cond)
elif isinstance(color, dict):
colors = [color[c] for c in data[condition].unique()]
else:
try:
colors = color_palette(color, n_cond)
except ValueError:
color = mpl.colors.colorConverter.to_rgb(color)
colors = [color] * n_cond
# Do a groupby with condition and plot each trace
for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)):
df_c = df_c.pivot(unit, time, value)
x = df_c.columns.values.astype(np.float)
# Bootstrap the data for confidence intervals
boot_data = algo.bootstrap(df_c.values, n_boot=n_boot,
axis=0, func=estimator)
cis = [utils.ci(boot_data, v, axis=0) for v in ci]
central_data = estimator(df_c.values, axis=0)
# Get the color for this condition
color = colors[c]
# Use subroutines to plot the uncertainty
for style in err_style:
# Allow for null style (only plot central tendency)
if style is None:
continue
# Grab the function from the global environment
try:
plot_func = globals()["_plot_%s" % style]
except KeyError:
raise ValueError("%s is not a valid err_style" % style)
# Possibly set up to plot each observation in a different color
if err_palette is not None and "unit" in style:
orig_color = color
color = color_palette(err_palette, len(df_c.values))
# Pass all parameters to the error plotter as keyword args
plot_kwargs = dict(ax=ax, x=x, data=df_c.values,
boot_data=boot_data,
central_data=central_data,
color=color, err_kws=err_kws)
# Plot the error representation, possibly for multiple cis
for ci_i in cis:
plot_kwargs["ci"] = ci_i
plot_func(**plot_kwargs)
if err_palette is not None and "unit" in style:
color = orig_color
# Plot the central trace
kwargs.setdefault("marker", "" if interpolate else "o")
ls = kwargs.pop("ls", "-" if interpolate else "")
kwargs.setdefault("linestyle", ls)
label = cond if legend else "_nolegend_"
ax.plot(x, central_data, color=color, label=label, **kwargs)
# Pad the sides of the plot only when not interpolating
ax.set_xlim(x.min(), x.max())
x_diff = x[1] - x[0]
if not interpolate:
ax.set_xlim(x.min() - x_diff, x.max() + x_diff)
# Add the plot labels
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if legend:
ax.legend(loc=0, title=legend_name)
return ax
# Subroutines for tsplot errorbar plotting
# ----------------------------------------
def _plot_ci_band(ax, x, ci, color, err_kws, **kwargs):
"""Plot translucent error bands around the central tendancy."""
low, high = ci
if "alpha" not in err_kws:
err_kws["alpha"] = 0.2
ax.fill_between(x, low, high, facecolor=color, **err_kws)
def _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs):
"""Plot error bars at each data point."""
for x_i, y_i, (low, high) in zip(x, central_data, ci.T):
ax.plot([x_i, x_i], [low, high], color=color,
solid_capstyle="round", **err_kws)
def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):
"""Plot 250 traces from bootstrap."""
err_kws.setdefault("alpha", 0.25)
err_kws.setdefault("linewidth", 0.25)
if "lw" in err_kws:
err_kws["linewidth"] = err_kws.pop("lw")
ax.plot(x, boot_data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs):
"""Plot a trace for each observation in the original data."""
if isinstance(color, list):
if "alpha" not in err_kws:
err_kws["alpha"] = .5
for i, obs in enumerate(data):
ax.plot(x, obs, color=color[i], label="_nolegend_", **err_kws)
else:
if "alpha" not in err_kws:
err_kws["alpha"] = .2
ax.plot(x, data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_points(ax, x, data, color, err_kws, **kwargs):
"""Plot each original data point discretely."""
if isinstance(color, list):
for i, obs in enumerate(data):
ax.plot(x, obs, "o", color=color[i], alpha=0.8, markersize=4,
label="_nolegend_", **err_kws)
else:
ax.plot(x, data.T, "o", color=color, alpha=0.5, markersize=4,
label="_nolegend_", **err_kws)
def _plot_boot_kde(ax, x, boot_data, color, **kwargs):
"""Plot the kernal density estimate of the bootstrap distribution."""
kwargs.pop("data")
_ts_kde(ax, x, boot_data, color, **kwargs)
def _plot_unit_kde(ax, x, data, color, **kwargs):
"""Plot the kernal density estimate over the sample."""
_ts_kde(ax, x, data, color, **kwargs)
def _ts_kde(ax, x, data, color, **kwargs):
"""Upsample over time and plot a KDE of the bootstrap distribution."""
kde_data = []
y_min, y_max = data.min(), data.max()
y_vals = np.linspace(y_min, y_max, 100)
upsampler = interpolate.interp1d(x, data)
data_upsample = upsampler(np.linspace(x.min(), x.max(), 100))
for pt_data in data_upsample.T:
pt_kde = stats.kde.gaussian_kde(pt_data)
kde_data.append(pt_kde(y_vals))
kde_data = np.transpose(kde_data)
rgb = mpl.colors.ColorConverter().to_rgb(color)
img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4))
img[:, :, :3] = rgb
kde_data /= kde_data.max(axis=0)
kde_data[kde_data > 1] = 1
img[:, :, 3] = kde_data
ax.imshow(img, interpolation="spline16", zorder=2,
extent=(x.min(), x.max(), y_min, y_max),
aspect="auto", origin="lower")
| bsd-3-clause |
JensWehner/votca-scripts | xtp/xtp_kmc_plottrajectory.py | 2 | 3563 | #!/usr/bin/env python
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
import numpy as np
import matplotlib.pyplot as plt
import csv
import re
import sys
from __tools__ import MyParser
parser=MyParser(description="Tool to visualize kmc trajectory .csv files" )
parser.add_argument("-t","--trajectory",type=str,nargs="+",required=True,help="Files to visualize .csv format")
parser.add_argument("--steps",type=int,default=-1,help="Maximum number of steps to read in. default:-1")
args=parser.parse_args()
#parser.add_argument('-p',"--plot", action='store_const', const=1, default=0,help="Calculate exciton coupling in classical limit")
if type(args.trajectory)==str:
args.trajectory=[args.trajectory]
class carrierstorage(object):
numberofobjects= 0
def __init__(self):
carrierstorage.numberofobjects+=1
self.id=carrierstorage.numberofobjects
self.traj=[]
def append(self,posvec):
self.traj.append(posvec)
def array(self):
return np.array(self.traj)
def info(self):
print "Carrier No",self.id
print self.array().shape
listofcarriers=[]
for filename in args.trajectory:
locallistofcarriers=[]
with open(filename,"r") as f:
reader = csv.reader(f, dialect="excel-tab")
conversion=1
start=2
for i,row in enumerate(reader):
#print i
if args.steps>0 and i>args.steps:
break
if i==0:
commentlinelength=len(row)
if "carrier" in ''.join(row):
noofcharges=''.join(row).count("carrier")/3
else:
noofcharges=len(row)/3
print "Found {} carriers in file {}".format(noofcharges,filename)
if noofcharges==0:
break
for i in range(noofcharges):
newcarrier=carrierstorage()
listofcarriers.append(newcarrier)
locallistofcarriers.append(newcarrier)
continue
if i==1:
#print row
if len(row)!=commentlinelength:
print "header and trajectory do not have same number of columns. Ignoring steps colum"
start=1
nprow=np.array(row,dtype=float)
firstcoord=nprow[start:start+3]
if np.sqrt(np.sum(firstcoord**2))<0.0001:
print "Units is probably meter instead of nm. Old trajectory format"
conversion=1E9
else:
print "Units is probably nm."
if i>0:
nprow=np.array(row,dtype=float)
for j,carrier in enumerate(locallistofcarriers):
s=start+j*3
carrier.append(conversion*nprow[s:s+3])
print "Found {} carriers in total".format(len(listofcarriers))
if len(listofcarriers)==0:
print "No carriers found"
sys.exit()
fig = plt.figure(1)
ax = fig.gca(projection='3d')
ax.set_xlabel('x [nm]')
ax.set_ylabel('y [nm]')
ax.set_zlabel('z [nm]')
for i in listofcarriers:
posarray=i.array()
#print posarray
ax.plot(posarray[:,0], posarray[:,1], posarray[:,2])
ax.scatter(posarray[0,0], posarray[0,1], posarray[0,2],s=200,marker="+",c="black")
ax.scatter(posarray[-1,0], posarray[-1,1], posarray[-1,2],s=400,marker="x",c="black")
max_range = np.array([posarray[:,0].max()-posarray[:,0].min(), posarray[:,1].max()-posarray[:,1].min(), posarray[:,2].max()-posarray[:,2].min()]).max()
Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(posarray[:,0].max()+posarray[:,0].min())
Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(posarray[:,1].max()+posarray[:,1].min())
Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(posarray[:,2].max()+posarray[:,2].min())
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
plt.grid()
plt.show()
| apache-2.0 |
richardhsu/naarad | src/naarad/graphing/matplotlib_naarad.py | 4 | 9100 | # coding=utf-8
"""
Copyright 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy
import os
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import logging
import naarad.naarad_constants as CONSTANTS
logger = logging.getLogger('naarad.graphing.matplotlib')
def convert_to_mdate(date_str):
mdate = mdates.epoch2num(int(date_str) / 1000)
return mdate
# MPL-WA-07
# matplotlib does not rotate colors correctly when using multiple y axes. This method fills in that gap.
def get_current_color(index):
return CONSTANTS.COLOR_PALETTE[index % len(CONSTANTS.COLOR_PALETTE)]
def get_graph_metadata(plots):
height = 0
width = 0
title = ''
for plot in plots:
if plot.graph_height > height:
height = plot.graph_height
if plot.graph_width > width:
width = plot.graph_width
if title == '':
title = plot.graph_title
elif title != plot.graph_title:
title = title + ',' + plot.graph_title
return height / 80, width / 80, title
def curate_plot_list(plots):
delete_nodes = []
for plot in plots:
if os.path.exists(plot.input_csv):
if not os.path.getsize(plot.input_csv):
logger.warning("%s file is empty. No plot corresponding to this file will be generated", plot.input_csv)
delete_nodes.append(plot)
else:
logger.warning("%s file does not exist. No plot corresponding to this file will be generated", plot.input_csv)
delete_nodes.append(plot)
for node in delete_nodes:
plots.remove(node)
return plots
def highlight_region(plt, start_x, end_x):
"""
Highlight a region on the chart between the specified start and end x-coordinates.
param pyplot plt: matplotlib pyplot which contains the charts to be highlighted
param string start_x : epoch time millis
param string end_x : epoch time millis
"""
start_x = convert_to_mdate(start_x)
end_x = convert_to_mdate(end_x)
plt.axvspan(start_x, end_x, color=CONSTANTS.HIGHLIGHT_COLOR, alpha=CONSTANTS.HIGHLIGHT_ALPHA)
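# A small illustrative call (the timestamps are hypothetical epoch-millisecond strings):
#
#   highlight_region(plt, '1388534400000', '1388534460000')
#
# would shade the one-minute window starting at 2014-01-01 00:00:00 UTC using the
# HIGHLIGHT_COLOR / HIGHLIGHT_ALPHA constants.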
def graph_data(list_of_plots, output_directory, resource_path, output_filename):
plots = curate_plot_list(list_of_plots)
plot_count = len(plots)
if plot_count == 0:
return False, None
graph_height, graph_width, graph_title = get_graph_metadata(list_of_plots)
current_plot_count = 0
fig, axis = plt.subplots()
fig.set_size_inches(graph_width, graph_height)
if plot_count < 2:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET, right=CONSTANTS.SUBPLOT_RIGHT_OFFSET)
else:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET,
right=CONSTANTS.SUBPLOT_RIGHT_OFFSET - CONSTANTS.Y_AXIS_OFFSET * (plot_count - 2))
current_axis = axis
for plot in plots:
current_plot_count += 1
logger.info('Processing: ' + plot.input_csv + ' [ ' + output_filename + ' ]')
timestamp, yval = numpy.loadtxt(plot.input_csv, unpack=True, delimiter=',', converters={0: convert_to_mdate})
maximum_yvalue = numpy.amax(yval) * (1.0 + CONSTANTS.ZOOM_FACTOR * current_plot_count)
minimum_yvalue = numpy.amin(yval) * (1.0 - CONSTANTS.ZOOM_FACTOR * current_plot_count)
if current_plot_count == 0:
current_axis.yaxis.set_ticks_position('left')
if current_plot_count > 1:
current_axis = axis.twinx()
current_axis.yaxis.grid(False)
# Set right y-axis for additional plots
current_axis.yaxis.set_ticks_position('right')
# Offset the right y axis to avoid overlap
current_axis.spines['right'].set_position(('axes', 1 + CONSTANTS.Y_AXIS_OFFSET * (current_plot_count - 2)))
current_axis.spines['right'].set_smart_bounds(False)
current_axis.spines['right'].set_color(get_current_color(current_plot_count))
current_axis.set_frame_on(True)
current_axis.patch.set_visible(False)
current_axis.set_ylabel(plot.y_label, color=get_current_color(current_plot_count), fontsize=CONSTANTS.Y_LABEL_FONTSIZE)
current_axis.set_ylim([minimum_yvalue, maximum_yvalue])
if plot.graph_type == 'line':
current_axis.plot_date(x=timestamp, y=yval, linestyle='-', marker=None, color=get_current_color(current_plot_count))
else:
current_axis.plot_date(x=timestamp, y=yval, marker='.', color=get_current_color(current_plot_count))
y_ticks = current_axis.get_yticklabels()
for y_tick in y_ticks:
y_tick.set_color(get_current_color(current_plot_count))
y_tick.set_fontsize(CONSTANTS.Y_TICKS_FONTSIZE)
for x_tick in current_axis.get_xticklabels():
x_tick.set_fontsize(CONSTANTS.X_TICKS_FONTSIZE)
if plot.highlight_regions is not None:
for region in plot.highlight_regions:
highlight_region(plt, str(region.start_timestamp), str(region.end_timestamp))
axis.yaxis.grid(True)
axis.xaxis.grid(True)
axis.set_title(graph_title)
axis.set_xlabel('Time')
x_date_format = mdates.DateFormatter(CONSTANTS.X_TICKS_DATEFORMAT)
axis.xaxis.set_major_formatter(x_date_format)
plot_file_name = os.path.join(output_directory, output_filename + ".png")
fig.savefig(plot_file_name)
plt.close()
# Create html fragment to be used for creation of the report
with open(os.path.join(output_directory, output_filename + '.div'), 'w') as div_file:
div_file.write('<a name="' + os.path.basename(plot_file_name).replace(".png", "").replace(".diff", "") + '"></a><div class="col-md-12"><img src="' +
resource_path + '/' + os.path.basename(plot_file_name) + '" id="' + os.path.basename(plot_file_name) +
'" width="100%" height="auto"/></div><div class="col-md-12"><p align="center"><strong>' + os.path.basename(plot_file_name) +
'</strong></p></div><hr />')
return True, os.path.join(output_directory, output_filename + '.div')
def graph_data_on_the_same_graph(list_of_plots, output_directory, resource_path, output_filename):
"""
graph_data_on_the_same_graph: put a list of plots on the same graph: currently it supports CDF
"""
maximum_yvalue = -float('inf')
minimum_yvalue = float('inf')
plots = curate_plot_list(list_of_plots)
plot_count = len(plots)
if plot_count == 0:
return False, None
graph_height, graph_width, graph_title = get_graph_metadata(plots)
current_plot_count = 0
fig, axis = plt.subplots()
fig.set_size_inches(graph_width, graph_height)
if plot_count < 2:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET, right=CONSTANTS.SUBPLOT_RIGHT_OFFSET)
else:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET,
right=CONSTANTS.SUBPLOT_RIGHT_OFFSET - CONSTANTS.Y_AXIS_OFFSET * (plot_count - 2))
# Generate each plot on the graph
for plot in plots:
current_plot_count += 1
logger.info('Processing: ' + plot.input_csv + ' [ ' + output_filename + ' ]')
xval, yval = numpy.loadtxt(plot.input_csv, unpack=True, delimiter=',')
axis.plot(xval, yval, linestyle='-', marker=None, color=get_current_color(current_plot_count), label=plot.plot_label)
axis.legend()
maximum_yvalue = max(maximum_yvalue, numpy.amax(yval) * (1.0 + CONSTANTS.ZOOM_FACTOR * current_plot_count))
minimum_yvalue = min(minimum_yvalue, numpy.amin(yval) * (1.0 - CONSTANTS.ZOOM_FACTOR * current_plot_count))
# Set properties of the plots
axis.yaxis.set_ticks_position('left')
axis.set_xlabel(plots[0].x_label)
axis.set_ylabel(plots[0].y_label, fontsize=CONSTANTS.Y_LABEL_FONTSIZE)
axis.set_ylim([minimum_yvalue, maximum_yvalue])
axis.yaxis.grid(True)
axis.xaxis.grid(True)
axis.set_title(graph_title)
plot_file_name = os.path.join(output_directory, output_filename + ".png")
fig.savefig(plot_file_name)
plt.close()
# Create html fragment to be used for creation of the report
with open(os.path.join(output_directory, output_filename + '.div'), 'w') as div_file:
div_file.write('<a name="' + os.path.basename(plot_file_name).replace(".png", "").replace(".diff", "") + '"></a><div class="col-md-12"><img src="' +
resource_path + '/' + os.path.basename(plot_file_name) + '" id="' + os.path.basename(plot_file_name) +
'" width="100%" height="auto"/></div><div class="col-md-12"><p align=center>' + os.path.basename(plot_file_name) + '<br/></p></div>')
return True, os.path.join(output_directory, output_filename + '.div')
| apache-2.0 |
weidel-p/nest-simulator | pynest/examples/brette_gerstner_fig_3d.py | 12 | 3030 | # -*- coding: utf-8 -*-
#
# brette_gerstner_fig_3d.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Testing the adapting exponential integrate and fire model in NEST (Brette and Gerstner Fig 3D)
----------------------------------------------------------------------------------------------------
This example tests the adaptive integrate and fire model (AdEx) according to
Brette and Gerstner [1]_ and reproduces Figure 3D of the paper.
Note that Brette and Gerstner give the value for `b` in `nA`.
To be consistent with the other parameters in the equations, `b` must be
converted to `pA` (pico Ampere).
References
~~~~~~~~~~~
.. [1] Brette R and Gerstner W (2005). Adaptive exponential integrate-and-fire model as an effective
description of neuronal activity J. Neurophysiology. https://doi.org/10.1152/jn.00686.2005
"""
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
nest.ResetKernel()
###############################################################################
# First we make sure that the resolution of the simulation is 0.1 ms. This is
# important, since the slope of the action potential is very steep.
res = 0.1
nest.SetKernelStatus({"resolution": res})
neuron = nest.Create("aeif_cond_exp")
###############################################################################
# Set the parameters of the neuron according to the paper.
neuron.set(V_peak=20., E_L=-60.0, a=80.0, b=80.5, tau_w=720.0)
###############################################################################
# Create and configure the stimulus which is a step current.
dc = nest.Create("dc_generator")
dc.set(amplitude=-800.0, start=0.0, stop=400.0)
###############################################################################
# We connect the DC generators.
nest.Connect(dc, neuron, 'all_to_all')
###############################################################################
# And add a ``voltmeter`` to sample the membrane potentials from the neuron
# in intervals of 0.1 ms.
voltmeter = nest.Create("voltmeter", params={'interval': 0.1})
nest.Connect(voltmeter, neuron)
###############################################################################
# Finally, we simulate for 1000 ms and plot a voltage trace to produce the
# figure.
nest.Simulate(1000.0)
nest.voltage_trace.from_device(voltmeter)
plt.axis([0, 1000, -85, 0])
nest.voltage_trace.show()
| gpl-2.0 |
yugangzhang/chxanalys | chxanalys/chx_compress.py | 1 | 37856 | import os,shutil
from glob import iglob
import matplotlib.pyplot as plt
from chxanalys.chx_libs import (np, roi, time, datetime, os, getpass, db,
get_images,LogNorm, RUN_GUI)
from chxanalys.chx_generic_functions import (create_time_slice,get_detector, get_fields, get_sid_filenames,
load_data)
import struct
from tqdm import tqdm
from contextlib import closing
from multiprocessing import Pool
import dill
import sys
import gc
import pickle as pkl
from eiger_io.pims_reader import EigerImages
def run_dill_encoded(what):
fun, args = dill.loads(what)
return fun(*args)
def apply_async(pool, fun, args, callback=None):
return pool.apply_async( run_dill_encoded, (dill.dumps((fun, args)),), callback= callback)
def map_async(pool, fun, args ):
return pool.map_async(run_dill_encoded, (dill.dumps((fun, args)),))
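# These helpers let a multiprocessing.Pool execute callables that the standard pickle
# module cannot serialize (e.g. lambdas or closures) by routing them through dill.
# A minimal usage sketch; the worker function here is purely illustrative:
#
#   with closing(Pool(processes=4)) as pool:
#       results = [apply_async(pool, lambda x: x * x, (i,)) for i in range(8)]
#       squares = [r.get() for r in results]
#
# Each task is dill-encoded on the parent side and decoded by run_dill_encoded in the
# worker process before being called.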
def pass_FD(FD,n):
#FD.rdframe(n)
FD.seekimg(n)
def go_through_FD(FD):
for i in range(FD.beg, FD.end):
pass_FD(FD,i)
def compress_eigerdata( images, mask, md, filename=None, force_compress=False,
bad_pixel_threshold=1e15, bad_pixel_low_threshold=0,
hot_pixel_threshold=2**30, nobytes=4,bins=1, bad_frame_list=None,
para_compress= False, num_sub=100, dtypes='uid',reverse =True,
num_max_para_process=500, with_pickle=False, direct_load_data=False, data_path=None):
end= len(images)//bins
if filename is None:
filename= '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%md['uid']
if dtypes!= 'uid':
para_compress= False
else:
if para_compress:
images='foo'
#para_compress= True
#print( dtypes )
if force_compress:
print ("Create a new compress file with filename as :%s."%filename)
if para_compress:
print( 'Using a multiprocess to compress the data.')
return para_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,
num_sub=num_sub, dtypes=dtypes, reverse=reverse,
num_max_para_process=num_max_para_process, with_pickle= with_pickle,
direct_load_data= direct_load_data,data_path=data_path)
else:
return init_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path )
else:
if not os.path.exists( filename ):
print ("Create a new compress file with filename as :%s."%filename)
if para_compress:
print( 'Using a multiprocess to compress the data.')
return para_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,
num_sub=num_sub, dtypes=dtypes, reverse=reverse,
num_max_para_process=num_max_para_process,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path)
else:
return init_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold, nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path )
else:
print ("Using already created compressed file with filename as :%s."%filename)
beg=0
return read_compressed_eigerdata( mask, filename, beg, end,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold ,bad_frame_list=bad_frame_list,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path )
def read_compressed_eigerdata( mask, filename, beg, end,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0,bad_frame_list=None,with_pickle= False,
direct_load_data=False,data_path=None):
'''
    Read already compressed Eiger data
    Return
        mask
        avg_img
        imgsum
        bad_frame_list
'''
#should use try and except instead of with_pickle in the future!
CAL = False
if not with_pickle:
CAL = True
else:
try:
mask, avg_img, imgsum, bad_frame_list_ = pkl.load( open(filename + '.pkl', 'rb' ) )
except:
CAL = True
if CAL:
FD = Multifile( filename, beg, end)
imgsum = np.zeros( FD.end- FD.beg, dtype= np.float )
avg_img = np.zeros( [FD.md['ncols'], FD.md['nrows'] ] , dtype= np.float )
imgsum, bad_frame_list_ = get_each_frame_intensityc( FD, sampling = 1,
bad_pixel_threshold=bad_pixel_threshold, bad_pixel_low_threshold=bad_pixel_low_threshold,
hot_pixel_threshold=hot_pixel_threshold, plot_ = False,
bad_frame_list=bad_frame_list)
avg_img = get_avg_imgc( FD, beg=None,end=None,sampling = 1, plot_ = False,bad_frame_list=bad_frame_list_ )
FD.FID.close()
return mask, avg_img, imgsum, bad_frame_list_
def para_compress_eigerdata( images, mask, md, filename, num_sub=100,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='uid',reverse =True,
num_max_para_process=500, cpu_core_number=72, with_pickle=True,
direct_load_data=False, data_path=None):
if dtypes=='uid':
uid= md['uid'] #images
if not direct_load_data:
detector = get_detector( db[uid ] )
images_ = load_data( uid, detector, reverse= reverse )
else:
images_ = EigerImages(data_path, md)
N= len(images_)
else:
N = len(images)
N = int( np.ceil( N/ bins ) )
Nf = int( np.ceil( N/ num_sub ) )
if Nf > cpu_core_number:
print("The process number is larger than %s (XF11ID server core number)"%cpu_core_number)
num_sub_old = num_sub
num_sub = int( np.ceil(N/cpu_core_number))
Nf = int( np.ceil( N/ num_sub ) )
print ("The sub compressed file number was changed from %s to %s"%( num_sub_old, num_sub ))
create_compress_header( md, filename +'-header', nobytes, bins )
#print( 'done for header here')
results = para_segment_compress_eigerdata( images=images, mask=mask, md=md,filename=filename,
num_sub=num_sub, bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes=nobytes, bins=bins, dtypes=dtypes,
num_max_para_process=num_max_para_process,
direct_load_data=direct_load_data, data_path=data_path)
res_ = np.array( [ results[k].get() for k in list(sorted(results.keys())) ] )
imgsum = np.zeros( N )
bad_frame_list = np.zeros( N, dtype=bool )
good_count = 1
for i in range( Nf ):
mask_, avg_img_, imgsum_, bad_frame_list_ = res_[i]
imgsum[i*num_sub: (i+1)*num_sub] = imgsum_
bad_frame_list[i*num_sub: (i+1)*num_sub] = bad_frame_list_
if i==0:
mask = mask_
avg_img = np.zeros_like( avg_img_ )
else:
mask *= mask_
if not np.sum( np.isnan( avg_img_)):
avg_img += avg_img_
good_count += 1
bad_frame_list = np.where( bad_frame_list )[0]
avg_img /= good_count
if len(bad_frame_list):
print ('Bad frame list are: %s' %bad_frame_list)
else:
print ('No bad frames are involved.')
    print( 'Combining the separate compressed files together...')
combine_compressed( filename, Nf, del_old=True)
del results
del res_
if with_pickle:
pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) )
return mask, avg_img, imgsum, bad_frame_list
def combine_compressed( filename, Nf, del_old=True):
old_files = np.concatenate( np.array([ [filename +'-header'],
[filename + '_temp-%i.tmp'%i for i in range(Nf) ]]))
combine_binary_files(filename, old_files, del_old )
def combine_binary_files(filename, old_files, del_old = False):
'''Combine binary files together'''
fn_ = open(filename, 'wb')
for ftemp in old_files:
shutil.copyfileobj( open(ftemp, 'rb'), fn_)
if del_old:
os.remove( ftemp )
fn_.close()
def para_segment_compress_eigerdata( images, mask, md, filename, num_sub=100,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='images',reverse =True,
num_max_para_process=50,direct_load_data=False, data_path=None):
'''
    Compress Eiger data in parallel, without writing a header; this function is used for parallel compression.
'''
if dtypes=='uid':
uid= md['uid'] #images
if not direct_load_data:
detector = get_detector( db[uid ] )
images_ = load_data( uid, detector, reverse= reverse )
else:
images_ = EigerImages(data_path, md)
N= len(images_)
else:
N = len(images)
#N = int( np.ceil( N/ bins ) )
num_sub *= bins
if N%num_sub:
Nf = N// num_sub +1
        print('The average image intensity will be slightly incorrect (about 1% error).')
        print( 'Please choose num_sub such that the remainder of Num_images/num_sub is 0 to get a correct avg_image')
else:
Nf = N//num_sub
print( 'It will create %i temporary files for parallel compression.'%Nf)
if Nf> num_max_para_process:
N_runs = np.int( np.ceil( Nf/float(num_max_para_process)))
print('The parallel run number: %s is larger than num_max_para_process: %s'%(Nf, num_max_para_process ))
else:
N_runs= 1
result = {}
#print( mask_filename )# + '*'* 10 + 'here' )
for nr in range( N_runs ):
if (nr+1)*num_max_para_process > Nf:
inputs= range( num_max_para_process*nr, Nf )
else:
inputs= range( num_max_para_process*nr, num_max_para_process*(nr + 1 ) )
fns = [ filename + '_temp-%i.tmp'%i for i in inputs]
#print( nr, inputs, )
pool = Pool(processes= len(inputs) ) #, maxtasksperchild=1000 )
#print( inputs )
for i in inputs:
if i*num_sub <= N:
result[i] = pool.apply_async( segment_compress_eigerdata, [
images, mask, md, filename + '_temp-%i.tmp'%i,bad_pixel_threshold, hot_pixel_threshold, bad_pixel_low_threshold, nobytes, bins, i*num_sub, (i+1)*num_sub, dtypes, reverse,direct_load_data, data_path ] )
pool.close()
pool.join()
pool.terminate()
return result
def segment_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0, nobytes=4, bins=1,
N1=None, N2=None, dtypes='images',reverse =True,direct_load_data=False, data_path=None ):
'''
    Create compressed Eiger data without a header; this function is used for parallel compression.
    For parallel compression, don't pass any non-scalar parameters.
'''
if dtypes=='uid':
uid= md['uid'] #images
if not direct_load_data:
detector = get_detector( db[uid ] )
images = load_data( uid, detector, reverse= reverse )[N1:N2]
else:
images = EigerImages(data_path, md)[N1:N2]
Nimg_ = len( images)
M,N = images[0].shape
avg_img = np.zeros( [M,N], dtype= np.float )
Nopix = float( avg_img.size )
n=0
good_count = 0
#frac = 0.0
if nobytes==2:
dtype= np.int16
elif nobytes==4:
dtype= np.int32
elif nobytes==8:
dtype=np.float64
else:
print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]")
dtype= np.int32
#Nimg = Nimg_//bins
Nimg = int( np.ceil( Nimg_ / bins ) )
time_edge = np.array(create_time_slice( N= Nimg_,
slice_num= Nimg, slice_width= bins ))
#print( time_edge, Nimg_, Nimg, bins, N1, N2 )
imgsum = np.zeros( Nimg )
if bins!=1:
#print('The frames will be binned by %s'%bins)
dtype=np.float64
fp = open( filename,'wb' )
for n in range(Nimg):
t1,t2 = time_edge[n]
if bins!=1:
img = np.array( np.average( images[t1:t2], axis=0 ) , dtype= dtype)
else:
img = np.array( images[t1], dtype=dtype)
mask &= img < hot_pixel_threshold
p = np.where( (np.ravel(img)>0) * np.ravel(mask) )[0] #don't use masked data
v = np.ravel( np.array( img, dtype= dtype )) [p]
dlen = len(p)
imgsum[n] = v.sum()
if (dlen==0) or (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold):
dlen = 0
fp.write( struct.pack( '@I', dlen ))
else:
np.ravel( avg_img )[p] += v
good_count +=1
fp.write( struct.pack( '@I', dlen ))
fp.write( struct.pack( '@{}i'.format( dlen), *p))
if bins==1:
fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v))
else:
fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) #n +=1
del p,v, img
fp.flush()
fp.close()
avg_img /= good_count
bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold)
sys.stdout.write('#')
sys.stdout.flush()
#del images, mask, avg_img, imgsum, bad_frame_list
#print( 'Should release memory here')
return mask, avg_img, imgsum, bad_frame_list
def create_compress_header( md, filename, nobytes=4, bins=1 ):
'''
    Create the header for compressed Eiger data; this function is used for parallel compression.
'''
fp = open( filename,'wb' )
#Make Header 1024 bytes
#md = images.md
if bins!=1:
nobytes=8
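    # (added note) the packed header below is 16 bytes of magic string, 8 doubles
    # (64 bytes), 7 unsigned ints (28 bytes) and 916 padding bytes, i.e. 1024 bytes in total.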
Header = struct.pack('@16s8d7I916x',b'Version-COMP0001',
md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'],
md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'],
nobytes, md['pixel_mask'].shape[1], md['pixel_mask'].shape[0],
0, md['pixel_mask'].shape[1],
0, md['pixel_mask'].shape[0]
)
fp.write( Header)
fp.close()
def init_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0,nobytes=4, bins=1, with_pickle=True,
direct_load_data=False, data_path=None):
'''
Compress the eiger data
Create a new mask by remove hot_pixel
Do image average
Do each image sum
Find badframe_list for where image sum above bad_pixel_threshold
Generate a compressed data with filename
if bins!=1, will bin the images with bin number as bins
Header contains 1024 bytes ['Magic value', 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance',
'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size',
bytes per pixel (either 2 or 4 (Default)),
Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End ]
Return
mask
avg_img
imsum
bad_frame_list
'''
fp = open( filename,'wb' )
#Make Header 1024 bytes
#md = images.md
if bins!=1:
nobytes=8
Header = struct.pack('@16s8d7I916x',b'Version-COMP0001',
md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'],
md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'],
nobytes, md['pixel_mask'].shape[1], md['pixel_mask'].shape[0],
0, md['pixel_mask'].shape[1],
0, md['pixel_mask'].shape[0]
)
fp.write( Header)
Nimg_ = len( images)
avg_img = np.zeros_like( images[0], dtype= np.float )
Nopix = float( avg_img.size )
n=0
good_count = 0
frac = 0.0
if nobytes==2:
dtype= np.int16
elif nobytes==4:
dtype= np.int32
elif nobytes==8:
dtype=np.float64
else:
print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]")
dtype= np.int32
Nimg = Nimg_//bins
time_edge = np.array(create_time_slice( N= Nimg_,
slice_num= Nimg, slice_width= bins ))
imgsum = np.zeros( Nimg )
if bins!=1:
print('The frames will be binned by %s'%bins)
for n in tqdm( range(Nimg) ):
t1,t2 = time_edge[n]
img = np.average( images[t1:t2], axis=0 )
mask &= img < hot_pixel_threshold
p = np.where( (np.ravel(img)>0) & np.ravel(mask) )[0] #don't use masked data
v = np.ravel( np.array( img, dtype= dtype )) [p]
dlen = len(p)
imgsum[n] = v.sum()
if (imgsum[n] >bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold):
#if imgsum[n] >=bad_pixel_threshold :
dlen = 0
fp.write( struct.pack( '@I', dlen ))
else:
np.ravel(avg_img )[p] += v
good_count +=1
frac += dlen/Nopix
#s_fmt ='@I{}i{}{}'.format( dlen,dlen,'ih'[nobytes==2])
fp.write( struct.pack( '@I', dlen ))
fp.write( struct.pack( '@{}i'.format( dlen), *p))
if bins==1:
fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v))
else:
fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v))
#n +=1
fp.close()
frac /=good_count
print( "The fraction of pixel occupied by photon is %6.3f%% "%(100*frac) )
avg_img /= good_count
bad_frame_list = np.where( (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) )[0]
#bad_frame_list1 = np.where( np.array(imgsum) > bad_pixel_threshold )[0]
#bad_frame_list2 = np.where( np.array(imgsum) < bad_pixel_low_threshold )[0]
#bad_frame_list = np.unique( np.concatenate( [bad_frame_list1, bad_frame_list2]) )
if len(bad_frame_list):
print ('Bad frame list are: %s' %bad_frame_list)
else:
print ('No bad frames are involved.')
if with_pickle:
pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) )
return mask, avg_img, imgsum, bad_frame_list
""" Description:
This is code that Mark wrote to open the multifile format
in compressed mode, translated to python.
This seems to work for DALSA, FCCD and EIGER in compressed mode.
It should be included in the respective detector.i files
Currently, this refers to the compression mode being '6'
    Each file consists of image descriptor blocks chunked together as follows:
Header (1024 bytes)
|--------------IMG N begin--------------|
| Dlen
|---------------------------------------|
    |  Pixel positions (dlen*4 bytes)       |
| (0 based indexing in file) |
|---------------------------------------|
| Pixel data(dlen*bytes bytes) |
| (bytes is found in header |
| at position 116) |
|--------------IMG N end----------------|
|--------------IMG N+1 begin------------|
|----------------etc.....---------------|
Header contains 1024 bytes version name, 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance',
'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size',
bytes per pixel (either 2 or 4 (Default)),
Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End,
"""
class Multifile:
'''The class representing the multifile.
The recno is in 1 based numbering scheme (first record is 1)
This is efficient for reading in increasing order.
    Note: reading the same image twice in a row is like reading an earlier-
    numbered image and means the program starts from the beginning again.
'''
def __init__(self,filename,beg,end):
'''Multifile initialization. Open the file.
Here I use the read routine which returns byte objects
(everything is an object in python). I use struct.unpack
to convert the byte object to other data type (int object
etc)
NOTE: At each record n, the file cursor points to record n+1
'''
self.FID = open(filename,"rb")
# self.FID.seek(0,os.SEEK_SET)
self.filename = filename
#br: bytes read
br = self.FID.read(1024)
self.beg=beg
self.end=end
ms_keys = ['beam_center_x', 'beam_center_y', 'count_time', 'detector_distance',
'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size',
'bytes',
'nrows', 'ncols', 'rows_begin', 'rows_end', 'cols_begin', 'cols_end'
]
magic = struct.unpack('@16s', br[:16])
md_temp = struct.unpack('@8d7I916x', br[16:])
self.md = dict(zip(ms_keys, md_temp))
self.imgread=0
self.recno = 0
# some initialization stuff
self.byts = self.md['bytes']
if (self.byts==2):
self.valtype = np.uint16
elif (self.byts == 4):
self.valtype = np.uint32
elif (self.byts == 8):
self.valtype = np.float64
#now convert pieces of these bytes to our data
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
# now read first image
#print "Opened file. Bytes per data is {0img.shape = (self.rows,self.cols)}".format(self.byts)
def _readHeader(self):
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
def _readImageRaw(self):
p= np.fromfile(self.FID, dtype = np.int32,count= self.dlen)
v= np.fromfile(self.FID, dtype = self.valtype,count= self.dlen)
self.imgread=1
return(p,v)
def _readImage(self):
(p,v)=self._readImageRaw()
img = np.zeros( ( self.md['ncols'], self.md['nrows'] ) )
np.put( np.ravel(img), p, v )
return(img)
def seekimg(self,n=None):
        '''Position the file cursor to read the nth image.
        If n is None, the current record number (recno) is used.
        '''
# the logic involving finding the cursor position
if (n is None):
n = self.recno
if (n < self.beg or n > self.end):
raise IndexError('Error, record out of range')
#print (n, self.recno, self.FID.tell() )
if ((n == self.recno) and (self.imgread==0)):
pass # do nothing
else:
if (n <= self.recno): #ensure cursor less than search pos
self.FID.seek(1024,os.SEEK_SET)
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
self.recno = 0
self.imgread=0
if n == 0:
return
#have to iterate on seeking since dlen varies
#remember for rec recno, cursor is always at recno+1
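            # (added note) each record occupies 4 bytes for its dlen field plus
            # dlen*(4 + byts) bytes for the pixel positions and values, hence
            # the seek offsets of dlen*(4+byts) bytes used below.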
if(self.imgread==0 ): #move to next header if need to
self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR)
for i in range(self.recno+1,n):
#the less seeks performed the faster
#print (i)
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
#print 's',self.dlen
self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR)
# we are now at recno in file, read the header and data
#self._clearImage()
self._readHeader()
self.imgread=0
self.recno = n
def rdframe(self,n):
if self.seekimg(n)!=-1:
return(self._readImage())
def rdrawframe(self,n):
if self.seekimg(n)!=-1:
return(self._readImageRaw())
def pass_FD(FD,n):
#FD.rdframe(n)
FD.seekimg(n)
class Multifile_Bins( object ):
'''
Bin a compressed file with bins number
See Multifile for details for Multifile_class
'''
def __init__(self, FD, bins=100):
'''
FD: the handler of a compressed Eiger frames
bins: bins number
'''
self.FD=FD
if (FD.end - FD.beg)%bins:
            print ('Please choose a bins number such that (FD.end - FD.beg)/bins is an integer')
else:
self.bins = bins
self.md = FD.md
#self.beg = FD.beg
self.beg = 0
Nimg = (FD.end - FD.beg)
slice_num = Nimg//bins
self.end = slice_num
self.time_edge = np.array(create_time_slice( N= Nimg,
slice_num= slice_num, slice_width= bins )) + FD.beg
self.get_bin_frame()
def get_bin_frame(self):
FD= self.FD
self.frames = np.zeros( [ FD.md['ncols'],FD.md['nrows'], len(self.time_edge)] )
for n in tqdm( range(len(self.time_edge))):
#print (n)
t1,t2 = self.time_edge[n]
#print( t1, t2)
self.frames[:,:,n] = get_avg_imgc( FD, beg=t1,end=t2, sampling = 1,
plot_ = False, show_progress = False )
def rdframe(self,n):
return self.frames[:,:,n]
def rdrawframe(self,n):
x_= np.ravel( self.rdframe(n) )
p= np.where( x_ ) [0]
v = np.array( x_[ p ])
return ( np.array(p, dtype=np.int32), v)
def get_avg_imgc( FD, beg=None,end=None, sampling = 100, plot_ = False, bad_frame_list=None,
show_progress=True, *argv,**kwargs):
    '''Get the average image from a data series, sampling every *sampling* frames to save time'''
#avg_img = np.average(data_series[:: sampling], axis=0)
if beg is None:
beg = FD.beg
if end is None:
end = FD.end
avg_img = FD.rdframe(beg)
n=1
flag=True
if show_progress:
#print( sampling-1 + beg , end, sampling )
if bad_frame_list is None:
bad_frame_list =[]
fra_num = int( (end - beg )/sampling ) - len( bad_frame_list )
for i in tqdm(range( sampling-1 + beg , end, sampling ), desc= 'Averaging %s images'% fra_num):
if bad_frame_list is not None:
if i in bad_frame_list:
flag= False
else:
flag=True
#print(i, flag)
if flag:
(p,v) = FD.rdrawframe(i)
if len(p)>0:
np.ravel(avg_img )[p] += v
n += 1
else:
for i in range( sampling-1 + beg , end, sampling ):
if bad_frame_list is not None:
if i in bad_frame_list:
flag= False
else:
flag=True
if flag:
(p,v) = FD.rdrawframe(i)
if len(p)>0:
np.ravel(avg_img )[p] += v
n += 1
avg_img /= n
if plot_:
if RUN_GUI:
fig = Figure()
ax = fig.add_subplot(111)
else:
fig, ax = plt.subplots()
uid = 'uid'
if 'uid' in kwargs.keys():
uid = kwargs['uid']
im = ax.imshow(avg_img , cmap='viridis',origin='lower',
norm= LogNorm(vmin=0.001, vmax=1e2))
#ax.set_title("Masked Averaged Image")
ax.set_title('uid= %s--Masked-Averaged-Image-'%uid)
fig.colorbar(im)
        if kwargs.get('save', False):
#dt =datetime.now()
#CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
path = kwargs['path']
if 'uid' in kwargs:
uid = kwargs['uid']
else:
uid = 'uid'
#fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png'
fp = path + "uid=%s--avg-img-"%uid + '.png'
plt.savefig( fp, dpi=fig.dpi)
#plt.show()
return avg_img
def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor = False):
"""Compute the mean intensity for each ROI in the compressed file (FD), support parallel computation
Parameters
----------
FD: Multifile class
compressed file
labeled_array : array
labeled array; 0 is background.
Each ROI is represented by a nonzero integer. It is not required that
the ROI labels are contiguous
index : int, list, optional
The ROI's to use. If None, this function will extract averages for all
ROIs
Returns
-------
mean_intensity : array
The mean intensity of each ROI for all `images`
Dimensions:
len(mean_intensity) == len(index)
len(mean_intensity[0]) == len(images)
index : list
The labels for each element of the `mean_intensity` list
"""
qind, pixelist = roi.extract_label_indices( labeled_array )
if labeled_array.shape != ( FD.md['ncols'],FD.md['nrows']):
raise ValueError(
" `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" %( FD.md['ncols'],FD.md['nrows'], labeled_array.shape[0], labeled_array.shape[1]) )
# handle various input for `index`
if index is None:
index = list(np.unique(labeled_array))
index.remove(0)
else:
try:
len(index)
except TypeError:
index = [index]
index = np.array( index )
#print ('here')
good_ind = np.zeros( max(qind), dtype= np.int32 )
good_ind[ index -1 ] = np.arange( len(index) ) +1
w = np.where( good_ind[qind -1 ] )[0]
qind = good_ind[ qind[w] -1 ]
pixelist = pixelist[w]
# pre-allocate an array for performance
# might be able to use list comprehension to make this faster
mean_intensity = np.zeros( [ int( ( FD.end - FD.beg)/sampling ) , len(index)] )
#fra_pix = np.zeros_like( pixelist, dtype=np.float64)
timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 )
timg[pixelist] = np.arange( 1, len(pixelist) + 1 )
#maxqind = max(qind)
norm = np.bincount( qind )[1:]
n= 0
#for i in tqdm(range( FD.beg , FD.end )):
if not multi_cor:
for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get ROI intensity of each frame' ):
(p,v) = FD.rdrawframe(i)
w = np.where( timg[p] )[0]
pxlist = timg[ p[w] ] -1
mean_intensity[n] = np.bincount( qind[pxlist], weights = v[w], minlength = len(index)+1 )[1:]
n +=1
else:
ring_masks = [ np.array(labeled_array==i, dtype = np.int64) for i in np.unique( labeled_array )[1:] ]
inputs = range( len(ring_masks) )
go_through_FD(FD)
pool = Pool(processes= len(inputs) )
        print( 'Starting to assign the tasks...')
results = {}
for i in tqdm ( inputs ):
results[i] = apply_async( pool, _get_mean_intensity_one_q, ( FD, sampling, ring_masks[i] ) )
pool.close()
        print( 'Starting to run the tasks...')
res = [ results[k].get() for k in tqdm( list(sorted(results.keys())) ) ]
#return res
for i in inputs:
mean_intensity[:,i] = res[i]
        print( 'ROI mean_intensity calculation is DONE!')
del results
del res
mean_intensity /= norm
return mean_intensity, index
def _get_mean_intensity_one_q( FD, sampling, labels ):
mi = np.zeros( int( ( FD.end - FD.beg)/sampling ) )
n=0
qind, pixelist = roi.extract_label_indices( labels )
# iterate over the images to compute multi-tau correlation
fra_pix = np.zeros_like( pixelist, dtype=np.float64)
timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 )
timg[pixelist] = np.arange( 1, len(pixelist) + 1 )
for i in range( FD.beg, FD.end, sampling ):
(p,v) = FD.rdrawframe(i)
w = np.where( timg[p] )[0]
pxlist = timg[ p[w] ] -1
mi[n] = np.bincount( qind[pxlist], weights = v[w], minlength = 2 )[1:]
n +=1
return mi
def get_each_frame_intensityc( FD, sampling = 1,
bad_pixel_threshold=1e10, bad_pixel_low_threshold=0,
hot_pixel_threshold=2**30,
plot_ = False, bad_frame_list=None, save=False, *argv,**kwargs):
    '''Get the total intensity of each frame by sampling every N frames.
       Also get bad_frame_list by checking whether the total intensity is above bad_pixel_threshold.
       Usage:
imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000,
bad_pixel_threshold=1e10, plot_ = True)
'''
#print ( argv, kwargs )
#mask &= img < hot_pixel_threshold
imgsum = np.zeros( int( (FD.end - FD.beg )/ sampling ) )
n=0
for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get each frame intensity' ):
(p,v) = FD.rdrawframe(i)
if len(p)>0:
imgsum[n] = np.sum( v )
n += 1
if plot_:
uid = 'uid'
if 'uid' in kwargs.keys():
uid = kwargs['uid']
fig, ax = plt.subplots()
ax.plot( imgsum,'bo')
ax.set_title('uid= %s--imgsum'%uid)
ax.set_xlabel( 'Frame_bin_%s'%sampling )
ax.set_ylabel( 'Total_Intensity' )
if save:
#dt =datetime.now()
#CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
path = kwargs['path']
if 'uid' in kwargs:
uid = kwargs['uid']
else:
uid = 'uid'
#fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png'
fp = path + "uid=%s--imgsum-"%uid + '.png'
fig.savefig( fp, dpi=fig.dpi)
plt.show()
bad_frame_list_ = np.where( ( np.array(imgsum) > bad_pixel_threshold ) | ( np.array(imgsum) <= bad_pixel_low_threshold) )[0] + FD.beg
if bad_frame_list is not None:
bad_frame_list = np.unique( np.concatenate([bad_frame_list, bad_frame_list_]) )
else:
bad_frame_list = bad_frame_list_
if len(bad_frame_list):
print ('Bad frame list length is: %s' %len(bad_frame_list))
else:
print ('No bad frames are involved.')
return imgsum,bad_frame_list
| bsd-3-clause |
josauder/procedural_city_generation | UI.py | 2 | 3982 | import os
import sys
import procedural_city_generation
donemessage = "\n"+(150*"-")+"\n\t\t\t Done, waiting for command\n"+(150*"-")+"\n"
path = os.path.dirname(procedural_city_generation.__file__)
sys.path.append(path)
if not os.path.exists(path+"/temp/"):
os.system("mkdir "+path+"/temp")
if not os.path.exists(path+"/outputs/"):
os.system("mkdir "+path+"/outputs")
def setup_matplotlib():
"""
This function is used to set the matplotlib backend correctly.
Parameters
----------
Returns
--------
None
:return:
"""
if sys.version[0] == "3":
import matplotlib
try:
matplotlib.use("Qt4Agg")
except:
print("PyQt4 is not installed - outputs will only be saved as images and not be visible at runtime")
print("However, it is strongly recommended that you install PyQt4 in order to use the GUI")
matplotlib.use("agg")
from procedural_city_generation.roadmap import main as roadmap_main
from procedural_city_generation.polygons import main as polygons_main
from procedural_city_generation.building_generation import main as building_generation_main
from procedural_city_generation.additional_stuff.Singleton import Singleton
def setRoadmapGUI(gui):
roadmap_main.gui = gui
Singleton("roadmap").kill()
def setPolygonsGUI(gui):
polygons_main.gui = gui
Singleton("polygons").kill()
def setBuilding_generationGUI(gui):
building_generation_main.gui = gui
Singleton("building_generation").kill()
def roadmap():
roadmap_main.main()
Singleton("roadmap").kill()
print(donemessage)
def polygons():
polygons_main.main(None)
Singleton("polygons").kill()
print(donemessage)
def building_generation():
building_generation_main.main()
Singleton("building_generation").kill()
print(donemessage)
def visualization():
os.system("blender --python "+path+"/visualization/blenderize.py")
from procedural_city_generation.additional_stuff.Singleton import Singleton
Singleton("visualization").kill()
def main(args):
"""
Welcome to procedural_city_generation, a module for procedurally generating a 3D model of a city in Blender with python.
A call to this module from the command line should follow this format::
python UI.py <submodule-name> <options>
<submodule-name> is either "roadmap", "polygons", "building_generation, "visualization".
<options> is either "run" or "configure"
If you want to configure a paremeter, go with
python UI.py <submodule-name> --configure <parameter-name> <new value>
"""
if len(args) == 1:
print(main.__doc__)
return 0
if "configure" in args[2]:
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"procedural_city_generation/inputs/{0}.conf".format(args[1]))
if len(args) == 3:
os.system("nano {0}".format(config_file))
sys.exit(0)
elif args[3] and args[4]:
import json
with open(config_file, 'r') as f:
wb = json.loads(f.read())
i = 0
while True:
try:
old = wb[args[3+i]]['value']
wb[args[3+i]]['value'] = eval(args[4+i])
print("{0} was changed from {1} to {2}".format(args[3+i], old, args[4+i]))
i += 2
if len(args)-1 < i+4:
break
except:
print(i, len(args))
print("Either {0} is not a configurable parameter for {1}".format(args[3+i], args[1]))
return 0
with open(config_file, 'w') as f:
f.write(json.dumps(wb, indent=2))
return 0
elif "run" in args[2]:
setup_matplotlib()
eval(args[1])()
if __name__ == '__main__':
main(sys.argv)
| mpl-2.0 |
liyu1990/sklearn | examples/linear_model/plot_lasso_and_elasticnet.py | 73 | 2074 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
mhbashari/machine-learning-snippets | Basic/01-linear_regression_tensorflow.py | 1 | 2015 | import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from numpy.core.multiarray import ndarray
__author__ = "mhbashari"
class LinearRegression:
def __init__(self, train_X: ndarray, train_Y: ndarray, learning_rate=0.001, training_epochs=100):
self.train_X = train_X
self.train_Y = train_Y
self.learning_rate = learning_rate
self.training_epochs = training_epochs
def fit(self):
x = tf.placeholder("float")
y = tf.placeholder("float")
a = tf.Variable(1.0, name="weight")
b = tf.Variable(1.0, name="bias")
pred = tf.multiply(x, a) + b
cost = tf.reduce_mean(tf.abs(pred - y))
optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(cost)
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
for epoch in range(self.training_epochs):
for i, out in zip(self.train_X, self.train_Y):
sess.run(optimizer, feed_dict={x: i, y: out})
print("Epoch:", '%04d' % (epoch + 1), "cost=", "W=", sess.run(a), "b=", sess.run(b))
print("Optimization Finished!")
training_cost = sess.run(cost, feed_dict={x: self.train_X, y: self.train_Y})
print("Training cost=", training_cost, "a=", sess.run(a), "b=", sess.run(b), '\n')
return sess.run(a), sess.run(b)
def visualize(a, b, train_X: ndarray, train_Y: ndarray):
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, train_Y)
plt.plot(train_X, a * train_X + b, label='Fitted line')
plt.scatter(train_X, train_Y)
plt.legend()
plt.show()
def data_maker(num=80):
X = np.arange(0, num, dtype=np.float32)
Y = np.float32(np.ceil(5 * (np.sin(X) + X / 5)))
return X, Y
if __name__ == "__main__":
data = data_maker(5)
regression = LinearRegression(*data_maker())
visualize(*(regression.fit() + data_maker()))
| mit |
pizzathief/numpy | doc/example.py | 51 | 3578 | """This is the docstring for the example.py module. Modules names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
from __future__ import division, absolute_import, print_function
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi'):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
long_var_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a = [1, 2, 3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
pass
| bsd-3-clause |
boland1992/seissuite_iran | build/lib/ambient/ant/pstomo.py | 2 | 62674 | """
Definition of classes handling dispersion curves and
velocity maps (obtained by inverting dispersion curves)
"""
import pserrors, psutils
import itertools as it
import numpy as np
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
import os
import glob
import pickle
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib import gridspec
from matplotlib.colors import ColorConverter
import shutil
from inspect import getargspec
# todo: discard measurements if too different from trimester velocities (see BB15B-SPB)
# ====================================================
# parsing configuration file to import some parameters
# ====================================================
from psconfig import (
SIGNAL_WINDOW_VMIN, SIGNAL_WINDOW_VMAX, SIGNAL2NOISE_TRAIL, NOISE_WINDOW_SIZE,
MINSPECTSNR, MINSPECTSNR_NOSDEV, MAXSDEV, MINNBTRIMESTER, MAXPERIOD_FACTOR,
LONSTEP, LATSTEP, CORRELATION_LENGTH, ALPHA, BETA, LAMBDA,
FTAN_ALPHA, FTAN_VELOCITIES_STEP, PERIOD_RESAMPLE)
# ========================
# Constants and parameters
# ========================
EPS = 1.0E-6
# custom color map for seismic anomalies
# --------------------------------------
c = ColorConverter()
colors = ['black', 'red', 'gold', 'white',
'white', 'aquamarine', 'blue', 'magenta']
values = [-1.0, -0.35, -0.1, -0.025,
0.025, 0.1, 0.35, 1.0]
#colors = ['black', 'red', 'gold', 'lemonchiffon', 'white',
# 'palegreen', 'aquamarine', 'blue', 'magenta']
#values = [-1.0, -0.7, -0.3, -0.1, 0.0,
# 0.1, 0.3, 0.7, 1.0]
rgblist = [c.to_rgb(s) for s in colors]
reds, greens, blues = zip(*rgblist)
cdict = {}
for x, r, g, b in zip(values, reds, greens, blues):
v = (x - min(values)) / (max(values) - min(values))
cdict.setdefault('red', []).append((v, r, r))
cdict.setdefault('green', []).append((v, g, g))
cdict.setdefault('blue', []).append((v, b, b))
CMAP_SEISMIC = LinearSegmentedColormap('customseismic', cdict)
# custom color map for spatial resolution
# ---------------------------------------
colors = ['black', 'red', 'yellow', 'green', 'white']
values = [0, 0.25, 0.5, 0.75, 1.0]
#colors = ['magenta', 'blue', 'aquamarine', 'palegreen', 'white',
# 'lemonchiffon', 'gold', 'red', 'darkred']
#values = [-1.0, -0.7, -0.3, -0.1,
# 0.1, 0.3, 0.7, 1.0]
rgblist = [c.to_rgb(s) for s in colors]
reds, greens, blues = zip(*rgblist)
cdict = {}
for x, r, g, b in zip(values, reds, greens, blues):
v = (x - min(values)) / (max(values) - min(values))
cdict.setdefault('red', []).append((v, r, r))
cdict.setdefault('green', []).append((v, g, g))
cdict.setdefault('blue', []).append((v, b, b))
CMAP_RESOLUTION = LinearSegmentedColormap('customresolution', cdict)
CMAP_RESOLUTION.set_bad(color='0.85')
# custom color map for path density
# ---------------------------------------
colors = ['white', 'cyan', 'green', 'yellow', 'red', 'black']
values = [0, 0.05, 0.1, 0.25, 0.5, 1.0]
rgblist = [c.to_rgb(s) for s in colors]
reds, greens, blues = zip(*rgblist)
cdict = {}
for x, r, g, b in zip(values, reds, greens, blues):
v = (x - min(values)) / (max(values) - min(values))
cdict.setdefault('red', []).append((v, r, r))
cdict.setdefault('green', []).append((v, g, g))
cdict.setdefault('blue', []).append((v, b, b))
CMAP_DENSITY = LinearSegmentedColormap('customdensity', cdict)
class DispersionCurve:
"""
Class holding a dispersion curve, i.e., velocity
as a function of period
"""
def __init__(self, periods, v, station1, station2,
minspectSNR=MINSPECTSNR,
minspectSNR_nosdev=MINSPECTSNR_NOSDEV,
maxsdev=MAXSDEV,
minnbtrimester=MINNBTRIMESTER,
maxperiodfactor=MAXPERIOD_FACTOR,
nom2inst_periods=None):
"""
        Initializes the dispersion curve between the pair *station1*-*station2*
        using the given velocities (array *v*) at the given *periods*.
        Selection parameters (used to select velocities that will participate
        in the tomographic inversion) are given in *minspectSNR*,
        *minspectSNR_nosdev*, *maxsdev*, *minnbtrimester* and *maxperiodfactor*.
Periods can be nominal (i.e., center of Gaussian filters of FTAN) or
instantaneous (dphi/dt). If periods are instantaneous, then a list
of tuples [(nominal period, instantaneous period), ...] should be
provided in *nom2inst_periods*
@type periods: iterable
@type v: iterable
@type station1: L{psstation.Station}
@type station2: L{psstation.Station}
"""
# periods and associated velocities
self.periods = np.array(periods)
self.v = np.array(v)
# SNRs along periods
self._SNRs = None
# trimester velocities and SNRs
self.v_trimesters = {}
self._SNRs_trimesters = {}
# stations
self.station1 = station1
self.station2 = station2
# selection parameters
self.minspectSNR = minspectSNR
self.minspectSNR_nosdev = minspectSNR_nosdev
self.maxsdev = maxsdev
self.minnbtrimester = minnbtrimester
self.maxperiodfactor = maxperiodfactor
# list of (nominal period, instantaneous period)
self.nom2inst_periods = nom2inst_periods
def __repr__(self):
return 'Dispersion curve between stations {}-{}'.format(self.station1.name,
self.station2.name)
def get_period_index(self, period):
"""
Gets index of *period*, or raises an error if period
is not found
"""
iperiod = np.abs(self.periods - period).argmin()
if np.abs(self.periods[iperiod] - period) > EPS:
raise Exception('Cannot find period in dispersion curve')
return iperiod
def update_parameters(self, minspectSNR=None, minspectSNR_nosdev=None,
maxsdev=None, minnbtrimester=None, maxperiodfactor=None):
"""
Updating one or more filtering parameter(s)
"""
if not minspectSNR is None:
self.minspectSNR = minspectSNR
if not minspectSNR_nosdev is None:
self.minspectSNR_nosdev = minspectSNR_nosdev
if not maxsdev is None:
self.maxsdev = maxsdev
if not minnbtrimester is None:
self.minnbtrimester = minnbtrimester
if not maxperiodfactor is None:
self.maxperiodfactor = maxperiodfactor
def dist(self):
"""
Interstation spacing (km)
"""
return self.station1.dist(self.station2)
def add_trimester(self, trimester_start, curve_trimester):
"""
Adding a trimester dispersion curve.
@type trimester_start: int
@type curve_trimester: L{DispersionCurve}
"""
if trimester_start in self.v_trimesters:
raise Exception('Trimester already added')
if np.any(curve_trimester.periods != self.periods):
raise Exception("Wrong periods for trimester curve")
        # adding velocity and SNR arrays of trimester
self.v_trimesters[trimester_start] = curve_trimester.v
self._SNRs_trimesters[trimester_start] = curve_trimester._SNRs
def add_SNRs(self, xc, filter_alpha=FTAN_ALPHA, months=None,
vmin=SIGNAL_WINDOW_VMIN,
vmax=SIGNAL_WINDOW_VMAX,
signal2noise_trail=SIGNAL2NOISE_TRAIL,
noise_window_size=NOISE_WINDOW_SIZE):
"""
Adding spectral SNRs at each period of the dispersion curve.
The SNRs are calculated from the cross-correlation data
bandpassed with narrow Gaussian filters (similar to the filter
used in the FTAN) centered at self.periods, and width controlled
by *filter_alpha*. (See psutils.bandpass_gaussian().)
Parameters *vmin*, *vmax*, *signal2noise_trail*, *noise_window_size*
control the location of the signal window and the noise window
(see function xc.SNR()).
@type xc: L{CrossCorrelation}
"""
centerperiods_and_alpha = zip(self.periods, [filter_alpha] * len(self.periods))
SNRs = xc.SNR(centerperiods_and_alpha=centerperiods_and_alpha,
months=months, vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
if self.nom2inst_periods:
# if a list of (nominal period, inst period) is provided
# we use it to re-interpolate SNRs
inst_period_func = interp1d(*zip(*self.nom2inst_periods))
SNRs = np.interp(x=self.periods,
xp=inst_period_func(self.periods),
fp=SNRs,
left=np.nan,
right=np.nan)
self._SNRs = SNRs
def get_SNRs(self, **kwargs):
if self._SNRs is None:
self.add_SNRs(**kwargs)
return self._SNRs
def filtered_sdevs(self):
"""
Standard dev of velocity at each period, calculated
across trimester velocity curves. On periods at which
std dev cannot be calculated, NaNs are returned.
Selection criteria:
- SNR of trimester velocity >= minspectSNR
- nb of trimester velocities >= minnbtrimester
@rtype: L{numpy.ndarray}
"""
# list of arrays of trimester velocities
trimester_vels = self.filtered_trimester_vels()
sdevs = []
for v_across_trimesters in zip(*trimester_vels):
# filtering out nans from trimester velocities
v_across_trimesters = [v for v in v_across_trimesters if not np.isnan(v)]
if len(v_across_trimesters) >= self.minnbtrimester:
sdev = np.std(v_across_trimesters)
else:
# not enough trimester velocities to estimate std dev
sdev = np.nan
sdevs.append(sdev)
return np.array(sdevs) if sdevs else np.ones_like(self.periods) * np.nan
def filtered_vels_sdevs(self):
"""
Returns array of velocities and array of associated
standard deviations. Velocities not passing selection
criteria are replaced with NaNs. Where standard
deviation cannot be estimated, NaNs are returned.
Selection criteria:
1) period <= distance * *maxperiodfactor*
2) for velocities having a standard deviation associated:
- standard deviation <= *maxsdev*
- SNR >= *minspectSNR*
3) for velocities NOT having a standard deviation associated:
- SNR >= *minspectSNR_nosdev*
(SNRs equal to Nan are replaced with 0)
@rtype: L{numpy.ndarray}, L{numpy.ndarray}
"""
if self._SNRs is None:
raise Exception("Spectral SNRs not defined")
# estimating std devs, WHERE POSSIBLE (returning NaNs where not possible)
sdevs = self.filtered_sdevs()
has_sdev = ~np.isnan(sdevs) # where are std devs defined?
# Selection criteria:
# 1) period <= distance * *maxperiodfactor*
cutoffperiod = self.maxperiodfactor * self.dist()
mask = self.periods <= cutoffperiod
# 2) for velocities having a standard deviation associated:
# - standard deviation <= *maxsdev*
# - SNR >= *minspectSNR*
mask[has_sdev] &= (sdevs[has_sdev] <= self.maxsdev) & \
(np.nan_to_num(self._SNRs[has_sdev]) >= self.minspectSNR)
# 3) for velocities NOT having a standard deviation associated:
# - SNR >= *minspectSNR_nosdev*
mask[~has_sdev] &= \
np.nan_to_num(self._SNRs[~has_sdev]) >= self.minspectSNR_nosdev
# replacing velocities not passing the selection criteria with NaNs
return np.where(mask, self.v, np.nan), sdevs
def filtered_vel_sdev_SNR(self, period):
"""
Returns a velocity, its std deviation and SNR at a given period,
or nan if the velocity does not satisfy the criteria, or
raises an exception if the period is not found.
@type period: float
@rtype: (float, float, float)
"""
iperiod = self.get_period_index(period)
vels, sdevs = self.filtered_vels_sdevs()
return vels[iperiod], sdevs[iperiod], self._SNRs[iperiod]
def filtered_trimester_vels(self):
"""
Returns list of arrays of trimester velocities, or nan.
Selection criteria:
- SNR of trimester velocity defined and >= minspectSNR
- period <= pair distance * *maxperiodfactor*
@rtype: list of L{numpy.ndarray}
"""
# filtering criterion: periods <= distance * maxperiodfactor
dist = self.station1.dist(self.station2)
periodmask = self.periods <= self.maxperiodfactor * dist
varrays = []
for trimester_start, vels in self.v_trimesters.items():
SNRs = self._SNRs_trimesters.get(trimester_start)
if SNRs is None:
raise Exception("Spectral SNRs not defined")
# filtering criterion: SNR >= minspectSNR
mask = periodmask & (np.nan_to_num(SNRs) >= self.minspectSNR)
varrays.append(np.where(mask, vels, np.nan))
return varrays
class Grid:
"""
Class holding a 2D regular rectangular spatial grid
"""
def __init__(self, xmin, xstep, nx, ymin, ystep, ny):
"""
Min coords, step size and nb of points of grid
"""
self.xmin = xmin
self.xstep = xstep
self.nx = int(nx)
self.ymin = ymin
self.ystep = ystep
self.ny = int(ny)
def __repr__(self):
s = '<2D grid: x = {}...{} by {}, y = {}...{} by {}>'
return s.format(self.xmin, self.get_xmax(), self.xstep,
self.ymin, self.get_ymax(), self.ystep)
def __eq__(self, other):
"""
@type other: Grid
"""
try:
samegrids = (self.xmin == other.xmin and
self.xstep == other.xstep and
self.nx == other.nx and
self.ymin == other.ymin and
self.ystep == other.ystep and
self.ny == other.ny)
return samegrids
except:
return False
def __ne__(self, other):
return not self.__eq__(other)
def get_xmax(self):
return self.xmin + (self.nx - 1) * self.xstep
def get_ymax(self):
return self.ymin + (self.ny - 1) * self.ystep
def bbox(self):
"""
Bounding box: (xmin, xmax, ymin, ymax)
@rtype: (float, float, float, float)
"""
return self.xmin, self.get_xmax(), self.ymin, self.get_ymax()
def n_nodes(self):
"""
Nb of nodes on grid
"""
return self.nx * self.ny
def ix_iy(self, index_):
"""
Indexes along x and y-axis of node nb *index_*
"""
ix = np.int_(np.array(index_) / self.ny)
iy = np.mod(np.array(index_), self.ny)
return ix, iy
def xy(self, index_):
"""
Coords of node nb *index_*
"""
index_ = np.array(index_)
if np.any((index_ < 0) | (index_ > self.n_nodes() - 1)):
raise Exception('Index out of bounds')
ix, iy = self.ix_iy(index_)
return self._x(ix), self._y(iy)
def xy_nodes(self):
"""
Returns coords of all nodes of grid
"""
return self.xy(np.arange(0, self.n_nodes()))
def xarray(self):
return np.linspace(self.xmin, self.get_xmax(), num=self.nx, endpoint=True)
def yarray(self):
return np.linspace(self.ymin, self.get_ymax(), num=self.ny, endpoint=True)
def index_(self, ix, iy):
"""
Index of node (ix, iy) in grid:
- 0 : ix=0, iy=0
- 1 : ix=0, iy=1
- ...
- ny: ix=1, iy=0
- ...
- nx*ny-1: ix=nx-1, iy=ny-1
"""
ix = np.array(ix)
iy = np.array(iy)
if np.any((ix < 0) | (ix > self.nx - 1)):
raise Exception('ix out of bounds')
if np.any((iy < 0) | (iy > self.ny - 1)):
raise Exception('iy out of bounds')
return ix * self.ny + iy
def indexes_delaunay_triangle(self, x, y):
"""
Indexes of the grid's nodes defining the
Delaunay triangle around point (x, y)
"""
# x and y indexes of bottom left neighbour
ix = self._xindex_left_neighbour(x)
iy = self._yindex_bottom_neighbour(y)
        ix = np.where(ix == self.nx - 1, ix - 1, ix)
        iy = np.where(iy == self.ny - 1, iy - 1, iy)
xratio = (x - self._x(ix)) / self.xstep
yratio = (y - self._y(iy)) / self.ystep
# returning indexes of vertices of bottom right triangle
# or upper left triangle depending on location
index1 = self.index_(ix, iy)
index2 = np.where(xratio >= yratio, self.index_(ix+1, iy), self.index_(ix, iy+1))
index3 = self.index_(ix+1, iy+1)
return index1, index2, index3
def geodetic_dist(self, index1, index2):
"""
Geodetic distance between nodes nb *index1* and *index2*,
        whose coordinates (x, y) are treated as (lon, lat)
"""
        lon1, lat1 = self.xy(index1)
        lon2, lat2 = self.xy(index2)
        return psutils.dist(lons1=lon1, lats1=lat1, lons2=lon2, lats2=lat2)
def to_2D_array(self, a):
"""
Converts a sequence-like *a* to a 2D array b[ix, iy]
such that i is the index of node (ix, iy)
"""
b = np.zeros((self.nx, self.ny))
ix, iy = self.ix_iy(range(self.n_nodes()))
b[ix, iy] = np.array(a).flatten()
return b
def _x(self, ix):
"""
Returns the abscissa of node nb *ix* on x-axis
(ix = 0 ... nx-1)
"""
ix = np.array(ix)
if np.any((ix < 0) | (ix > self.nx - 1)):
raise Exception('ix out of bounds')
return self.xmin + ix * self.xstep
def _y(self, iy):
"""
Returns the ordinate of node nb *iy* on y-axis
"""
iy = np.array(iy)
if np.any((iy < 0) | (iy > self.ny - 1)):
raise Exception('iy out of bounds')
return self.ymin + iy * self.ystep
def _xindex_left_neighbour(self, x):
"""
Returns the index (along x-axis) of the grid nodes
closest to (and on the left of) *x*
(Index of 1st node = 0, index of last node = nx - 1)
@rtype: Number
"""
x = np.array(x)
# checking bounds
out_of_bounds = (x < self.xmin) | (x > self.get_xmax())
if np.any(out_of_bounds):
s = 'some x {} are out of bounds [{} - {}]'
raise Exception(s.format(x[out_of_bounds], self.xmin, self.get_xmax()))
# index of closest left node
return np.int_((x - self.xmin) / self.xstep)
def _yindex_bottom_neighbour(self, y):
"""
Same as above method, along y axis
@rtype: Number
"""
y = np.array(y)
# checking bounds
out_of_bounds = (y < self.ymin) | (y > self.get_ymax())
if np.any(out_of_bounds):
s = 'some y {} are out of bounds [{} - {}]'
raise Exception(s.format(y[out_of_bounds], self.ymin, self.get_ymax()))
# index of closest bottom node
return np.int_((y - self.ymin) / self.ystep)
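# ---------------------------------------------------------------------------
# Added usage sketch for the Grid class above (illustration only, not part of
# the original module). Node numbering follows index_ = ix * ny + iy; the
# values below can be checked against the methods defined above:
#
#     grid = Grid(xmin=0.0, xstep=1.0, nx=3, ymin=10.0, ystep=0.5, ny=2)
#     grid.n_nodes()    # -> 6
#     grid.index_(2, 1) # -> 5
#     grid.xy(5)        # -> (2.0, 10.5)
#     grid.bbox()       # -> (0.0, 2.0, 10.0, 10.5)
# ---------------------------------------------------------------------------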
class VelocityMap:
"""
Class taking care of the inversion of velocities between
pairs of stations, to produce a velocity map at a given
period. The inversion procedure of Barmin et al. (2001)
is applied.
Attributes:
- period : period (s) of the velocity map
- disp_curves : disp curves whose period's velocity is not nan
- paths : list of geodesic paths associated with pairs of stations
of dispersion curves
- v0 : reference velocity (inverse of mean slowness, i.e.,
slowness implied by all observed travel-times)
- dobs : vector of observed data (differences observed-reference travel time)
- Cinv : inverse of covariance matrix of the data
- G : forward matrix, such that d = G.m
(m = parameter vector = (v0-v)/v at grid nodes)
- density : array of path densities at grid nodes
- Q : regularization matrix
- Ginv : inversion operator, (Gt.C^-1.G + Q)^-1.Gt
- mopt : vector of best-fitting parameters, Ginv.C^-1.dobs
= best-fitting (v0-v)/v at grid nodes
- R : resolution matrix, (Gt.C^-1.G + Q)^-1.Gt.C^-1.G = Ginv.C^-1.G
- Rradius : array of radii of the cones that best-fit each line of the
resolution matrix
Note that vectors (d, m) and matrixes (Cinv, G, Q, Ginv, R) are NOT
numpy arrays, but numpy matrixes (vectors being n x 1 matrixes). This
means that the product operation (*) on such objects is NOT the
element-by-element product, but the real matrix product.
"""
def __init__(self, dispersion_curves, period, skippairs=(),
resolution_fit='cone', min_resolution_height=0.1,
showplot=False, verbose=True, **kwargs):
"""
Initializes the velocity map at period = *period*, from
the observed velocities in *dispersion_curves*:
- sets up the data vector, forward matrix and regularization matrix
- performs the tomographic inversion to estimate the best-fitting
parameters and the resolution matrix
- estimates the characteristic spatial resolution by fitting a cone
to each line of the resolution matrix
Specify pairs to be skipped (if any), as a list of pairs of stations names,
e.g., skippairs = [('APOB', 'SPB'), ('ITAB', 'BAMB')].
This option is useful to perform a 2-pass tomographic inversion,
wherein pairs with a too large difference observed/predicted travel-
time are excluded from the second pass.
Select the type of function you want to fit to each resolution map
with *resolution_fit*:
- 'cone' to fit a cone, and report the cone's radius as characteristic
resolution at each grid node in self.Rradius
- 'gaussian' to fit a gaussian function, exp(-r/2.sigma^2), and report
2.sigma as characteristic resolution at each grid node in self.Rradius
Note that all resolutions in self.Rradius having a best-fitting
cone height < *min_resolution_height* * max height will be
discarded and set to nan.
Append optional argument (**kwargs) to override default values:
- minspectSNR : min spectral SNR to retain velocity
(default MINSPECTSNR)
- minspectSNR_nosdev: min spectral SNR to retain velocities without standard
deviation (default MINSPECTSNR_NOSDEV)
- minnbtrimester : min nb of trimester velocities to estimate standard
deviation of velocity
- maxsdev : max standard deviation to retain velocity (default MAXSDEV)
- lonstep : longitude step of grid (default LONSTEP)
- latstep : latitude step of grid (default LATSTEP)
- correlation_length: correlation length of the smoothing kernel:
S(r,r') = exp[-|r-r'|**2 / (2 * correlation_length**2)]
(default value CORRELATION_LENGTH)
- alpha : strength of the spatial smoothing term in the penalty
function (default ALPHA)
- beta : strength of the weighted norm penalization term in the
penalty function (default BETA)
- lambda_ : parameter in the damping factor of the norm penalization
term, such that the norm is weighted by:
exp(- lambda_*path_density)
With a value of 0.15, penalization becomes strong when
path density < ~20
With a value of 0.30, penalization becomes strong when
path density < ~10
(default LAMBDA)
@type dispersion_curves: list of L{DispersionCurve}
@type skippairs: list of (str, str)
"""
self.period = period
# reading inversion parameters
minspectSNR = kwargs.get('minspectSNR', MINSPECTSNR)
minspectSNR_nosdev = kwargs.get('minspectSNR_nosdev', MINSPECTSNR_NOSDEV)
minnbtrimester = kwargs.get('minnbtrimester', MINNBTRIMESTER)
maxsdev = kwargs.get('maxsdev', MAXSDEV)
lonstep = kwargs.get('lonstep', LONSTEP)
latstep = kwargs.get('latstep', LATSTEP)
correlation_length = kwargs.get('correlation_length', CORRELATION_LENGTH)
alpha = kwargs.get('alpha', ALPHA)
beta = kwargs.get('beta', BETA)
lambda_ = kwargs.get('lambda_', LAMBDA)
if verbose:
print "Velocities selection criteria:"
print "- rejecting velocities if SNR < {}".format(minspectSNR)
s = "- rejecting velocities without std dev if SNR < {}"
print s.format(minspectSNR_nosdev)
s = "- estimating standard dev of velocities with more than {} trimesters"
print s.format(minnbtrimester)
print "- rejecting velocities with standard dev > {} km/s".format(maxsdev)
print "\nTomographic inversion parameters:"
print "- {} x {} deg grid".format(lonstep, latstep)
s = "- correlation length of the smoothing kernel: {} km"
print s.format(correlation_length)
print "- strength of the spatial smoothing term: {}".format(alpha)
print "- strength of the norm penalization term: {}".format(beta)
print "- weighting norm by exp(- {} * path_density)".format(lambda_)
print
# skipping pairs
if skippairs:
skippairs = [set(pair) for pair in skippairs]
dispersion_curves = [c for c in dispersion_curves
if not {c.station1.name, c.station2.name} in skippairs]
# updating parameters of dispersion curves
for c in dispersion_curves:
c.update_parameters(minspectSNR=minspectSNR,
minspectSNR_nosdev=minspectSNR_nosdev,
minnbtrimester=minnbtrimester,
maxsdev=maxsdev)
# valid dispersion curves (velocity != nan at period) and
# associated interstation distances
self.disp_curves = [c for c in dispersion_curves
if not np.isnan(c.filtered_vel_sdev_SNR(self.period)[0])]
if not self.disp_curves:
s = "No valid velocity at selected period ({} sec)"
raise pserrors.CannotPerformTomoInversion(s.format(period))
dists = np.array([c.dist() for c in self.disp_curves])
# getting (non nan) velocities and std devs at period
vels, sigmav, _ = zip(*[c.filtered_vel_sdev_SNR(self.period)
for c in self.disp_curves])
vels = np.array(vels)
sigmav = np.array(sigmav)
sigmav_isnan = np.isnan(sigmav)
if np.all(sigmav_isnan):
s = "No valid std deviation at selected period ({} sec)"
raise pserrors.CannotPerformTomoInversion(s.format(period))
# If the resolution in the velocities space is dv,
# it means that a velocity v is actually anything between
# v-dv/2 and v+dv/2, so the standard deviation cannot be
# less than the standard dev of a uniform distribution of
# width dv, which is dv / sqrt(12). Note that:
#
# dv = max(dv_FTAN, dt_xc * v^2/dist),
#
# with dv_FTAN the intrinsic velocity discretization step
# of the FTAN, and dt_xc the sampling interval of the
# cross-correlation.
dv = np.maximum(FTAN_VELOCITIES_STEP, PERIOD_RESAMPLE * vels**2 / dists)
minsigmav = dv / np.sqrt(12)
sigmav[~sigmav_isnan] = np.maximum(sigmav[~sigmav_isnan],
minsigmav[~sigmav_isnan])
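        # (illustrative numbers, assuming e.g. FTAN_VELOCITIES_STEP = 0.01 km/s
        # and PERIOD_RESAMPLE = 1 s: for v = 3 km/s over dist = 300 km, the
        # second term is 1 * 3**2 / 300 = 0.03 km/s, so dv = 0.03 km/s and the
        # std dev floor is 0.03 / sqrt(12) ~ 0.009 km/s)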
# where std dev cannot be estimated (std dev = nan),
# assigning 3 times the mean std dev of the period
# following Bensen et al. (2008)
sigmav[sigmav_isnan] = 3 * sigmav[~sigmav_isnan].mean()
# ======================================================
# setting up reference velocity and data vector
# = vector of differences observed-reference travel time
# ======================================================
if verbose:
print 'Setting up reference velocity (v0) and data vector (dobs)'
# reference velocity = inverse of mean slowness
# mean slowness = slowness implied by observed travel-times
        #                 = sum(observed travel-times) / sum(interstation distances)
s = (dists / vels).sum() / dists.sum()
self.v0 = 1.0 / s
# data vector
self.dobs = np.matrix(dists / vels - dists / self.v0).T
# inverse of covariance matrix of the data
if verbose:
print 'Setting up covariance matrix (C)'
sigmad = sigmav * dists / vels**2
self.Cinv = np.matrix(np.zeros((len(sigmav), len(sigmav))))
np.fill_diagonal(self.Cinv, 1.0 / sigmad**2)
# spatial grid for tomographic inversion (slightly enlarged to be
# sure that no path will fall outside)
lons1, lats1 = zip(*[c.station1.coord for c in self.disp_curves])
lons2, lats2 = zip(*[c.station2.coord for c in self.disp_curves])
tol = 0.5
lonmin = np.floor(min(lons1 + lons2) - tol)
nlon = np.ceil((max(lons1 + lons2) + tol - lonmin) / lonstep) + 1
latmin = np.floor(min(lats1 + lats2) - tol)
nlat = np.ceil((max(lats1 + lats2) + tol - latmin) / latstep) + 1
self.grid = Grid(lonmin, lonstep, nlon, latmin, latstep, nlat)
# geodesic paths associated with pairs of stations of dispersion curves
if verbose:
print 'Calculating interstation paths'
self.paths = []
for curve, dist in zip(self.disp_curves, dists):
# interpoint distance <= 1 km, and nb of points >= 100
npts = max(np.ceil(dist) + 1, 100)
path = psutils.geodesic(curve.station1.coord, curve.station2.coord, npts)
self.paths.append(path)
# ================================================
# setting up forward matrix G, such that d = G.m
#
# G[i,j] = integral{w_j(r) / v0 ds} over path nb i
# (w_j(r) = weight of node nb j on location r)
# ================================================
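        # (consistency check of this convention: for a uniform perturbation
        #  m_j = m0 at all nodes, the weights w_j(r) sum to 1 everywhere along
        #  a path of length L, so G.m ~ m0 * L / v0, i.e. the travel-time
        #  perturbation dist/v - dist/v0 used to build dobs above)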
G = np.zeros((len(self.paths), self.grid.n_nodes()))
if verbose:
print 'Setting up {} x {} forward matrix (G)'.format(*G.shape)
for ipath, path in enumerate(self.paths):
# for each point M along the path (1) we determine the Delaunay
# triangle ABC that encloses M, (2) we locally define a cartesian
# system on the plane ABC, (3) we locate M' (the projection of M
# on the plane ABC) and (4) we attribute weights to A, B, C
# corresponding to the three-point linear interpolation of A, B,
# C at point M'.
lon_M, lat_M = path[:, 0], path[:, 1]
xyzM = psutils.geo2cartesian(lon_M, lat_M)
# indexes, geographic coordinates and cartesian coordinates
# (on unit sphere) of grid nodes of Delaunay triangle ABC
# enclosing M
iA, iB, iC = self.grid.indexes_delaunay_triangle(lon_M, lat_M)
lonlatA, lonlatB, lonlatC = [self.grid.xy(index_) for index_ in (iA, iB, iC)]
xyzA, xyzB, xyzC = [psutils.geo2cartesian(lon, lat)
for lon, lat in (lonlatA, lonlatB, lonlatC)]
# projection of M on the plane ABC
xyzMp = psutils.projection(xyzM, xyzA, xyzB, xyzC)
# weights of nodes A, B, C in linear interpolation =
# barycentric coordinates of M' in triangle ABC
wA, wB, wC = psutils.barycentric_coords(xyzMp, xyzA, xyzB, xyzC)
# attributing weights to grid nodes along path:
# w[j, :] = w_j(r) = weights of node j along path
nM = path.shape[0]
w = np.zeros((self.grid.n_nodes(), nM))
w[iA, range(nM)] = wA
w[iB, range(nM)] = wB
w[iC, range(nM)] = wC
# ds = array of infinitesimal distances along path
ds = psutils.dist(lons1=lon_M[:-1], lats1=lat_M[:-1],
lons2=lon_M[1:], lats2=lat_M[1:])
            # integrating w_j(r) / v0 along path using the trapezoidal rule
G[ipath, :] = np.sum(0.5 * (w[:, :-1] + w[:, 1:]) / self.v0 * ds, axis=-1)
self.G = np.matrix(G)
# path densities around grid's nodes
if verbose:
print "Calculating path densities"
self.density = self.path_density()
# =====================================================================
# setting up regularization matrix Q = Ft.F + Ht.H
#
        # F[i,j] = alpha * | 1                                      if i = j
        #                  | -S(ri,rj) / sum{S(ri,rj'), j' != i}    if i != j
        #
        # H[i,j] = beta * | exp[-lambda * path_density(ri)]   if i = j
        #                 | 0                                  if i != j
        #
        # with S(.,.) the smoothing kernel and ri the locations of the grid nodes
# =====================================================================
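        # (e.g., for a spatially uniform model m, each row of F applied to m
        #  gives alpha * [m_i - sum{S(ri,rj') m_j'} / sum{S(ri,rj')}] = 0, so
        #  the Ft.F term only penalizes deviations from the local average)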
# setting up distance matrix:
# dists[i,j] = distance between nodes nb i and j
dists = np.zeros((self.grid.n_nodes(), self.grid.n_nodes()))
if verbose:
print "Setting up {} x {} regularization matrix (Q)".format(*dists.shape)
# indices of the upper right triangle of distance matrix
# = (array of index #1, array of index #2)
i_upper, j_upper = np.triu_indices_from(dists)
lons_i, lats_i = self.grid.xy(i_upper)
lons_j, lats_j = self.grid.xy(j_upper)
# distance matrix (upper triangle)
dists[i_upper, j_upper] = psutils.dist(lons1=lons_i, lats1=lats_i,
lons2=lons_j, lats2=lats_j)
# symmetrizing distance matrix (works because diagonal elts = 0)
dists += dists.T
# setting up smoothing kernel:
        # S[i,j] = exp[-|ri-rj|**2 / (2 * correlation_length**2)]
S = np.exp(- dists**2 / (2 * correlation_length**2))
S /= S.sum(axis=-1) - np.diag(S) # normalization of non-diagonal terms
# setting up spatial regularization matrix F
F = np.matrix(-S)
F[np.diag_indices_from(F)] = 1
F *= alpha
# setting up regularization matrix Q
# ... Ft.F part
Q = F.T * F
# ... Ht.H part
for i, path_density in enumerate(self.density):
Q[i, i] += beta**2 * np.exp(-2 * lambda_ * path_density)
self.Q = Q
# ===========================================================
# setting up inversion operator Ginv = (Gt.C^-1.G + Q)^-1.Gt,
# estimating model and setting up resolution matrix R =
# Ginv.C^-1.G
# ===========================================================
# inversion operator
if verbose:
print "Setting up inversion operator (Ginv)"
self.Ginv = (self.G.T * self.Cinv * self.G + self.Q).I * self.G.T
# vector of best-fitting parameters
if verbose:
print "Estimating best-fitting parameters (mopt)"
self.mopt = self.Ginv * self.Cinv * self.dobs
# resolution matrix
if verbose:
print "Setting up {0} x {0} resolution matrix (R)".format(self.G.shape[1])
self.R = self.Ginv * self.Cinv * self.G
# ===========================================================
# Estimating spatial resolution at each node of the grid,
# Rradius.
#
# The i-th row of the resolution matrix, R[i,:], contains the
        # resolution map associated with the i-th grid node, that is,
# the estimated model we would get if there were only a point
# velocity anomaly at node nb i. So a cone centered on node
# nb i is fitted to the resolution map, and its radius gives
# an indication of the spatial resolution at node nb i (i.e.,
# the minimum distance at which two point anomalies can be
# resolved)
# ===========================================================
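        # (e.g., a best-fitting cone of radius r0 = 150 km at a node means that
        #  two point anomalies less than ~150 km apart around that node cannot
        #  be distinguished by the inversion)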
if verbose:
print "Estimation spatial resolution (Rradius)"
self.Rradius = np.zeros(self.grid.n_nodes())
heights = np.zeros(self.grid.n_nodes())
for i, Ri in enumerate(np.array(self.R)):
lon0, lat0 = self.grid.xy(i)
# best-fitting cone at point (lon0, lat0)
# Function returning the height of cone of radius *r0*
# and peak *z0*, at a point located *r* km away from
# the cone's center
if resolution_fit.lower().strip() == 'cone':
def cone_height(r, z0, r0):
"""
Cone
"""
return np.where(r < r0, z0 * (1 - r / r0), 0.0)
elif resolution_fit.lower().strip() == 'gaussian':
def cone_height(r, z0, r0):
"""
Gaussian function
"""
sigma = r0 / 2.0
return z0 * np.exp(- r**2 / (2 * sigma**2))
else:
s = "Unknown function to fit resolution: '{}'"
raise Exception(s.format(resolution_fit))
# distances between nodes and cone's center (lon0, lat0)
lonnodes, latnodes = self.grid.xy_nodes()
n = self.grid.n_nodes()
rdata = psutils.dist(lons1=lonnodes, lats1=latnodes,
lons2=n*[lon0], lats2=n*[lat0])
# best possible resolution *rmin* = 2 * inter-node distance
# -> estimating *rmin* along the meridian crossing the cone's
# center (conservative choice as it yields the largest
# possible value)
d2rad = np.pi / 180.0
rmin = 2 * d2rad * 6371.0 * max(self.grid.xstep * np.cos(lat0 * d2rad),
self.grid.ystep)
# fitting the above function to observed heights along nodes,
# in array abs(Ri)
popt, _ = curve_fit(f=cone_height, xdata=rdata, ydata=np.abs(Ri),
p0=[1, 2*rmin], maxfev=10000)
z0, r0 = popt
            # resolution cannot be better than *rmin*
r0 = max(rmin, r0)
            # storing spatial resolution in array
self.Rradius[i] = r0
heights[i] = z0
self.Rradius[heights < heights.max() * min_resolution_height] = np.nan
if showplot:
            # plotting maps of velocity perturbation,
# path density and resolution
_ = self.plot()
def __repr__(self):
"""
E.g., "<Velocity map at period = 10 s>"
"""
return '<Velocity map at period = {} s>'.format(self.period)
def path_density(self, window=(LONSTEP, LATSTEP)):
"""
Returns the path density, that is, on each node of the
grid, the number of paths that cross the rectangular
cell of size (window[0], window[1]) centered on
the node.
"""
# initializing path density
density = np.zeros(self.grid.n_nodes())
# coordinates of grid nodes and associated windows
lons_nodes, lats_nodes = self.grid.xy_nodes()
lons_min = np.expand_dims(lons_nodes - window[0] / 2.0, axis=-1)
lons_max = np.expand_dims(lons_nodes + window[0] / 2.0, axis=-1)
lats_min = np.expand_dims(lats_nodes - window[1] / 2.0, axis=-1)
lats_max = np.expand_dims(lats_nodes + window[1] / 2.0, axis=-1)
for path in self.paths:
lons_path, lats_path = path[:, 0], path[:, 1]
# are points of paths in windows?
# 1st dim = grid nodes; 2nd dim = points along path
points_in_windows = (lons_path >= lons_min) & (lons_path <= lons_max) & \
(lats_path >= lats_min) & (lats_path <= lats_max)
density += np.any(points_in_windows, axis=-1)
return density
def traveltime_residuals(self, relative=False):
"""
Returns the [relative] differences between predicted-observed
travel times at each pair of stations:
differences = predicted - observed travel-time,
= dpred - dobs,
with dpred = G.mopt
relative differences = (predicted - observed) / observed travel-time
= (dpred - dobs) / (dobs + ref travel-time)
@rtype: L{ndarray}
"""
# flattening differences as 1D array
diffs = np.array(self.G * self.mopt - self.dobs).flatten()
if not relative:
return diffs
else:
ttref = np.array([c.dist() / self.v0 for c in self.disp_curves])
ttobs = np.array(self.dobs).flatten() + ttref # observed travel-times
return diffs / ttobs
def velocity_residuals(self, relative=False):
"""
Returns the [relative] differences between observed-predicted
velocities (implied by travel times) at each pair of stations:
differences = observed - predicted velocity,
= observed - predicted (dist / travel time),
@rtype: L{matrix}
"""
dists = np.array([c.dist() for c in self.disp_curves])
ttref = np.array([c.dist() / self.v0 for c in self.disp_curves])
ttobs = np.array(self.dobs).flatten() + ttref # observed travel-times
ttpred = np.array(self.G * self.mopt).flatten() + ttref # predicted tt
vobs = dists / ttobs # observed velocities
vpred = dists / ttpred # predicted velocities
if not relative:
return vobs - vpred
else:
return (vobs - vpred) / vobs
def checkerboard_func(self, vmid, vmin, vmax, squaresize, shape='cos'):
"""
Returns a checkerboard function, f(lons, lats), whose background
value is *vmid*, and alternating min/max values are *vmin* and
*vmax*. The centers of the anomalies are separated by *squaresize*
(in km), and their shape is either 'gaussian' or 'cos'.
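
        Illustrative use (numeric values are example assumptions):
            f = self.checkerboard_func(vmid=3.0, vmin=2.8, vmax=3.2,
                                       squaresize=200, shape='cos')
            # f(lons, lats) then oscillates between 2.8 and 3.2 km/s
            # around the background value of 3.0 km/s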
@rtype: function
"""
# converting square size from km to degrees
d2rad = np.pi / 180.0
midlat = 0.5 * (self.grid.ymin + self.grid.get_ymax())
latwidth = squaresize / 6371.0 / d2rad
lonwidth = squaresize / (6371.0 * np.cos(midlat * d2rad)) / d2rad
# Basis function defining an anomaly of
# unit height centered at (*lon0*, *lat0*).
if shape.lower().strip() == 'gaussian':
def basis_func(lons, lats, lon0, lat0):
"""
                Gaussian anomaly, with sigma parameter such that 3 sigma
is the distance between the center and the border of
the square, that is, half the distance between 2
centers.
"""
n = len(lons)
r = psutils.dist(lons1=lons, lats1=lats, lons2=n*[lon0], lats2=n*[lat0])
sigma = squaresize / 6.0
return np.exp(- r**2 / (2 * sigma**2))
elif shape.lower().strip() == 'cos':
def basis_func(lons, lats, lon0, lat0):
"""
                Cosine anomaly
"""
x = (lons - lon0) / lonwidth
y = (lats - lat0) / latwidth
outside_square = (np.abs(x) >= 0.5) | (np.abs(y) >= 0.5)
return np.where(outside_square, 0.0, np.cos(np.pi*x) * np.cos(np.pi*y))
else:
raise Exception("Unknown shape anomaly: " + shape)
# coordinates of the center of the anomalies
startlon = self.grid.xmin + lonwidth / 2.0
stoplon = self.grid.get_xmax() + lonwidth
centerlons = list(np.arange(startlon, stoplon, lonwidth))
startlat = self.grid.ymin + latwidth / 2.0
stoplat = self.grid.get_ymax() + latwidth
centerlats = list(np.arange(startlat, stoplat, latwidth))
centerlonlats = list(it.product(centerlons, centerlats))
        # factors by which to multiply the basis function associated
# with each center (to alternate lows and highs)
polarities = [(centerlons.index(lon) + centerlats.index(lat)) % 2
for lon, lat in centerlonlats]
factors = np.where(np.array(polarities) == 1, vmax - vmid, vmin - vmid)
def func(lons, lats):
"""
            Checkerboard function: sum of the basis functions centered at
            the points defined above, times the high/low factor,
plus background velocity.
"""
lowhighs = [f * basis_func(lons, lats, lon0, lat0) for f, (lon0, lat0)
in zip(factors, centerlonlats)]
return vmid + sum(lowhighs)
return func
def checkerboard_test(self, vmid, vmin, vmax, squaresize, **kwargs):
"""
Generates synthetic data (travel time perturbations),
dsynth, from a checkerboard model of velocities, and
performs a tomographic inversion on them:
m = (Gt.C^-1.G + Q)^-1.Gt.C^-1.dsynth
= Ginv.C^-1.dsynth
Returns the vector of best-fitting parameters, m.
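
        Illustrative use (numeric values are example assumptions):
            m = self.checkerboard_test(vmid=3.0, vmin=2.8, vmax=3.2,
                                       squaresize=200)
            v = self.grid.to_2D_array(vmid / (1 + m))  # recovered velocities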
@rtype: L{matrix}
"""
# checkerboard function
f_checkerboard = self.checkerboard_func(vmid, vmin, vmax, squaresize, **kwargs)
# setting up vector of synthetic data
dsynth = np.zeros_like(self.dobs)
for d, path, curve in zip(dsynth, self.paths, self.disp_curves):
# array of infinitesimal distances along path
lons, lats = path[:, 0], path[:, 1]
ds = psutils.dist(lons1=lons[:-1], lats1=lats[:-1],
lons2=lons[1:], lats2=lats[1:])
# velocities along path
v = f_checkerboard(lons, lats)
# travel time = integral[ds / v]
t = np.sum(ds * 0.5 * (1.0 / v[:-1] + 1.0 / v[1:]))
# synthetic data = travel time - ref travel time
d[...] = t - curve.dist() / vmid
# inverting synthetic data
m = self.Ginv * self.Cinv * dsynth
return m
def plot(self, xsize=20, title=None, showplot=True, outfile=None, **kwargs):
"""
Plots velocity perturbation, path density
and spatial resolution, and returns the figure.
Additional keyword args in *kwargs* are sent to
self.plot_velocity(), self.plot_pathdensity()
and self.plot_resolution(), when applicable
@rtype: L{matplotlib.figure.Figure}
"""
# bounding box
bbox = self.grid.bbox()
aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0])
figsize = (xsize, aspectratio * xsize / 3.0 + 2)
fig = plt.figure(figsize=figsize)
# layout
gs = gridspec.GridSpec(1, 3, wspace=0.0, hspace=0.0)
# plotting velocity perturbation
ax = fig.add_subplot(gs[0, 0])
subkwargs = {'ax': ax, 'plot_title': False}
# sending additional arguments (when applicable)
subkwargs.update({k: kwargs[k] for k in getargspec(self.plot_velocity).args
if k in kwargs})
self.plot_velocity(**subkwargs)
# plotting path density
ax = fig.add_subplot(gs[0, 1])
subkwargs = {'ax': ax, 'plot_title': False, 'stationlabel': True}
# sending additional arguments (when applicable)
subkwargs.update({k: kwargs[k] for k in getargspec(self.plot_pathdensity).args
if k in kwargs})
self.plot_pathdensity(**subkwargs)
# plotting spatial resolution
ax = fig.add_subplot(gs[0, 2])
subkwargs = {'ax': ax, 'plot_title': False}
# sending additional arguments (when applicable)
subkwargs.update({k: kwargs[k] for k in getargspec(self.plot_resolution).args
if k in kwargs})
self.plot_resolution(**subkwargs)
# fig title
if not title:
# default title if not given
title = u'Period = {} s, {} paths'
title = title.format(self.period, len(self.paths))
fig.suptitle(title, fontsize=16)
gs.tight_layout(fig, rect=[0, 0, 1, 0.95])
# saving figure
if outfile:
if os.path.exists(outfile):
# backup
shutil.copyfile(outfile, outfile + '~')
fig.set_size_inches(figsize)
fig.savefig(outfile, dpi=300)
# showing figure
if showplot:
fig.show()
return fig
def network_plot(self, ax=None, xsize=10, plotdensity=True, plotpaths=True,
stationlabel=False, plot_title=True, showgrid=False,
highlight_residuals_gt=None):
"""
        Plots the network of stations using basemap rather than shapefiles.
        Optionally also plots the interstation paths.
"""
# bounding box
bbox = self.grid.bbox()
# creating figure if not given as input
fig = None
if not ax:
aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0])
            # xsize has no effect if axes are given as input
fig = plt.figure(figsize=(xsize, aspectratio * xsize), tight_layout=True)
ax = fig.add_subplot(111)
# plotting coasts and tectonic provinces
psutils.basemap(ax=ax, labels=False, fill=not plotdensity, bbox=bbox)
if plotpaths:
# residuals observed/predicted travel-times
res = self.traveltime_residuals() if highlight_residuals_gt else []
# plotting paths
for i, path in enumerate(self.paths):
x, y = zip(*path)
linestyle = {'color': 'grey', 'lw': 0.5}
if highlight_residuals_gt and abs(float(res[i])) > highlight_residuals_gt:
# highlighting line as the travel-time error is > threshold
linestyle = {'color': 'black', 'lw': 1.5}
ax.plot(x, y, '-', **linestyle)
if showgrid:
# plotting grid
x, y = self.grid.xy_nodes()
ax.plot(x, y, '+')
# plotting stations
self._plot_stations(ax, stationlabel=stationlabel)
# formatting axes
ax.set_xlim(bbox[:2])
ax.set_ylim(bbox[2:])
if plot_title:
ax.set_title(u'Period = {} s, {} paths'.format(self.period, len(self.paths)))
if fig:
fig.show()
def plot_pathdensity(self, ax=None, xsize=10, plotdensity=True, plotpaths=True,
stationlabel=False, plot_title=True, showgrid=False,
highlight_residuals_gt=None):
"""
Plots path density and/or interstation paths.
Paths for which the residual observed/predicted travel-time
is greater than *highlight_residuals_gt* (if defined) are
highlighted as bold lines.
"""
# bounding box
bbox = self.grid.bbox()
# creating figure if not given as input
fig = None
if not ax:
aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0])
            # xsize has no effect if axes are given as input
fig = plt.figure(figsize=(xsize, aspectratio * xsize), tight_layout=True)
ax = fig.add_subplot(111)
# plotting coasts and tectonic provinces
psutils.basemap(ax=ax, labels=False, fill=not plotdensity, bbox=bbox)
if plotdensity:
# plotting path density
d = self.grid.to_2D_array(self.density)
extent = (self.grid.xmin, self.grid.get_xmax(),
self.grid.ymin, self.grid.get_ymax())
m = ax.imshow(d.transpose(),
origin='bottom',
extent=extent,
interpolation='bicubic',
cmap=CMAP_DENSITY,
vmin=0)
c = plt.colorbar(m, ax=ax, orientation='horizontal', pad=0.1)
c.set_label('Path density')
if plotpaths:
# residuals observed/predicted travel-times
res = self.traveltime_residuals() if highlight_residuals_gt else []
# plotting paths
for i, path in enumerate(self.paths):
x, y = zip(*path)
linestyle = {'color': 'grey', 'lw': 0.5}
if highlight_residuals_gt and abs(float(res[i])) > highlight_residuals_gt:
# highlighting line as the travel-time error is > threshold
linestyle = {'color': 'black', 'lw': 1.5}
ax.plot(x, y, '-', **linestyle)
if showgrid:
# plotting grid
x, y = self.grid.xy_nodes()
ax.plot(x, y, '+')
# plotting stations
self._plot_stations(ax, stationlabel=stationlabel)
# formatting axes
ax.set_xlim(bbox[:2])
ax.set_ylim(bbox[2:])
if plot_title:
ax.set_title(u'Period = {} s, {} paths'.format(self.period, len(self.paths)))
if fig:
fig.show()
def plot_velocity(self, ax=None, xsize=10, perturbation=False, plot_title=True,
vscale=None):
"""
Plots velocity or perturbation relative to mean velocity
(which is not necessarily the reference velocity)
"""
# bounding box
bbox = self.grid.bbox()
# creating figure if not given as input
fig = None
if not ax:
aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0])
            # xsize has no effect if axes are given as input
fig = plt.figure(figsize=(xsize, aspectratio * xsize))
ax = fig.add_subplot(111)
# plotting coasts and tectonic provinces
psutils.basemap(ax=ax, labels=False, fill=False, bbox=bbox)
# plotting stations
self._plot_stations(ax, stationlabel=False)
# velocities on grid: m = (v0 - v) / v, so v = v0 / (1 + m)
v = self.grid.to_2D_array(self.v0 / (1 + self.mopt))
vmean = v.mean()
if perturbation:
# plotting % perturbation relative to mean velocity
v = 100 * (v - vmean) / vmean
if not vscale and perturbation:
            # symmetric scale
maxdv = np.abs(v).max()
vscale = (-maxdv, maxdv)
elif not vscale and not perturbation:
# scale centered on mean velocity
maxdv = np.abs(v - vmean).max()
vscale = (vmean - maxdv, vmean + maxdv)
extent = (self.grid.xmin, self.grid.get_xmax(),
self.grid.ymin, self.grid.get_ymax())
m = ax.imshow(v.transpose(), origin='bottom', extent=extent,
interpolation='bicubic', cmap=CMAP_SEISMIC,
vmin=vscale[0], vmax=vscale[1])
c = plt.colorbar(m, ax=ax, orientation='horizontal', pad=0.1)
c.set_label('Velocity perturbation (%)' if perturbation else 'Velocity (km/s)')
# formatting axes
ax.set_xlim(bbox[:2])
ax.set_ylim(bbox[2:])
if plot_title:
ax.set_title(u'Period = {} s, {} paths'.format(self.period, len(self.paths)))
if fig:
fig.show()
def plot_resolution(self, ax=None, xsize=10, plot_title=True):
"""
Plots resolution map
"""
# bounding box
bbox = self.grid.bbox()
# creating figure if not given as input
fig = None
if not ax:
aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0])
            # xsize has no effect if axes are given as input
fig = plt.figure(figsize=(xsize, aspectratio * xsize), tight_layout=True)
ax = fig.add_subplot(111)
# plotting coasts and tectonic provinces
psutils.basemap(ax=ax, labels=False, fill=False, bbox=bbox)
# plotting stations
self._plot_stations(ax, stationlabel=False)
# plotting spatial resolution
r = self.grid.to_2D_array(self.Rradius)
extent = (self.grid.xmin, self.grid.get_xmax(),
self.grid.ymin, self.grid.get_ymax())
m = ax.imshow(r.transpose(), origin='bottom', extent=extent,
interpolation='bicubic',
cmap=CMAP_RESOLUTION)
c = plt.colorbar(m, ax=ax, orientation='horizontal', pad=0.1)
c.set_label('Spatial resolution (km)')
# formatting axes
ax.set_xlim(bbox[:2])
ax.set_ylim(bbox[2:])
if plot_title:
ax.set_title(u'Period = {} s, {} paths'.format(self.period, len(self.paths)))
if fig:
fig.show()
def plot_checkerboard(self, vmid, vmin, vmax, squaresize, axes=None, xsize=10,
**kwargs):
"""
        Plots the checkerboard model and the reconstructed checkerboard
"""
# checkerboard test
m = self.checkerboard_test(vmid, vmin, vmax, squaresize, **kwargs)
v = self.grid.to_2D_array(vmid / (1 + m))
dv = 100 * (v - vmid) / vmid
# bounding box
bbox = self.grid.bbox()
# creating figure if not given as input
fig = None
if not axes:
aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0])
            # xsize has no effect if axes are given as input
fig = plt.figure(figsize=(xsize, aspectratio * xsize), tight_layout=True)
axes = [fig.add_subplot(121), fig.add_subplot(122)]
ims = []
# checkerboard model
checkerboard_func = self.checkerboard_func(vmid, vmin, vmax, squaresize, **kwargs)
lons, lats = self.grid.xy_nodes()
a = self.grid.to_2D_array(checkerboard_func(lons, lats))
extent = (self.grid.xmin, self.grid.get_xmax(),
self.grid.ymin, self.grid.get_ymax())
im = axes[0].imshow(a.transpose(),
origin='bottom', extent=extent,
interpolation='bicubic',
vmin=vmin, vmax=vmax,
cmap=CMAP_SEISMIC)
ims.append(im)
# reconstructed checkerboard
extent = (self.grid.xmin, self.grid.get_xmax(),
self.grid.ymin, self.grid.get_ymax())
im = axes[1].imshow(dv.transpose(),
origin='bottom', extent=extent,
interpolation='bicubic',
vmin=-np.abs(dv).max(),
vmax=np.abs(dv).max(),
cmap=CMAP_SEISMIC)
ims.append(im)
for ax, im in zip(axes, ims):
# coasts and tectonic provinces
psutils.basemap(ax=ax, labels=False, fill=False, bbox=bbox)
# stations
self._plot_stations(ax, stationlabel=False)
# color bar
c = plt.colorbar(im, ax=ax, orientation='horizontal', pad=0.1)
c.set_label('km/s' if ax is axes[0] else '% perturbation')
# limits
ax.set_xlim(bbox[:2])
ax.set_ylim(bbox[2:])
if fig:
fig.show()
def _plot_stations(self, ax, stationlabel):
"""
Plots stations on map
"""
# plotting stations
xylabels = [c.station1.coord + (c.station1.name,) for c in self.disp_curves] + \
[c.station2.coord + (c.station2.name,) for c in self.disp_curves]
xlist, ylist, labels = zip(*list(set(xylabels)))
ax.plot(xlist, ylist, '^', color='k', ms=10, mfc='w', mew=1)
if not stationlabel:
return
# stations label
for x, y, label in zip(xlist, ylist, labels):
ax.text(x, y, label, ha='center', va='bottom', fontsize=10, weight='bold')
def pathdensity_colormap(dmax):
"""
Builds a colormap for path density (d) varying from
0 to *dmax*:
- white for d = 0
- blue to green for 1 <= d <= 5
- green to red for 5 <= d <= 10
- red to black for 10 <= d <= dmax
"""
dmax = max(dmax, 11)
x1 = 1.0 / dmax
x2 = 5.0 / dmax
x3 = 10.0 / dmax
cdict = {'red': ((0, 1, 1), (x1, 0, 0), (x2, 0, 0), (x3, 1, 1), (1, 0, 0)),
'green': ((0, 1, 1), (x1, 0, 0), (x2, 1, 1), (x3, 0, 0), (1, 0, 0)),
'blue': ((0, 1, 1), (x1, 1, 1), (x2, 0, 0), (x3, 0, 0), (1, 0, 0))}
return LinearSegmentedColormap('tmp', cdict)
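# Illustrative use of the colormap above (variable names are assumptions):
# cmap = pathdensity_colormap(dmax=density.max())
# ax.imshow(density2d.transpose(), origin='bottom', cmap=cmap)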
if __name__ == '__main__':
    # importing dir of FTAN results
from psconfig import FTAN_DIR
# loading dispersion curves
flist = sorted(glob.glob(os.path.join(FTAN_DIR, 'FTAN*.pickle*')))
print 'Select file containing dispersion curves:'
print '\n'.join('{} - {}'.format(i, os.path.basename(f)) for i, f in enumerate(flist))
pickle_file = flist[int(raw_input('\n'))]
f = open(pickle_file, 'rb')
curves = pickle.load(f)
f.close()
print "Dispersion curves stored in variable 'curves'" | gpl-3.0 |
Lawrence-Liu/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 |
"""
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
| bsd-3-clause |
jmmease/pandas | pandas/tests/plotting/test_frame.py | 3 | 119068 |
# coding: utf-8
""" Test cases for DataFrame.plot """
import pytest
import string
import warnings
from datetime import datetime, date
import pandas as pd
from pandas import (Series, DataFrame, MultiIndex, PeriodIndex, date_range,
bdate_range)
from pandas.core.dtypes.api import is_list_like
from pandas.compat import range, lrange, lmap, lzip, u, zip, PY3
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
import numpy as np
from numpy.random import rand, randn
import pandas.plotting as plotting
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works,
_skip_if_no_scipy_gaussian_kde,
_ok_for_gaussian_kde)
tm._skip_if_no_mpl()
class TestDataFramePlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame({"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(
size=20)})
@pytest.mark.slow
def test_plot(self):
df = self.tdf
_check_plot_works(df.plot, grid=False)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
subplots=True, use_index=False)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
        # mpl >= 1.5.2 (or slightly below) throws AttributeError
with pytest.raises((TypeError, AttributeError)):
df.plot.line(blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.plot, subplots=True, title='blah')
# We have to redo it here because _check_plot_works does two plots,
# once without an ax kwarg and once with an ax kwarg and the new sharex
# behaviour does not remove the visibility of the latter axis (as ax is
# present). see: https://github.com/pandas-dev/pandas/issues/9737
axes = df.plot(subplots=True, title='blah')
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
# axes[0].figure.savefig("test.png")
for ax in axes[:2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible([ax.xaxis.get_label()], visible=False)
for ax in [axes[2]]:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible([ax.xaxis.get_label()])
self._check_ticks_props(ax, xrot=0)
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
# GH 6951
# Test with single column
df = DataFrame({'x': np.random.rand(10)})
axes = _check_plot_works(df.plot.bar, subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
axes = _check_plot_works(df.plot.bar, subplots=True, layout=(-1, 1))
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.plot.bar(subplots=True, ax=ax)
assert len(axes) == 1
if self.mpl_ge_1_5_0:
result = ax.axes
else:
result = ax.get_axes() # deprecated
assert result is axes[0]
# GH 15516
def test_mpl2_color_cycle_str(self):
# test CN mpl 2.0 color cycle
if self.mpl_ge_2_0_0:
colors = ['C' + str(x) for x in range(10)]
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
for c in colors:
_check_plot_works(df.plot, color=c)
else:
pytest.skip("not supported in matplotlib < 2.0.0")
def test_color_single_series_list(self):
# GH 3486
df = DataFrame({"A": [1, 2, 3]})
_check_plot_works(df.plot, color=['red'])
def test_rgb_tuple_color(self):
# GH 16695
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
_check_plot_works(df.plot, x='x', y='y', color=(1, 0, 0))
_check_plot_works(df.plot, x='x', y='y', color=(1, 0, 0, 0.5))
def test_color_empty_string(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(color='')
def test_color_and_style_arguments(self):
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
# passing both 'color' and 'style' arguments should be allowed
# if there is no color symbol in the style strings:
ax = df.plot(color=['red', 'black'], style=['-', '--'])
# check that the linestyles are correctly set:
linestyle = [line.get_linestyle() for line in ax.lines]
assert linestyle == ['-', '--']
# check that the colors are correctly set:
color = [line.get_color() for line in ax.lines]
assert color == ['red', 'black']
# passing both 'color' and 'style' arguments should not be allowed
# if there is a color symbol in the style strings:
with pytest.raises(ValueError):
df.plot(color=['red', 'black'], style=['k-', 'r--'])
def test_nonnumeric_exclude(self):
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
assert len(ax.get_lines()) == 1 # B was plotted
@pytest.mark.slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self._check_text_labels(ax.xaxis.get_label(), 'a')
@pytest.mark.slow
def test_donot_overwrite_index_name(self):
# GH 8494
df = DataFrame(randn(2, 2), columns=['a', 'b'])
df.index.name = 'NAME'
df.plot(y='b', label='LABEL')
assert df.index.name == 'NAME'
@pytest.mark.slow
def test_plot_xy(self):
# columns.inferred_type == 'string'
df = self.tdf
self._check_data(df.plot(x=0, y=1), df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'), df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1),
figsize=(16., 8.))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@pytest.mark.slow
def test_logscales(self):
df = DataFrame({'a': np.arange(100)}, index=np.arange(100))
ax = df.plot(logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = df.plot(logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = df.plot(loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
@pytest.mark.slow
def test_xcompat(self):
import pandas as pd
df = self.tdf
ax = df.plot(x_compat=True)
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plotting.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plotting.plot_params['x_compat'] = False
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
tm.close()
# useful if you're plotting a bunch together
with pd.plotting.plot_params.use('x_compat', True):
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
def test_period_compat(self):
# GH 9012
# period-array conversions
df = DataFrame(
np.random.rand(21, 2),
index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)),
columns=['a', 'b'])
df.plot()
self.plt.axhline(y=0)
tm.close()
def test_unsorted_index(self):
df = DataFrame({'y': np.arange(100)}, index=np.arange(99, -1, -1),
dtype=np.int64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
tm.assert_series_equal(rs, df.y, check_index_type=False)
tm.close()
df.index = pd.Index(np.arange(99, -1, -1), dtype=np.float64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
tm.assert_series_equal(rs, df.y)
def test_unsorted_index_lims(self):
df = DataFrame({'y': [0., 1., 2., 3.]}, index=[1., 0., 3., 2.])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
df = DataFrame({'y': [0., 1., np.nan, 3., 4., 5., 6.]},
index=[1., 0., 3., 2., np.nan, 3., 2.])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
df = DataFrame({'y': [0., 1., 2., 3.], 'z': [91., 90., 93., 92.]})
ax = df.plot(x='z', y='y')
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
@pytest.mark.slow
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
for kind in ['bar', 'barh', 'line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
assert axes.shape == (3, )
for ax, column in zip(axes, df.columns):
self._check_legend_labels(ax,
labels=[pprint_thing(column)])
for ax in axes[:-2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(
ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
axes = df.plot(kind=kind, subplots=True, sharex=False)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
axes = df.plot(kind=kind, subplots=True, legend=False)
for ax in axes:
assert ax.get_legend() is None
@pytest.mark.slow
def test_subplots_timeseries(self):
idx = date_range(start='2014-07-01', freq='M', periods=10)
df = DataFrame(np.random.rand(10, 3), index=idx)
for kind in ['line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes[:-2]:
# GH 7801
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(
ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
self._check_ticks_props(axes, xrot=0)
axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45,
fontsize=7)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
self._check_ticks_props(ax, xlabelsize=7, xrot=45,
ylabelsize=7)
def test_subplots_timeseries_y_axis(self):
# GH16953
data = {"numeric": np.array([1, 2, 5]),
"timedelta": [pd.Timedelta(-10, unit="s"),
pd.Timedelta(10, unit="m"),
pd.Timedelta(10, unit="h")],
"datetime_no_tz": [pd.to_datetime("2017-08-01 00:00:00"),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00")],
"datetime_all_tz": [pd.to_datetime("2017-08-01 00:00:00",
utc=True),
pd.to_datetime("2017-08-01 02:00:00",
utc=True),
pd.to_datetime("2017-08-02 00:00:00",
utc=True)],
"text": ["This", "should", "fail"]}
testdata = DataFrame(data)
ax_numeric = testdata.plot(y="numeric")
assert (ax_numeric.get_lines()[0].get_data()[1] ==
testdata["numeric"].values).all()
ax_timedelta = testdata.plot(y="timedelta")
assert (ax_timedelta.get_lines()[0].get_data()[1] ==
testdata["timedelta"].values).all()
ax_datetime_no_tz = testdata.plot(y="datetime_no_tz")
assert (ax_datetime_no_tz.get_lines()[0].get_data()[1] ==
testdata["datetime_no_tz"].values).all()
ax_datetime_all_tz = testdata.plot(y="datetime_all_tz")
assert (ax_datetime_all_tz.get_lines()[0].get_data()[1] ==
testdata["datetime_all_tz"].values).all()
with pytest.raises(TypeError):
testdata.plot(y="text")
    @pytest.mark.xfail(reason='not supported for period, categorical, '
'datetime_mixed_tz')
def test_subplots_timeseries_y_axis_not_supported(self):
"""
This test will fail for:
period:
since period isn't yet implemented in ``select_dtypes``
and because it will need a custom value converter +
            tick formatter (as was done for x-axis plots)
categorical:
because it will need a custom value converter +
            tick formatter (also doesn't work for x-axis, as of now)
datetime_mixed_tz:
            because of the way pandas handles ``Series`` of
            ``datetime`` objects with different timezones;
            generally, converting the ``datetime`` objects to a
            tz-aware form could help with this problem
"""
data = {"numeric": np.array([1, 2, 5]),
"period": [pd.Period('2017-08-01 00:00:00', freq='H'),
pd.Period('2017-08-01 02:00', freq='H'),
pd.Period('2017-08-02 00:00:00', freq='H')],
"categorical": pd.Categorical(["c", "b", "a"],
categories=["a", "b", "c"],
ordered=False),
"datetime_mixed_tz": [pd.to_datetime("2017-08-01 00:00:00",
utc=True),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00")]}
testdata = pd.DataFrame(data)
ax_period = testdata.plot(x="numeric", y="period")
assert (ax_period.get_lines()[0].get_data()[1] ==
testdata["period"].values).all()
ax_categorical = testdata.plot(x="numeric", y="categorical")
assert (ax_categorical.get_lines()[0].get_data()[1] ==
testdata["categorical"].values).all()
ax_datetime_mixed_tz = testdata.plot(x="numeric",
y="datetime_mixed_tz")
assert (ax_datetime_mixed_tz.get_lines()[0].get_data()[1] ==
testdata["datetime_mixed_tz"].values).all()
@pytest.mark.slow
def test_subplots_layout(self):
# GH 6667
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(2, -1))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(4, -1))
self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
assert axes.shape == (4, 1)
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(1, 1))
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(-1, -1))
# single column
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1, )
axes = df.plot(subplots=True, layout=(3, 3))
self._check_axes_shape(axes, axes_num=1, layout=(3, 3))
assert axes.shape == (3, 3)
@pytest.mark.slow
def test_subplots_warnings(self):
# GH 9464
warnings.simplefilter('error')
try:
df = DataFrame(np.random.randn(100, 4))
df.plot(subplots=True, layout=(3, 2))
df = DataFrame(np.random.randn(100, 4),
index=date_range('1/1/2000', periods=100))
df.plot(subplots=True, layout=(3, 2))
except Warning as w:
self.fail(w)
warnings.simplefilter('default')
@pytest.mark.slow
def test_subplots_multiple_axes(self):
# GH 5353, 6970, GH 7069
fig, axes = self.plt.subplots(2, 3)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes[0], sharex=False,
sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3, )
assert returned[0].figure is fig
# draw on second row
returned = df.plot(subplots=True, ax=axes[1], sharex=False,
sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3, )
assert returned[0].figure is fig
self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
tm.close()
with pytest.raises(ValueError):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
df.plot(subplots=True, ax=axes)
# pass 2-dim axes and invalid layout
        # invalid layout should not affect the input and return value
        # (the warning that is shown is tested in
        # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes)
fig, axes = self.plt.subplots(2, 2)
with warnings.catch_warnings():
df = DataFrame(np.random.rand(10, 4),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes, layout=(2, 1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4, )
returned = df.plot(subplots=True, ax=axes, layout=(2, -1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4, )
returned = df.plot(subplots=True, ax=axes, layout=(-1, 2),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4, )
# single column
fig, axes = self.plt.subplots(1, 1)
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1, )
def test_subplots_ts_share_axes(self):
# GH 3964
fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
df = DataFrame(
np.random.randn(10, 9),
index=date_range(start='2014-07-01', freq='M', periods=10))
for i, ax in enumerate(axes.ravel()):
df[i].plot(ax=ax, fontsize=5)
# Rows other than bottom should not be visible
for ax in axes[0:-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=False)
# Bottom row should be visible
for ax in axes[-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=True)
# First column should be visible
for ax in axes[[0, 1, 2], [0]].ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
# Other columns should not be visible
for ax in axes[[0, 1, 2], [1]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes[[0, 1, 2], [2]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
def test_subplots_sharex_axes_existing_axes(self):
# GH 9158
d = {'A': [1., 2., 3., 4.], 'B': [4., 3., 2., 1.], 'C': [5, 1, 3, 4]}
df = DataFrame(d, index=date_range('2014 10 11', '2014 10 14'))
axes = df[['A', 'B']].plot(subplots=True)
df['C'].plot(ax=axes[0], secondary_y=True)
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
for ax in axes.ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
@pytest.mark.slow
def test_subplots_dup_columns(self):
# GH 10962
df = DataFrame(np.random.rand(5, 5), columns=list('aaaaa'))
axes = df.plot(subplots=True)
for ax in axes:
self._check_legend_labels(ax, labels=['a'])
assert len(ax.lines) == 1
tm.close()
axes = df.plot(subplots=True, secondary_y='a')
for ax in axes:
# (right) is only attached when subplots=False
self._check_legend_labels(ax, labels=['a'])
assert len(ax.lines) == 1
tm.close()
ax = df.plot(secondary_y='a')
self._check_legend_labels(ax, labels=['a (right)'] * 5)
assert len(ax.lines) == 0
assert len(ax.right_ax.lines) == 5
def test_negative_log(self):
df = - DataFrame(rand(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
with pytest.raises(ValueError):
df.plot.area(logy=True)
with pytest.raises(ValueError):
df.plot.area(loglog=True)
def _compare_stacked_y_cood(self, normal_lines, stacked_lines):
base = np.zeros(len(normal_lines[0].get_data()[1]))
for nl, sl in zip(normal_lines, stacked_lines):
            base += nl.get_data()[1]  # get y coordinates
sy = sl.get_data()[1]
tm.assert_numpy_array_equal(base, sy)
def test_line_area_stacked(self):
with tm.RNGContext(42):
df = DataFrame(rand(6, 4), columns=['w', 'x', 'y', 'z'])
neg_df = -df
# each column has either positive or negative value
sep_df = DataFrame({'w': rand(6),
'x': rand(6),
'y': -rand(6),
'z': -rand(6)})
# each column has positive-negative mixed value
mixed_df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['w', 'x', 'y', 'z'])
for kind in ['line', 'area']:
ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])
self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])
_check_plot_works(mixed_df.plot, stacked=False)
with pytest.raises(ValueError):
mixed_df.plot(stacked=True)
_check_plot_works(df.plot, kind=kind, logx=True, stacked=True)
def test_line_area_nan_df(self):
values1 = [1, 2, np.nan, 3]
values2 = [3, np.nan, 2, 1]
df = DataFrame({'a': values1, 'b': values2})
tdf = DataFrame({'a': values1,
'b': values2}, index=tm.makeDateIndex(k=4))
for d in [df, tdf]:
ax = _check_plot_works(d.plot)
masked1 = ax.lines[0].get_ydata()
masked2 = ax.lines[1].get_ydata()
# remove nan for comparison purpose
exp = np.array([1, 2, 3], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp)
exp = np.array([3, 2, 1], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp)
tm.assert_numpy_array_equal(
masked1.mask, np.array([False, False, True, False]))
tm.assert_numpy_array_equal(
masked2.mask, np.array([False, True, False, False]))
expected1 = np.array([1, 2, 0, 3], dtype=np.float64)
expected2 = np.array([3, 0, 2, 1], dtype=np.float64)
ax = _check_plot_works(d.plot, stacked=True)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(),
expected1 + expected2)
ax = _check_plot_works(d.plot.area)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(),
expected1 + expected2)
ax = _check_plot_works(d.plot.area, stacked=False)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
def test_line_lim(self):
df = DataFrame(rand(6, 3), columns=['x', 'y', 'z'])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
ax = df.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
axes = df.plot(secondary_y=True, subplots=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes:
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
def test_area_lim(self):
df = DataFrame(rand(6, 4), columns=['x', 'y', 'z', 'four'])
neg_df = -df
for stacked in [True, False]:
ax = _check_plot_works(df.plot.area, stacked=stacked)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
assert ymin == 0
ax = _check_plot_works(neg_df.plot.area, stacked=stacked)
ymin, ymax = ax.get_ylim()
assert ymax == 0
@pytest.mark.slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
default_colors = self._maybe_unpack_cycler(plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.bar()
self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot.bar(color=custom_colors)
self._check_colors(ax.patches[::5], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.bar(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.bar(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
ax = df.loc[:, [0]].plot.bar(color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
tm.close()
ax = df.plot(kind='bar', color='green')
self._check_colors(ax.patches[::5], facecolors=['green'] * 5)
tm.close()
def test_bar_user_colors(self):
df = pd.DataFrame({"A": range(4),
"B": range(1, 5),
"color": ['red', 'blue', 'blue', 'red']})
# This should *only* work when `y` is specified, else
# we use one color per column
ax = df.plot.bar(y='A', color=df['color'])
result = [p.get_facecolor() for p in ax.patches]
expected = [(1., 0., 0., 1.),
(0., 0., 1., 1.),
(0., 0., 1., 1.),
(1., 0., 0., 1.)]
assert result == expected
@pytest.mark.slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot.bar(linewidth=2)
for r in ax.patches:
assert r.get_linewidth() == 2
# stacked
ax = df.plot.bar(stacked=True, linewidth=2)
for r in ax.patches:
assert r.get_linewidth() == 2
# subplots
axes = df.plot.bar(linewidth=2, subplots=True)
self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
for ax in axes:
for r in ax.patches:
assert r.get_linewidth() == 2
@pytest.mark.slow
def test_bar_barwidth(self):
df = DataFrame(randn(5, 5))
width = 0.9
# regular
ax = df.plot.bar(width=width)
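        # non-stacked bars split the group width evenly across the columns,
        # so each individual bar is width / len(df.columns) wide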
for r in ax.patches:
assert r.get_width() == width / len(df.columns)
# stacked
ax = df.plot.bar(stacked=True, width=width)
for r in ax.patches:
assert r.get_width() == width
# horizontal regular
ax = df.plot.barh(width=width)
for r in ax.patches:
assert r.get_height() == width / len(df.columns)
# horizontal stacked
ax = df.plot.barh(stacked=True, width=width)
for r in ax.patches:
assert r.get_height() == width
# subplots
axes = df.plot.bar(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
assert r.get_width() == width
# horizontal subplots
axes = df.plot.barh(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
assert r.get_height() == width
@pytest.mark.slow
def test_bar_barwidth_position(self):
df = DataFrame(randn(5, 5))
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9,
position=0.2)
@pytest.mark.slow
def test_bar_barwidth_position_int(self):
# GH 12979
df = DataFrame(randn(5, 5))
for w in [1, 1.]:
ax = df.plot.bar(stacked=True, width=w)
ticks = ax.xaxis.get_ticklocs()
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))
assert ax.get_xlim() == (-0.75, 4.75)
# check left-edge of bars
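            # with width=1 the bars are centred on the integer ticks, so each
            # bar's left edge sits at tick - 0.5 (stacked bars share one x per
            # category): the first patch starts at -0.5 and the last at 3.5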
assert ax.patches[0].get_x() == -0.5
assert ax.patches[-1].get_x() == 3.5
self._check_bar_alignment(df, kind='bar', stacked=True, width=1)
self._check_bar_alignment(df, kind='barh', stacked=False, width=1)
self._check_bar_alignment(df, kind='barh', stacked=True, width=1)
self._check_bar_alignment(df, kind='bar', subplots=True, width=1)
self._check_bar_alignment(df, kind='barh', subplots=True, width=1)
@pytest.mark.slow
def test_bar_bottom_left(self):
df = DataFrame(rand(5, 5))
ax = df.plot.bar(stacked=False, bottom=1)
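        # 5 columns x 5 rows gives 25 patches, all shifted to start at bottom=1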
result = [p.get_y() for p in ax.patches]
assert result == [1] * 25
ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5])
result = [p.get_y() for p in ax.patches[:5]]
assert result == [-1, -2, -3, -4, -5]
ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1]))
result = [p.get_x() for p in ax.patches]
assert result == [1] * 25
ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5])
result = [p.get_x() for p in ax.patches[:5]]
assert result == [1, 2, 3, 4, 5]
axes = df.plot.bar(subplots=True, bottom=-1)
for ax in axes:
result = [p.get_y() for p in ax.patches]
assert result == [-1] * 5
axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1]))
for ax in axes:
result = [p.get_x() for p in ax.patches]
assert result == [1] * 5
@pytest.mark.slow
def test_bar_nan(self):
df = DataFrame({'A': [10, np.nan, 20],
'B': [5, 10, 20],
'C': [1, 2, 3]})
ax = df.plot.bar()
expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]
result = [p.get_height() for p in ax.patches]
assert result == expected
ax = df.plot.bar(stacked=True)
result = [p.get_height() for p in ax.patches]
assert result == expected
result = [p.get_y() for p in ax.patches]
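        # the bottom of each stacked bar is the running total of the previous
        # columns at that index, with NaN treated as 0 (e.g. column C at
        # index 0 starts at 10 + 5 = 15)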
expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
assert result == expected
@pytest.mark.slow
def test_bar_categorical(self):
# GH 13019
df1 = pd.DataFrame(np.random.randn(6, 5),
index=pd.Index(list('ABCDEF')),
columns=pd.Index(list('abcde')))
# categorical index must behave the same
df2 = pd.DataFrame(np.random.randn(6, 5),
index=pd.CategoricalIndex(list('ABCDEF')),
columns=pd.CategoricalIndex(list('abcde')))
for df in [df1, df2]:
ax = df.plot.bar()
ticks = ax.xaxis.get_ticklocs()
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
assert ax.get_xlim() == (-0.5, 5.5)
# check left-edge of bars
assert ax.patches[0].get_x() == -0.25
assert ax.patches[-1].get_x() == 5.15
ax = df.plot.bar(stacked=True)
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
assert ax.get_xlim() == (-0.5, 5.5)
assert ax.patches[0].get_x() == -0.25
assert ax.patches[-1].get_x() == 4.75
@pytest.mark.slow
def test_plot_scatter(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
_check_plot_works(df.plot.scatter, x='x', y='y')
_check_plot_works(df.plot.scatter, x=1, y=2)
with pytest.raises(TypeError):
df.plot.scatter(x='x')
with pytest.raises(TypeError):
df.plot.scatter(y='y')
# GH 6951
axes = df.plot(x='x', y='y', kind='scatter', subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_plot_scatter_with_categorical_data(self):
# GH 16199
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': pd.Categorical(['a', 'b', 'a', 'c'])})
with pytest.raises(ValueError) as ve:
df.plot(x='x', y='y', kind='scatter')
ve.match('requires y column to be numeric')
with pytest.raises(ValueError) as ve:
df.plot(x='y', y='x', kind='scatter')
ve.match('requires x column to be numeric')
with pytest.raises(ValueError) as ve:
df.plot(x='y', y='y', kind='scatter')
ve.match('requires x column to be numeric')
@pytest.mark.slow
def test_plot_scatter_with_c(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
axes = [df.plot.scatter(x='x', y='y', c='z'),
df.plot.scatter(x=0, y=1, c=2)]
for ax in axes:
# default to Greys
assert ax.collections[0].cmap.name == 'Greys'
if self.mpl_ge_1_3_1:
# n.b. there appears to be no public method to get the colorbar
# label
assert ax.collections[0].colorbar._label == 'z'
cm = 'cubehelix'
ax = df.plot.scatter(x='x', y='y', c='z', colormap=cm)
assert ax.collections[0].cmap.name == cm
# verify turning off colorbar works
ax = df.plot.scatter(x='x', y='y', c='z', colorbar=False)
assert ax.collections[0].colorbar is None
# verify that we can still plot a solid color
ax = df.plot.scatter(x=0, y=1, c='red')
assert ax.collections[0].colorbar is None
self._check_colors(ax.collections, facecolors=['r'])
# Ensure that we can pass an np.array straight through to matplotlib,
# this functionality was accidentally removed previously.
# See https://github.com/pandas-dev/pandas/issues/8852 for bug report
#
# Exercise colormap path and non-colormap path as they are independent
#
df = DataFrame({'A': [1, 2], 'B': [3, 4]})
red_rgba = [1.0, 0.0, 0.0, 1.0]
green_rgba = [0.0, 1.0, 0.0, 1.0]
rgba_array = np.array([red_rgba, green_rgba])
ax = df.plot.scatter(x='A', y='B', c=rgba_array)
        # expect the face colors of the points in the non-colormap path to be
        # identical to the values we supplied; normally we'd be on shaky
        # ground comparing floats for equality, but here we expect them to be
        # identical
        tm.assert_numpy_array_equal(ax.collections[0].get_facecolor(),
                                    rgba_array)
# we don't test the colors of the faces in this next plot because they
# are dependent on the spring colormap, which may change its colors
# later.
float_array = np.array([0.0, 1.0])
df.plot.scatter(x='A', y='B', c=float_array, cmap='spring')
def test_scatter_colors(self):
df = DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3], 'c': [1, 2, 3]})
with pytest.raises(TypeError):
df.plot.scatter(x='a', y='b', c='c', color='green')
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
ax = df.plot.scatter(x='a', y='b', c='c')
tm.assert_numpy_array_equal(
ax.collections[0].get_facecolor()[0],
np.array(self.colorconverter.to_rgba(default_colors[0])))
ax = df.plot.scatter(x='a', y='b', color='white')
tm.assert_numpy_array_equal(ax.collections[0].get_facecolor()[0],
np.array([1, 1, 1, 1], dtype=np.float64))
@pytest.mark.slow
def test_plot_bar(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
_check_plot_works(df.plot.bar)
_check_plot_works(df.plot.bar, legend=False)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.plot.bar, subplots=True)
_check_plot_works(df.plot.bar, stacked=True)
df = DataFrame(randn(10, 15),
index=list(string.ascii_letters[:10]),
columns=lrange(15))
_check_plot_works(df.plot.bar)
df = DataFrame({'a': [0, 1], 'b': [1, 0]})
ax = _check_plot_works(df.plot.bar)
self._check_ticks_props(ax, xrot=90)
ax = df.plot.bar(rot=35, fontsize=10)
self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)
ax = _check_plot_works(df.plot.barh)
self._check_ticks_props(ax, yrot=0)
ax = df.plot.barh(rot=55, fontsize=11)
self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
def _check_bar_alignment(self, df, kind='bar', stacked=False,
subplots=False, align='center', width=0.5,
position=0.5):
axes = df.plot(kind=kind, stacked=stacked, subplots=subplots,
align=align, width=width, position=position, grid=True)
axes = self._flatten_visible(axes)
for ax in axes:
if kind == 'bar':
axis = ax.xaxis
ax_min, ax_max = ax.get_xlim()
min_edge = min([p.get_x() for p in ax.patches])
max_edge = max([p.get_x() + p.get_width() for p in ax.patches])
elif kind == 'barh':
axis = ax.yaxis
ax_min, ax_max = ax.get_ylim()
min_edge = min([p.get_y() for p in ax.patches])
                max_edge = max([p.get_y() + p.get_height()
                                for p in ax.patches])
else:
raise ValueError
# GH 7498
# compare margins between lim and bar edges
tm.assert_almost_equal(ax_min, min_edge - 0.25)
tm.assert_almost_equal(ax_max, max_edge + 0.25)
p = ax.patches[0]
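            # for stacked bars (or subplots) a single patch spans the whole
            # category, so its center is edge + width * position; grouped bars
            # spread the category over len(df.columns) patches, so the group
            # center is offset by width * n_columns * position from the first
            # patch's edge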
if kind == 'bar' and (stacked is True or subplots is True):
edge = p.get_x()
center = edge + p.get_width() * position
elif kind == 'bar' and stacked is False:
center = p.get_x() + p.get_width() * len(df.columns) * position
edge = p.get_x()
elif kind == 'barh' and (stacked is True or subplots is True):
center = p.get_y() + p.get_height() * position
edge = p.get_y()
elif kind == 'barh' and stacked is False:
center = p.get_y() + p.get_height() * len(
df.columns) * position
edge = p.get_y()
else:
raise ValueError
            # check that the ticks are located on the integers
assert (axis.get_ticklocs() == np.arange(len(df))).all()
if align == 'center':
                # check whether the bar is located at the center
tm.assert_almost_equal(axis.get_ticklocs()[0], center)
elif align == 'edge':
# Check whether the bar's edge starts from the tick
tm.assert_almost_equal(axis.get_ticklocs()[0], edge)
else:
raise ValueError
return axes
@pytest.mark.slow
def test_bar_stacked_center(self):
# GH2157
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9)
@pytest.mark.slow
def test_bar_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9)
@pytest.mark.slow
def test_bar_subplots_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9)
self._check_bar_alignment(df, kind='barh', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9)
@pytest.mark.slow
def test_bar_align_single_column(self):
df = DataFrame(randn(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True)
@pytest.mark.slow
def test_bar_edge(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True, align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True, align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9,
align='edge')
@pytest.mark.slow
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
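        # the base expectation holds the data decades; matplotlib newer than
        # 1.2.1 also shows one decade tick below and above the data, hence
        # 0.1 and 100 are prepended/appended for those versions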
expected = np.array([1., 10.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 100))
# no subplots
df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5))
ax = df.plot.bar(grid=True, log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
@pytest.mark.slow
def test_bar_log_subplots(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(
log=True, subplots=True)
tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
@pytest.mark.slow
def test_boxplot(self):
df = self.hist_df
series = df['height']
numeric_cols = df._get_numeric_data().columns
labels = [pprint_thing(c) for c in numeric_cols]
ax = _check_plot_works(df.plot.box)
self._check_text_labels(ax.get_xticklabels(), labels)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(),
np.arange(1, len(numeric_cols) + 1))
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
# different warning on py3
if not PY3:
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.box, subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, yaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_xticklabels(), [label])
assert len(ax.lines) == self.bp_n_objects
axes = series.plot.box(rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
ax = _check_plot_works(series.plot.box)
positions = np.array([1, 6, 7])
ax = df.plot.box(positions=positions)
numeric_cols = df._get_numeric_data().columns
labels = [pprint_thing(c) for c in numeric_cols]
self._check_text_labels(ax.get_xticklabels(), labels)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
@pytest.mark.slow
def test_boxplot_vertical(self):
df = self.hist_df
numeric_cols = df._get_numeric_data().columns
labels = [pprint_thing(c) for c in numeric_cols]
# if horizontal, yticklabels are rotated
ax = df.plot.box(rot=50, fontsize=8, vert=False)
self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
self._check_text_labels(ax.get_yticklabels(), labels)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.box,
subplots=True, vert=False, logx=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, xaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_yticklabels(), [label])
assert len(ax.lines) == self.bp_n_objects
positions = np.array([3, 2, 8])
ax = df.plot.box(positions=positions, vert=False)
self._check_text_labels(ax.get_yticklabels(), labels)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
@pytest.mark.slow
def test_boxplot_return_type(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
with pytest.raises(ValueError):
df.plot.box(return_type='NOTATYPE')
result = df.plot.box(return_type='dict')
self._check_box_return_type(result, 'dict')
result = df.plot.box(return_type='axes')
self._check_box_return_type(result, 'axes')
result = df.plot.box() # default axes
self._check_box_return_type(result, 'axes')
result = df.plot.box(return_type='both')
self._check_box_return_type(result, 'both')
@pytest.mark.slow
def test_boxplot_subplots_return_type(self):
df = self.hist_df
# normal style: return_type=None
result = df.plot.box(subplots=True)
assert isinstance(result, Series)
self._check_box_return_type(result, None, expected_keys=[
'height', 'weight', 'category'])
for t in ['dict', 'axes', 'both']:
returned = df.plot.box(return_type=t, subplots=True)
self._check_box_return_type(
returned, t,
expected_keys=['height', 'weight', 'category'],
check_ax_title=False)
@pytest.mark.slow
def test_kde_df(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
if not self.mpl_ge_1_5_0:
pytest.skip("mpl is not supported")
df = DataFrame(randn(100, 4))
ax = _check_plot_works(df.plot, kind='kde')
expected = [pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
self._check_ticks_props(ax, xrot=0)
ax = df.plot(kind='kde', rot=20, fontsize=5)
self._check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot, kind='kde',
subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.plot(kind='kde', logy=True, subplots=True)
self._check_ax_scales(axes, yaxis='log')
@pytest.mark.slow
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
if not self.mpl_ge_1_5_0:
pytest.skip("mpl is not supported")
df = DataFrame(np.random.uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
_check_plot_works(df.plot, kind='kde')
@pytest.mark.slow
def test_hist_df(self):
from matplotlib.patches import Rectangle
if self.mpl_le_1_2_1:
pytest.skip("not supported in matplotlib <= 1.2.x")
df = DataFrame(randn(100, 4))
series = df[0]
ax = _check_plot_works(df.plot.hist)
expected = [pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.hist,
subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
self._check_ax_scales(axes, yaxis='log')
axes = series.plot.hist(rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
ax = series.plot.hist(normed=True, cumulative=True, bins=4)
        # height of the last bin must be 1.0 (cumulative of a normed histogram)
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-1].get_height(), 1.0)
tm.close()
ax = series.plot.hist(cumulative=True, bins=4)
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-2].get_height(), 100.0)
tm.close()
# if horizontal, yticklabels are rotated
axes = df.plot.hist(rot=50, fontsize=8, orientation='horizontal')
self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)
def _check_box_coord(self, patches, expected_y=None, expected_h=None,
expected_x=None, expected_w=None):
result_y = np.array([p.get_y() for p in patches])
result_height = np.array([p.get_height() for p in patches])
result_x = np.array([p.get_x() for p in patches])
result_width = np.array([p.get_width() for p in patches])
        # dtype depends on the values above, so don't check it
if expected_y is not None:
tm.assert_numpy_array_equal(result_y, expected_y,
check_dtype=False)
if expected_h is not None:
tm.assert_numpy_array_equal(result_height, expected_h,
check_dtype=False)
if expected_x is not None:
tm.assert_numpy_array_equal(result_x, expected_x,
check_dtype=False)
if expected_w is not None:
tm.assert_numpy_array_equal(result_width, expected_w,
check_dtype=False)
@pytest.mark.slow
def test_hist_df_coord(self):
normal_df = DataFrame({'A': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([8, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([6, 7, 8, 9, 10]))},
columns=['A', 'B', 'C'])
nan_df = DataFrame({'A': np.repeat(np.array([np.nan, 1, 2, 3, 4, 5]),
np.array([3, 10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, np.nan, 2, 3, 4, 5]),
np.array([8, 3, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, np.nan, 4, 5]),
np.array([6, 7, 8, 3, 9, 10]))},
columns=['A', 'B', 'C'])
for df in [normal_df, nan_df]:
ax = df.plot.hist(bins=5)
self._check_box_coord(ax.patches[:5],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
ax = df.plot.hist(bins=5, stacked=True)
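            # when stacked, each column's bars start where the previous
            # column's heights ended (expected_y of one column equals the
            # cumulative expected_h of the columns before it)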
self._check_box_coord(ax.patches[:5],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_y=np.array([10, 9, 8, 7, 6]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:],
expected_y=np.array([18, 17, 16, 15, 14]),
expected_h=np.array([6, 7, 8, 9, 10]))
axes = df.plot.hist(bins=5, stacked=True, subplots=True)
self._check_box_coord(axes[0].patches,
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches,
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches,
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
if self.mpl_ge_1_3_1:
# horizontal
ax = df.plot.hist(bins=5, orientation='horizontal')
self._check_box_coord(ax.patches[:5],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
ax = df.plot.hist(bins=5, stacked=True,
orientation='horizontal')
self._check_box_coord(ax.patches[:5],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_x=np.array([10, 9, 8, 7, 6]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(
ax.patches[10:],
expected_x=np.array([18, 17, 16, 15, 14]),
expected_w=np.array([6, 7, 8, 9, 10]))
axes = df.plot.hist(bins=5, stacked=True, subplots=True,
orientation='horizontal')
self._check_box_coord(axes[0].patches,
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches,
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches,
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
@pytest.mark.slow
def test_plot_int_columns(self):
df = DataFrame(randn(100, 4)).cumsum()
_check_plot_works(df.plot, legend=True)
@pytest.mark.slow
def test_df_legend_labels(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
df2 = DataFrame(rand(3, 3), columns=['d', 'e', 'f'])
df3 = DataFrame(rand(3, 3), columns=['g', 'h', 'i'])
df4 = DataFrame(rand(3, 3), columns=['j', 'k', 'l'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=True)
self._check_legend_labels(ax, labels=df.columns)
ax = df2.plot(kind=kind, legend=False, ax=ax)
self._check_legend_labels(ax, labels=df.columns)
ax = df3.plot(kind=kind, legend=True, ax=ax)
self._check_legend_labels(ax, labels=df.columns.union(df3.columns))
ax = df4.plot(kind=kind, legend='reverse', ax=ax)
expected = list(df.columns.union(df3.columns)) + list(reversed(
df4.columns))
self._check_legend_labels(ax, labels=expected)
# Secondary Y
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(kind='bar', legend=True, secondary_y='h', ax=ax)
self._check_legend_labels(
ax, labels=['a', 'b (right)', 'c', 'g', 'h (right)', 'i'])
# Time Series
ind = date_range('1/1/2014', periods=3)
df = DataFrame(randn(3, 3), columns=['a', 'b', 'c'], index=ind)
df2 = DataFrame(randn(3, 3), columns=['d', 'e', 'f'], index=ind)
df3 = DataFrame(randn(3, 3), columns=['g', 'h', 'i'], index=ind)
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(legend=True, ax=ax)
self._check_legend_labels(
ax, labels=['a', 'b (right)', 'c', 'g', 'h', 'i'])
# scatter
ax = df.plot.scatter(x='a', y='b', label='data1')
self._check_legend_labels(ax, labels=['data1'])
ax = df2.plot.scatter(x='d', y='e', legend=False, label='data2', ax=ax)
self._check_legend_labels(ax, labels=['data1'])
ax = df3.plot.scatter(x='g', y='h', label='data3', ax=ax)
self._check_legend_labels(ax, labels=['data1', 'data3'])
        # ensure label args pass through and that neither the index name
        # nor the column names mutate
df5 = df.set_index('a')
ax = df5.plot(y='b')
self._check_legend_labels(ax, labels=['b'])
ax = df5.plot(y='b', label='LABEL_b')
self._check_legend_labels(ax, labels=['LABEL_b'])
self._check_text_labels(ax.xaxis.get_label(), 'a')
ax = df5.plot(y='c', label='LABEL_c', ax=ax)
self._check_legend_labels(ax, labels=['LABEL_b', 'LABEL_c'])
assert df5.columns.tolist() == ['b', 'c']
def test_legend_name(self):
multi = DataFrame(randn(4, 4),
columns=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
multi.columns.names = ['group', 'individual']
ax = multi.plot()
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df = DataFrame(randn(5, 5))
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df.columns.name = 'new'
ax = df.plot(legend=False, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'new')
@pytest.mark.slow
def test_no_legend(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=False)
self._check_legend_labels(ax, visible=False)
@pytest.mark.slow
def test_style_by_column(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
df = DataFrame(randn(100, 3))
for markers in [{0: '^',
1: '+',
2: 'o'}, {0: '^',
1: '+'}, ['^', '+', 'o'], ['^', '+']]:
fig.clf()
fig.add_subplot(111)
ax = df.plot(style=markers)
for i, l in enumerate(ax.get_lines()[:len(markers)]):
assert l.get_marker() == markers[i]
@pytest.mark.slow
def test_line_label_none(self):
s = Series([1, 2])
ax = s.plot()
assert ax.get_legend() is None
ax = s.plot(legend=True)
assert ax.get_legend().get_texts()[0].get_text() == 'None'
@pytest.mark.slow
@tm.capture_stdout
def test_line_colors(self):
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax2 = df.plot(colors=custom_colors)
lines2 = ax2.get_lines()
for l1, l2 in zip(ax.get_lines(), lines2):
assert l1.get_color() == l2.get_color()
tm.close()
ax = df.plot(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
ax = df.loc[:, [0]].plot(color='DodgerBlue')
self._check_colors(ax.lines, linecolors=['DodgerBlue'])
ax = df.plot(color='red')
self._check_colors(ax.get_lines(), linecolors=['red'] * 5)
tm.close()
# GH 10299
custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
with pytest.raises(ValueError):
            # colors containing shorthand hex values raise a ValueError
custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
# Forced show plot
_check_plot_works(df.plot, color=custom_colors)
@pytest.mark.slow
def test_dont_modify_colors(self):
colors = ['r', 'g', 'b']
pd.DataFrame(np.random.rand(10, 2)).plot(color=colors)
assert len(colors) == 3
@pytest.mark.slow
def test_line_colors_and_styles_subplots(self):
# GH 9894
from matplotlib import cm
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
axes = df.plot(subplots=True)
for ax, c in zip(axes, list(default_colors)):
if self.mpl_ge_2_0_0:
c = [c]
self._check_colors(ax.get_lines(), linecolors=c)
tm.close()
# single color char
axes = df.plot(subplots=True, color='k')
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['k'])
tm.close()
# single color str
axes = df.plot(subplots=True, color='green')
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['green'])
tm.close()
custom_colors = 'rgcby'
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
axes = df.plot(color=list(custom_colors), subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# GH 10299
custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
with pytest.raises(ValueError):
            # colors containing shorthand hex values raise a ValueError
custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
# Forced show plot
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.plot, color=custom_colors, subplots=True)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
axes = df.loc[:, [0]].plot(color='DodgerBlue', subplots=True)
self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
# single character style
axes = df.plot(style='r', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['r'])
tm.close()
# list of styles
styles = list('rgcby')
axes = df.plot(style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@pytest.mark.slow
def test_area_colors(self):
from matplotlib import cm
from matplotlib.collections import PolyCollection
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot.area(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=custom_colors)
handles, labels = ax.get_legend_handles_labels()
if self.mpl_ge_1_5_0:
self._check_colors(handles, facecolors=custom_colors)
else:
# legend is stored as Line2D, thus check linecolors
linehandles = [x for x in handles
if not isinstance(x, PolyCollection)]
self._check_colors(linehandles, linecolors=custom_colors)
for h in handles:
assert h.get_alpha() is None
tm.close()
ax = df.plot.area(colormap='jet')
jet_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=jet_colors)
handles, labels = ax.get_legend_handles_labels()
if self.mpl_ge_1_5_0:
self._check_colors(handles, facecolors=jet_colors)
else:
linehandles = [x for x in handles
if not isinstance(x, PolyCollection)]
self._check_colors(linehandles, linecolors=jet_colors)
for h in handles:
assert h.get_alpha() is None
tm.close()
# When stacked=False, alpha is set to 0.5
ax = df.plot.area(colormap=cm.jet, stacked=False)
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
self._check_colors(poly, facecolors=jet_with_alpha)
handles, labels = ax.get_legend_handles_labels()
if self.mpl_ge_1_5_0:
linecolors = jet_with_alpha
else:
# Line2D can't have alpha in its linecolor
linecolors = jet_colors
self._check_colors(handles[:len(jet_colors)], linecolors=linecolors)
for h in handles:
assert h.get_alpha() == 0.5
@pytest.mark.slow
def test_hist_colors(self):
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.hist()
self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot.hist(color=custom_colors)
self._check_colors(ax.patches[::10], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.hist(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.hist(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
ax = df.loc[:, [0]].plot.hist(color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
ax = df.plot(kind='hist', color='green')
self._check_colors(ax.patches[::10], facecolors=['green'] * 5)
tm.close()
@pytest.mark.slow
def test_kde_colors(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
if not self.mpl_ge_1_5_0:
pytest.skip("mpl is not supported")
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot.kde(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax = df.plot.kde(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot.kde(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
@pytest.mark.slow
def test_kde_colors_and_styles_subplots(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
if not self.mpl_ge_1_5_0:
pytest.skip("mpl is not supported")
from matplotlib import cm
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
axes = df.plot(kind='kde', subplots=True)
for ax, c in zip(axes, list(default_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# single color char
axes = df.plot(kind='kde', color='k', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['k'])
tm.close()
# single color str
axes = df.plot(kind='kde', color='red', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['red'])
tm.close()
custom_colors = 'rgcby'
axes = df.plot(kind='kde', color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(kind='kde', colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
axes = df.loc[:, [0]].plot(kind='kde', color='DodgerBlue',
subplots=True)
self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
# single character style
axes = df.plot(kind='kde', style='r', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['r'])
tm.close()
# list of styles
styles = list('rgcby')
axes = df.plot(kind='kde', style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@pytest.mark.slow
def test_boxplot_colors(self):
def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k',
fliers_c=None):
# TODO: outside this func?
if fliers_c is None:
fliers_c = 'k' if self.mpl_ge_2_0_0 else 'b'
self._check_colors(bp['boxes'],
linecolors=[box_c] * len(bp['boxes']))
self._check_colors(bp['whiskers'],
linecolors=[whiskers_c] * len(bp['whiskers']))
self._check_colors(bp['medians'],
linecolors=[medians_c] * len(bp['medians']))
self._check_colors(bp['fliers'],
linecolors=[fliers_c] * len(bp['fliers']))
self._check_colors(bp['caps'],
linecolors=[caps_c] * len(bp['caps']))
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
bp = df.plot.box(return_type='dict')
_check_colors(bp, default_colors[0], default_colors[0],
default_colors[2])
tm.close()
dict_colors = dict(boxes='#572923', whiskers='#982042',
medians='#804823', caps='#123456')
bp = df.plot.box(color=dict_colors, sym='r+', return_type='dict')
_check_colors(bp, dict_colors['boxes'], dict_colors['whiskers'],
dict_colors['medians'], dict_colors['caps'], 'r')
tm.close()
# partial colors
dict_colors = dict(whiskers='c', medians='m')
bp = df.plot.box(color=dict_colors, return_type='dict')
_check_colors(bp, default_colors[0], 'c', 'm')
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
bp = df.plot.box(colormap='jet', return_type='dict')
jet_colors = lmap(cm.jet, np.linspace(0, 1, 3))
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# Test colormap functionality
bp = df.plot.box(colormap=cm.jet, return_type='dict')
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# string color is applied to all artists except fliers
bp = df.plot.box(color='DodgerBlue', return_type='dict')
_check_colors(bp, 'DodgerBlue', 'DodgerBlue', 'DodgerBlue',
'DodgerBlue')
# tuple is also applied to all artists except fliers
bp = df.plot.box(color=(0, 1, 0), sym='#123456', return_type='dict')
_check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0),
(0, 1, 0), '#123456')
with pytest.raises(ValueError):
            # a color dict containing an invalid key raises a ValueError
df.plot.box(color=dict(boxes='red', xxxx='blue'))
def test_default_color_cycle(self):
import matplotlib.pyplot as plt
colors = list('rgbk')
if self.mpl_ge_1_5_0:
import cycler
plt.rcParams['axes.prop_cycle'] = cycler.cycler('color', colors)
else:
plt.rcParams['axes.color_cycle'] = colors
df = DataFrame(randn(5, 3))
ax = df.plot()
expected = self._maybe_unpack_cycler(plt.rcParams)[:3]
self._check_colors(ax.get_lines(), linecolors=expected)
def test_unordered_ts(self):
df = DataFrame(np.array([3.0, 2.0, 1.0]),
index=[date(2012, 10, 1),
date(2012, 9, 1),
date(2012, 8, 1)],
columns=['test'])
ax = df.plot()
xticks = ax.lines[0].get_xdata()
assert xticks[0] < xticks[1]
ydata = ax.lines[0].get_ydata()
tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
def test_kind_both_ways(self):
df = DataFrame({'x': [1, 2, 3]})
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
df.plot(kind=kind)
getattr(df.plot, kind)()
for kind in ['scatter', 'hexbin']:
df.plot('x', 'x', kind=kind)
getattr(df.plot, kind)('x', 'x')
def test_all_invalid_plot_data(self):
df = DataFrame(list('abcd'))
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
df.plot(kind=kind)
@pytest.mark.slow
def test_partially_invalid_plot_data(self):
with tm.RNGContext(42):
df = DataFrame(randn(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
df.plot(kind=kind)
with tm.RNGContext(42):
# area plot doesn't support positive/negative mixed data
kinds = ['area']
df = DataFrame(rand(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in kinds:
with pytest.raises(TypeError):
df.plot(kind=kind)
def test_invalid_kind(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(kind='aasdf')
@pytest.mark.slow
def test_hexbin_basic(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', gridsize=10)
        # TODO: need a better way to test; this only checks existence.
assert len(ax.collections) == 1
# GH 6951
axes = df.plot.hexbin(x='A', y='B', subplots=True)
        # hexbin should have 2 axes in the figure: one for plotting and the
        # other for the colorbar
assert len(axes[0].figure.axes) == 2
# return value is single axes
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_hexbin_with_c(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', C='C')
assert len(ax.collections) == 1
ax = df.plot.hexbin(x='A', y='B', C='C', reduce_C_function=np.std)
assert len(ax.collections) == 1
@pytest.mark.slow
def test_hexbin_cmap(self):
df = self.hexbin_df
# Default to BuGn
ax = df.plot.hexbin(x='A', y='B')
assert ax.collections[0].cmap.name == 'BuGn'
cm = 'cubehelix'
ax = df.plot.hexbin(x='A', y='B', colormap=cm)
assert ax.collections[0].cmap.name == cm
@pytest.mark.slow
def test_no_color_bar(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', colorbar=None)
assert ax.collections[0].colorbar is None
@pytest.mark.slow
def test_allow_cmap(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', cmap='YlGn')
assert ax.collections[0].cmap.name == 'YlGn'
with pytest.raises(TypeError):
df.plot.hexbin(x='A', y='B', cmap='YlGn', colormap='BuGn')
@pytest.mark.slow
def test_pie_df(self):
df = DataFrame(np.random.rand(5, 3), columns=['X', 'Y', 'Z'],
index=['a', 'b', 'c', 'd', 'e'])
with pytest.raises(ValueError):
df.plot.pie()
ax = _check_plot_works(df.plot.pie, y='Y')
self._check_text_labels(ax.texts, df.index)
ax = _check_plot_works(df.plot.pie, y=2)
self._check_text_labels(ax.texts, df.index)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.pie,
subplots=True)
assert len(axes) == len(df.columns)
for ax in axes:
self._check_text_labels(ax.texts, df.index)
for ax, ylabel in zip(axes, df.columns):
assert ax.get_ylabel() == ylabel
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.pie,
subplots=True, labels=labels,
colors=color_args)
assert len(axes) == len(df.columns)
for ax in axes:
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
def test_pie_df_nan(self):
df = DataFrame(np.random.rand(4, 4))
for i in range(4):
df.iloc[i, i] = np.nan
fig, axes = self.plt.subplots(ncols=4)
df.plot.pie(subplots=True, ax=axes, legend=True)
base_expected = ['0', '1', '2', '3']
for i, ax in enumerate(axes):
expected = list(base_expected) # force copy
expected[i] = ''
result = [x.get_text() for x in ax.texts]
assert result == expected
            # legend labels: NaNs are not included in the legend when
            # subplots=True; see
            # https://github.com/pandas-dev/pandas/issues/8390
assert ([x.get_text() for x in ax.get_legend().get_texts()] ==
base_expected[:i] + base_expected[i + 1:])
@pytest.mark.slow
def test_errorbar_plot(self):
with warnings.catch_warnings():
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12) * 0.2, 'y': np.ones(12) * 0.4}
df_err = DataFrame(d_err)
# check line plots
ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err['x'], kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err,
kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err['x'],
xerr=df_err['x'],
kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
# _check_plot_works adds an ax so catch warning. see GH #13188
axes = _check_plot_works(df.plot,
yerr=df_err, xerr=df_err,
subplots=True,
kind=kind)
self._check_has_errorbars(axes, xerr=1, yerr=1)
ax = _check_plot_works((df + 1).plot, yerr=df_err,
xerr=df_err, kind='bar', log=True)
self._check_has_errorbars(ax, xerr=2, yerr=2)
# yerr is raw error values
ax = _check_plot_works(df['y'].plot, yerr=np.ones(12) * 0.4)
self._check_has_errorbars(ax, xerr=0, yerr=1)
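            # a 2D yerr of shape (n_columns, n_points) supplies one error
            # series per plotted column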
ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is iterator
import itertools
ax = _check_plot_works(df.plot,
yerr=itertools.repeat(0.1, len(df)))
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is column name
for yerr in ['yerr', u('誤差')]:
s_df = df.copy()
s_df[yerr] = np.ones(12) * 0.2
ax = _check_plot_works(s_df.plot, yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(s_df.plot, y='y', x='x', yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=1)
with pytest.raises(ValueError):
df.plot(yerr=np.random.randn(11))
df_err = DataFrame({'x': ['zzz'] * 12, 'y': ['zzz'] * 12})
with pytest.raises((ValueError, TypeError)):
df.plot(yerr=df_err)
@pytest.mark.slow
def test_errorbar_with_integer_column_names(self):
# test with integer column names
df = DataFrame(np.random.randn(10, 2))
df_err = DataFrame(np.random.randn(10, 2))
ax = _check_plot_works(df.plot, yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, y=0, yerr=1)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@pytest.mark.slow
def test_errorbar_with_partial_columns(self):
df = DataFrame(np.random.randn(10, 3))
df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ix = date_range('1/1/2000', periods=10, freq='M')
df.set_index(ix, inplace=True)
df_err.set_index(ix, inplace=True)
ax = _check_plot_works(df.plot, yerr=df_err, kind='line')
self._check_has_errorbars(ax, xerr=0, yerr=2)
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12) * 0.2, 'z': np.ones(12) * 0.4}
df_err = DataFrame(d_err)
for err in [d_err, df_err]:
ax = _check_plot_works(df.plot, yerr=err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@pytest.mark.slow
def test_errorbar_timeseries(self):
with warnings.catch_warnings():
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
d_err = {'x': np.ones(12) * 0.2, 'y': np.ones(12) * 0.4}
# check time-series plots
ix = date_range('1/1/2000', '1/1/2001', freq='M')
tdf = DataFrame(d, index=ix)
tdf_err = DataFrame(d_err, index=ix)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, y='y', yerr=tdf_err['x'],
kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, y='y', yerr='x', kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
# _check_plot_works adds an ax so catch warning. see GH #13188
axes = _check_plot_works(tdf.plot,
kind=kind, yerr=tdf_err,
subplots=True)
self._check_has_errorbars(axes, xerr=0, yerr=1)
def test_errorbar_asymmetrical(self):
np.random.seed(0)
err = np.random.rand(3, 2, 5)
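        # err has shape (n_columns, 2, n_points); axis 1 holds the
        # (lower, upper) asymmetric errors, which is why err.T below raises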
        # each column is [0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]
df = DataFrame(np.arange(15).reshape(3, 5)).T
data = df.values
ax = df.plot(yerr=err, xerr=err / 2)
if self.mpl_ge_2_0_0:
yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1]
expected_0_0 = err[0, :, 0] * np.array([-1, 1])
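            # the first value of column 0 is 0, so its error bar path runs
            # from 0 - lower to 0 + upper, i.e. err[0, :, 0] * [-1, 1]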
tm.assert_almost_equal(yerr_0_0, expected_0_0)
else:
assert ax.lines[7].get_ydata()[0] == data[0, 1] - err[1, 0, 0]
assert ax.lines[8].get_ydata()[0] == data[0, 1] + err[1, 1, 0]
assert ax.lines[5].get_xdata()[0] == -err[1, 0, 0] / 2
assert ax.lines[6].get_xdata()[0] == err[1, 1, 0] / 2
with pytest.raises(ValueError):
df.plot(yerr=err.T)
tm.close()
def test_table(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, table=True)
_check_plot_works(df.plot, table=df)
ax = df.plot()
assert len(ax.tables) == 0
plotting.table(ax, df.T)
assert len(ax.tables) == 1
def test_errorbar_scatter(self):
df = DataFrame(
np.random.randn(5, 2), index=range(5), columns=['x', 'y'])
df_err = DataFrame(np.random.randn(5, 2) / 5,
index=range(5), columns=['x', 'y'])
ax = _check_plot_works(df.plot.scatter, x='x', y='y')
self._check_has_errorbars(ax, xerr=0, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', xerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', xerr=df_err,
yerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=1)
def _check_errorbar_color(containers, expected, has_err='has_xerr'):
lines = []
errs = [c.lines
for c in ax.containers if getattr(c, has_err, False)][0]
for el in errs:
if is_list_like(el):
lines.extend(el)
else:
lines.append(el)
err_lines = [x for x in lines if x in ax.collections]
self._check_colors(
err_lines, linecolors=np.array([expected] * len(err_lines)))
# GH 8081
df = DataFrame(
np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e'])
ax = df.plot.scatter(x='a', y='b', xerr='d', yerr='e', c='red')
self._check_has_errorbars(ax, xerr=1, yerr=1)
_check_errorbar_color(ax.containers, 'red', has_err='has_xerr')
_check_errorbar_color(ax.containers, 'red', has_err='has_yerr')
ax = df.plot.scatter(x='a', y='b', yerr='e', color='green')
self._check_has_errorbars(ax, xerr=0, yerr=1)
_check_errorbar_color(ax.containers, 'green', has_err='has_yerr')
@pytest.mark.slow
def test_sharex_and_ax(self):
        # https://github.com/pandas-dev/pandas/issues/9737: using gridspec,
        # the axes in fig.get_axes() are sorted differently than pandas
        # expects, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
plt.close('all')
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6]})
def _check(axes):
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[0], axes[2]]:
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(
ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[1], axes[3]]:
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharex=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
# without sharex, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@pytest.mark.slow
def test_sharey_and_ax(self):
        # https://github.com/pandas-dev/pandas/issues/9737: using gridspec,
        # the axes in fig.get_axes() are sorted differently than pandas
        # expects, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6]})
def _check(axes):
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
for ax in [axes[0], axes[1]]:
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[2], axes[3]]:
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
        # without sharey, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
def test_memory_leak(self):
""" Check that every plot type gets properly collected. """
import weakref
import gc
results = {}
for kind in plotting._core._plot_klass.keys():
if not _ok_for_gaussian_kde(kind):
continue
args = {}
if kind in ['hexbin', 'scatter', 'pie']:
df = self.hexbin_df
args = {'x': 'A', 'y': 'B'}
elif kind == 'area':
df = self.tdf.abs()
else:
df = self.tdf
# Use a weakref so we can see if the object gets collected without
# also preventing it from being collected
results[kind] = weakref.proxy(df.plot(kind=kind, **args))
# have matplotlib delete all the figures
tm.close()
# force a garbage collection
gc.collect()
for key in results:
# check that every plot was collected
with pytest.raises(ReferenceError):
# need to actually access something to get an error
results[key].lines
@pytest.mark.slow
def test_df_subplots_patterns_minorticks(self):
# GH 10657
import matplotlib.pyplot as plt
df = DataFrame(np.random.randn(10, 2),
index=date_range('1/1/2000', periods=10),
columns=list('AB'))
# shared subplots
fig, axes = plt.subplots(2, 1, sharex=True)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
fig, axes = plt.subplots(2, 1)
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
# not shared
fig, axes = plt.subplots(2, 1)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@pytest.mark.slow
def test_df_gridspec_patterns(self):
# GH 10819
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
ts = Series(np.random.randn(10),
index=date_range('1/1/2000', periods=10))
df = DataFrame(np.random.randn(10, 2), index=ts.index,
columns=list('AB'))
def _get_vertical_grid():
gs = gridspec.GridSpec(3, 1)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :])
ax2 = fig.add_subplot(gs[2, :])
return ax1, ax2
def _get_horizontal_grid():
gs = gridspec.GridSpec(1, 3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:, :2])
ax2 = fig.add_subplot(gs[:, 2])
return ax1, ax2
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
ax1 = ts.plot(ax=ax1)
assert len(ax1.lines) == 1
ax2 = df.plot(ax=ax2)
assert len(ax2.lines) == 2
for ax in [ax1, ax2]:
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
tm.close()
# subplots=True
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
axes = df.plot(subplots=True, ax=[ax1, ax2])
assert len(ax1.lines) == 1
assert len(ax2.lines) == 1
for ax in axes:
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
tm.close()
# vertical / subplots / sharex=True / sharey=True
ax1, ax2 = _get_vertical_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True,
sharey=True)
assert len(axes[0].lines) == 1
assert len(axes[1].lines) == 1
for ax in [ax1, ax2]:
# yaxis are visible because there is only one column
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of axes0 (top) are hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
# horizontal / subplots / sharex=True / sharey=True
ax1, ax2 = _get_horizontal_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True,
sharey=True)
assert len(axes[0].lines) == 1
assert len(axes[1].lines) == 1
self._check_visible(axes[0].get_yticklabels(), visible=True)
# yaxis of axes1 (right) are hidden
self._check_visible(axes[1].get_yticklabels(), visible=False)
for ax in [ax1, ax2]:
# xaxis are visible because there is only one column
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# boxed
def _get_boxed_grid():
gs = gridspec.GridSpec(3, 3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :2])
ax2 = fig.add_subplot(gs[:2, 2])
ax3 = fig.add_subplot(gs[2, :2])
ax4 = fig.add_subplot(gs[2, 2])
return ax1, ax2, ax3, ax4
axes = _get_boxed_grid()
df = DataFrame(np.random.randn(10, 4),
index=ts.index, columns=list('ABCD'))
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
# axis are visible because these are not shared
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# subplots / sharex=True / sharey=True
axes = _get_boxed_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True)
for ax in axes:
assert len(ax.lines) == 1
for ax in [axes[0], axes[2]]: # left column
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[1], axes[3]]: # right column
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in [axes[0], axes[1]]: # top row
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[2], axes[3]]: # bottom row
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@pytest.mark.slow
def test_df_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(
DataFrame({'a': [1, 2, 3], 'b': [2, 3, 4]}),
plotting._core._dataframe_kinds, kws={'x': 'a', 'y': 'b'})
def test_invalid_colormap(self):
df = DataFrame(randn(3, 2), columns=['A', 'B'])
with pytest.raises(ValueError):
df.plot(colormap='invalid_colormap')
def test_plain_axes(self):
# supplied ax itself is a SubplotAxes, but the figure also contains
# a plain Axes object (GH11556)
fig, ax = self.plt.subplots()
fig.add_axes([0.2, 0.2, 0.2, 0.2])
Series(rand(10)).plot(ax=ax)
# supplied ax itself is a plain Axes, but because of the cmap keyword
# a new ax is created for the colorbar -> also multiple axes (GH11520)
df = DataFrame({'a': randn(8), 'b': randn(8)})
fig = self.plt.figure()
ax = fig.add_axes((0, 0, 1, 1))
df.plot(kind='scatter', ax=ax, x='a', y='b', c='a', cmap='hsv')
# other examples
fig, ax = self.plt.subplots()
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
Series(rand(10)).plot(ax=ax)
Series(rand(10)).plot(ax=cax)
fig, ax = self.plt.subplots()
from mpl_toolkits.axes_grid.inset_locator import inset_axes
iax = inset_axes(ax, width="30%", height=1., loc=3)
Series(rand(10)).plot(ax=ax)
Series(rand(10)).plot(ax=iax)
def test_passed_bar_colors(self):
import matplotlib as mpl
color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
colormap = mpl.colors.ListedColormap(color_tuples)
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=colormap)
assert color_tuples == [c.get_facecolor() for c in barplot.patches]
def test_rcParams_bar_colors(self):
import matplotlib as mpl
color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
try: # mpl 1.5
with mpl.rc_context(
rc={'axes.prop_cycle': mpl.cycler("color", color_tuples)}):
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar")
except (AttributeError, KeyError): # mpl 1.4
with mpl.rc_context(rc={'axes.color_cycle': color_tuples}):
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar")
assert color_tuples == [c.get_facecolor() for c in barplot.patches]
@pytest.mark.parametrize('method', ['line', 'barh', 'bar'])
def test_secondary_axis_font_size(self, method):
# GH: 12565
df = (pd.DataFrame(np.random.randn(15, 2),
columns=list('AB'))
.assign(C=lambda df: df.B.cumsum())
.assign(D=lambda df: df.C * 1.1))
fontsize = 20
sy = ['C', 'D']
kwargs = dict(secondary_y=sy, fontsize=fontsize,
mark_right=True)
ax = getattr(df.plot, method)(**kwargs)
self._check_ticks_props(axes=ax.right_ax,
ylabelsize=fontsize)
def _generate_4_axes_via_gridspec():
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.gridspec # noqa
gs = mpl.gridspec.GridSpec(2, 2)
ax_tl = plt.subplot(gs[0, 0])
ax_ll = plt.subplot(gs[1, 0])
ax_tr = plt.subplot(gs[0, 1])
ax_lr = plt.subplot(gs[1, 1])
return gs, [ax_tl, ax_ll, ax_tr, ax_lr]
| bsd-3-clause |
sodafree/backend | build/ipython/docs/examples/parallel/dagdeps.py | 6 | 3566 | """Example for generating an arbitrary DAG as a dependency map.
This demo uses networkx to generate the graph.
Authors
-------
* MinRK
"""
import networkx as nx
from random import randint, random
from IPython import parallel
def randomwait():
import time
from random import random
time.sleep(random())
return time.time()
def random_dag(nodes, edges):
"""Generate a random Directed Acyclic Graph (DAG) with a given number of nodes and edges."""
G = nx.DiGraph()
for i in range(nodes):
G.add_node(i)
while edges > 0:
a = randint(0,nodes-1)
b=a
while b==a:
b = randint(0,nodes-1)
G.add_edge(a,b)
if nx.is_directed_acyclic_graph(G):
edges -= 1
else:
# we closed a loop!
G.remove_edge(a,b)
return G
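# Illustrative usage: random_dag(10, 20) returns a DiGraph with 10 nodes and 20 edges;
# any candidate edge that would close a cycle is removed and another pair is drawn.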
def add_children(G, parent, level, n=2):
"""Add children recursively to a binary tree."""
if level == 0:
return
for i in range(n):
child = parent+str(i)
G.add_node(child)
G.add_edge(parent,child)
add_children(G, child, level-1, n)
def make_bintree(levels):
"""Make a symmetrical binary tree with @levels"""
G = nx.DiGraph()
root = '0'
G.add_node(root)
add_children(G, root, levels, 2)
return G
def submit_jobs(view, G, jobs):
"""Submit jobs via client where G describes the time dependencies."""
results = {}
for node in nx.topological_sort(G):
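# temp_flags(after=[...]) tells the load-balanced view not to start this job until all parent jobs have completed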
with view.temp_flags(after=[ results[n] for n in G.predecessors(node) ]):
results[node] = view.apply(jobs[node])
return results
def validate_tree(G, results):
"""Validate that jobs executed after their dependencies."""
for node in G:
started = results[node].metadata.started
for parent in G.predecessors(node):
finished = results[parent].metadata.completed
assert started > finished, "%s should have happened after %s"%(node, parent)
def main(nodes, edges):
"""Generate a random graph, submit jobs, then validate that the
dependency order was enforced.
Finally, plot the graph, with time on the x-axis, and
in-degree on the y (just for spread). All arrows must
point at least slightly to the right if the graph is valid.
"""
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
from matplotlib.cm import gist_rainbow
print("building DAG")
G = random_dag(nodes, edges)
jobs = {}
pos = {}
colors = {}
for node in G:
jobs[node] = randomwait
client = parallel.Client()
view = client.load_balanced_view()
print("submitting %i tasks with %i dependencies"%(nodes,edges))
results = submit_jobs(view, G, jobs)
print("waiting for results")
view.wait()
print("done")
for node in G:
md = results[node].metadata
start = date2num(md.started)
runtime = date2num(md.completed) - start
pos[node] = (start, runtime)
colors[node] = md.engine_id
validate_tree(G, results)
nx.draw(G, pos, nodelist=colors.keys(), node_color=colors.values(), cmap=gist_rainbow,
with_labels=False)
x,y = zip(*pos.values())
xmin,ymin = map(min, (x,y))
xmax,ymax = map(max, (x,y))
xscale = xmax-xmin
yscale = ymax-ymin
plt.xlim(xmin-xscale*.1,xmax+xscale*.1)
plt.ylim(ymin-yscale*.1,ymax+yscale*.1)
return G,results
if __name__ == '__main__':
from matplotlib import pyplot as plt
# main(5,10)
main(32,96)
plt.show()
| bsd-3-clause |
asoliveira/NumShip | scripts/plot/r-velo-r-zz-plt.py | 1 | 3085 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Is it dimensionless?
adi = False
#Should the figures be saved (True|False)?
save = True
#If saving, which file format is desired?
formato = 'jpg'
#If saving, in which directory should the figures be saved?
dircg = 'fig-sen'
#If saving, what is the file name?
nome = 'r-velo-r-zz'
#Which title for the plots?
titulo = ''#'Curva de ZigZag'
titulo2 = ''
#Which colour for the plots?
pc = 'k'
r1c = 'b'
r2c = 'y'
r3c = 'r'
#Line style
ps = '-'
r1s = '-'
r2s = '-'
r3s = '-'
import os
import scipy as sp
import matplotlib.pyplot as plt
from libplot import *
acelhis = sp.genfromtxt('../entrada/padrao/CurvaZigZag/velo.dat')
acelhis2 = sp.genfromtxt('../entrada/r/saida1.1/CurvaZigZag/velo.dat')
acelhis3 = sp.genfromtxt('../entrada/r/saida1.2/CurvaZigZag/velo.dat')
acelhis4 = sp.genfromtxt('../entrada/r/saida1.3/CurvaZigZag/velo.dat')
lemehis = sp.genfromtxt('../entrada/padrao/CurvaZigZag/leme.dat')
lemehis2 = sp.genfromtxt('../entrada/r/saida1.1/CurvaZigZag/leme.dat')
lemehis3 = sp.genfromtxt('../entrada/r/saida1.2/CurvaZigZag/leme.dat')
lemehis4 = sp.genfromtxt('../entrada/r/saida1.3/CurvaZigZag/leme.dat')
axl = [0, 1000, -0.7, 0.7]
axl2 = [0, 1000, -25, 25]  # for the rudder (leme)
#Plotting the turning curve (Curva de Giro)
if adi:
ylabel = r'$t\prime$'
xacellabel = r'$ r\prime$'
else:
ylabel = r'$\dot \psi \quad graus/s$'
xacellabel = r'$t \quad segundos$'
plt.subplot2grid((1,4),(0,0), colspan=3)
#Standard (padrão) case
plt.plot(acelhis[:, 0], acelhis[:, 6] * (180/sp.pi), color = pc, linestyle = ps,
linewidth = 2, label=ur'padrão')
plt.plot(acelhis2[:, 0], acelhis2[:, 6] * (180/sp.pi), color = r1c,linestyle = r1s,
linewidth = 2, label=ur'1.1--$r$')
plt.plot(acelhis3[:, 0], acelhis3[:, 6] * (180/sp.pi), color = r2c, linestyle = r2s,
linewidth = 2, label=ur'1.2--$r$')
plt.plot(acelhis4[:, 0], acelhis4[:, 6] * (180/sp.pi), color = r3c, linestyle = r3s,
linewidth = 2, label=ur'1.3--$r$')
plt.title(titulo)
plt.legend(bbox_to_anchor=(1.1, 1), loc=2, borderaxespad=0.)
plt.ylabel(ylabel)
plt.xlabel(xacellabel)
plt.axis(axl)
plt.grid(True)
plt.twinx()
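# secondary y-axis: rudder angle (leme) histories, drawn with dashed lines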
plt.plot(lemehis[:, 0], lemehis[:, 1] * (180/sp.pi), color = pc, linestyle = "--",
linewidth = 1, label=ur'leme--padrão')
plt.plot(lemehis2[:, 0], lemehis2[:, 1] * (180/sp.pi), color = r1c, linestyle = "--",
linewidth = 1, label=ur'leme--1.1$r$')
plt.plot(lemehis3[:, 0], lemehis3[:, 1] * (180/sp.pi), color = r2c, linestyle = "--",
linewidth = 1, label=ur'leme--1.2$r$')
plt.plot(lemehis4[:, 0], lemehis4[:, 1] * (180/sp.pi), color = r3c, linestyle = "--",
linewidth = 1, label=ur'leme--1.3$r$')
plt.title(titulo2)
plt.legend(bbox_to_anchor=(1.1, 0), loc=3, borderaxespad=0.)
plt.ylabel(r"$\delta_R$")
plt.axis(axl2)
plt.grid(False)
if save:
if not os.path.exists(dircg):
os.makedirs(dircg)
if os.path.exists(dircg + '/' + nome + '.' + formato):
os.remove(dircg + '/' + nome + '.' + formato)
plt.savefig(dircg + '/' + nome + '.' + formato , format=formato)
else:
plt.show()
| gpl-3.0 |
ogvalt/saturn | spiking_som.py | 1 | 18544 |
from brian2 import *
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
import matplotlib.pyplot as plt
from dataset import ArtificialDataSet
class ReceptiveField:
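# Encodes a scalar input into a bank of Gaussian receptive fields (population coding):
# each of the bank_size neurons responds most strongly when the input is close to its preferred value field_mu.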
# Parameter used in the standard deviation definition
gamma = 1.5
def __init__(self, bank_size=10, I_min=0.0, I_max=1.0):
self.bank_size = bank_size
self.field_mu = np.array([(I_min + ((2 * i - 2) / 2) * ((I_max - I_min) / (bank_size - 1)))
for i in range(1, bank_size + 1)])
self.field_sigma = (1.0 / self.gamma) * (I_max - I_min)
def float_to_membrane_potential(self, input_vector):
try:
input_vector = input_vector.reshape((input_vector.shape[0], 1))
except Exception as exc:
print("Exception: {0}\nObject shape: {1}".format(repr(exc), input_vector.shape))
exit(1)
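# Sum of three Gaussians: the main response plus copies shifted by +/- 1, which effectively
# wraps the [0, 1] input range around so values near the edges still drive the field bank.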
temp = np.exp(-((input_vector - self.field_mu) ** 2) / (2 * self.field_sigma * self.field_sigma)) / \
(np.sqrt(2 * np.pi) * self.field_sigma)
temp += np.exp(-((input_vector - 1 - self.field_mu) ** 2) / (2 * self.field_sigma * self.field_sigma)) / \
(np.sqrt(2 * np.pi) * self.field_sigma)
temp += np.exp(-((input_vector + 1 - self.field_mu) ** 2) / (2 * self.field_sigma * self.field_sigma)) / \
(np.sqrt(2 * np.pi) * self.field_sigma)
return temp
if __name__ == "__main__":
prefs.codegen.target = 'numpy'
np.random.seed(1)
seed(1)
np.set_printoptions(suppress=True)
bank_size = 10
diff_method = 'euler'
# inputs = np.random.rand(3)
# inputs = np.array([0.332, 0.167, 0.946])
# inputs = np.array([0.013, 0.3401, 0.2196])
# inputs = np.array([0.829, 0.7452, 0.6728])
# print(inputs)
# N = inputs.shape[0] * bank_size
N = 20
rf = ReceptiveField(bank_size=bank_size, I_min=0.05, I_max=0.95)
# potential_input = rf.float_to_membrane_potential(inputs)
# potential_input = potential_input.flatten()
# TABLE 1
# (A) Neuronal parameters, used in (1) and (4)
time_step = 0.01;
tau_m = 10.0 * ms;
tau_m_inh = 5 * ms;
tau_m_som = 3 * ms
theta_reset_u = -0.5;
theta_reset_inh = -0.0;
theta_reset_som = 0.0
theta_u = 0.5;
theta_u_inh = 0.01;
theta_som = 0.8
# (B) Synaptic parameters, used in (2) and (3) for different synapse types
# temporal layer to som layer (u to v)
tau_r_afferent = 0.2 * ms;
tau_f_afferent = 1.0 * ms
# temporal layer (u to inh exc, u to inh inh, inh to u)
tau_r_exc = 0.4 * ms;
tau_f_exc = 2.0 * ms;
tau_r_inh = 0.2 * ms;
tau_f_inh = 1.0 * ms
tau_r_inh2u = 1.0 * ms;
tau_f_inh2u = 5.0 * ms
# som layer
tau_r_lateral = 0.1 * ms;
tau_f_lateral = 0.5 * ms
# (C) Maximum magnitudes of synaptic connection strength
w_syn_temporal_to_som_max = 2.2;
w_syn_u2inh_exc_max = 1.0;
w_syn_u2inh_inh_max = 1.0;
w_syn_inh2u_max = 100.0
w_syn_som_to_som_max = 1.0
# (D) Neighbourhood parameters, used in (6) and (7), for layer v (som)
a = 3.0;
b = 3.0;
X = 3.0;
X_ = 3.0
# (E) Learning parameter, used in (5)
# A_plus - Max synaptic strength, A_minus - max synaptic weakness; tau_plus, tau_minus - time constant of STDP
A_plus = 0.0016;
A_minus = 0.0055;
tau_plus = 11;
tau_minus = 10
# used in (7)
T = 10.0;
power_n = 2.0
# used in (6)
pi = np.pi
# size of the self-organizing map
map_size = 10
temporal_layer_neuron_equ = '''
dtime/dt = 1 / ms : 1
# inhibition connection to u layer
ds_inh2u/dt = (-s_inh2u)/tau_r_inh2u: 1
dw_inh2u/dt = (s_inh2u - w_inh2u)/tau_f_inh2u: 1
# membrane potential of u layer
dv/dt = (-v + I_ext - w_inh2u) / tau_m: 1
I_ext : 1
'''
inhibition_neuron_equ = '''
dtime/dt = 1 / ms : 1
# inhibition connection
# s_inh - internal variable
# w_inh - output potential
ds_inh/dt = (-s_inh)/tau_r_inh: 1
dw_inh/dt = (s_inh - w_inh)/tau_f_inh: 1
# excitation connection
# s_exc - internal variable
# w_exc - output potential
ds_exc/dt = (-s_exc)/tau_r_exc: 1
dw_exc/dt = (s_exc - w_exc)/tau_f_exc: 1
# diff equation membrane potential of inhibition neuron
dv/dt = (-v + w_exc - w_inh) / tau_m_inh: 1
'''
som_layer_neuron_equ = '''
dglobal_time/dt = 1 / ms : 1
dtime/dt = 1 / ms : 1
# Afferent connection (from temporal layer to som layer)
ds_afferent/dt = (-s_afferent)/tau_r_afferent: 1
dw_afferent/dt = (s_afferent - w_afferent)/tau_f_afferent: 1
# lateral connection
ds_lateral/dt = (-s_lateral)/tau_r_lateral: 1
dw_lateral/dt = (s_lateral - w_lateral)/tau_f_lateral: 1
# membrane potential of u layer
dv/dt = (-v + w_lateral + w_afferent) / tau_m_som: 1
'''
temporal_layer = NeuronGroup(N, temporal_layer_neuron_equ, threshold='v>theta_u', method=diff_method,
reset='''v = theta_reset_u; time = 0''')
# temporal_layer.I_ext = potential_input
# inhibition neuron
inhibition_neuron = NeuronGroup(1, inhibition_neuron_equ, threshold='v>theta_u_inh', method=diff_method,
reset='''v = theta_reset_inh; time = 0''')
# self-organizing layer
som_layer = NeuronGroup(map_size * map_size, som_layer_neuron_equ, threshold='v>theta_som', method=diff_method,
reset='''v = theta_reset_som; time = 0''')
# v to inh neuron, excitation connection
u2inh_excitation = Synapses(temporal_layer, target=inhibition_neuron, method=diff_method,
on_pre='''
s_exc += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) ** time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_u2inh_exc_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) ** time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_u2inh_exc_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
u2inh_excitation.connect(i=np.arange(N), j=0)
u2inh_excitation.w_syn = 'rand() * w_syn_u2inh_exc_max'
# v to inh neuron, inhibition connection
u2inh_inhibition = Synapses(temporal_layer, target=inhibition_neuron, method=diff_method,
on_pre='''
s_inh += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) * time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_u2inh_inh_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) * time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_u2inh_inh_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
u2inh_inhibition.connect(i=np.arange(N), j=0)
u2inh_inhibition.w_syn = 'rand() * w_syn_u2inh_inh_max'
# inh neuron to v, inhibition connection
inh2u_inhibition = Synapses(inhibition_neuron, target=temporal_layer, method=diff_method,
on_pre='''
s_inh2u += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) * time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_inh2u_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) * time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_inh2u_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
inh2u_inhibition.connect(i=0, j=np.arange(N))
# inh2u_inhibition.w_syn = 'rand() * w_syn_inh2u_max'
inh2u_inhibition.w_syn = 0.5 * w_syn_inh2u_max
# som lateral connection
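# lateral weights follow a neighbourhood kernel built from two Gaussians of different widths (a, b);
# its radius shrinks from X towards X_ as global_time grows (T, power_n), so the effective
# neighbourhood narrows over training, as in a classical SOM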
som_synapse = Synapses(som_layer, target=som_layer, method=diff_method,
on_pre='''
radius = X - (X - X_)/(1+(2**0.5 - 1)*((global_time/T)**(2 * power_n)))
y_pre = floor(i / map_size)
x_pre = i - y_pre * map_size
y_post = floor(j/map_size)
x_post = j - y_post * map_size
dist = (x_post - x_pre)**2 + (y_post - y_pre)**2
G1 = (1 + a) * exp(- dist/(radius**2)) / (2 * pi * radius**2)
G2 = a * exp(- dist/(b * radius)**2) / (2 * pi * (b * radius)**2)
w_syn = clip(G1 + G2, 0, w_syn_som_to_som_max)
s_lateral += w_syn
''',
on_post='''
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
''')
som_synapse.connect(condition='i!=j')
# som afferent connection
temporal_to_som_synapse = Synapses(temporal_layer, target=som_layer, method=diff_method,
on_pre='''
s_afferent += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) ** time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_temporal_to_som_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) * time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_temporal_to_som_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
temporal_to_som_synapse.connect()
temporal_to_som_synapse.w_syn = np.random.randint(low=40000, high=60000, size=N*map_size*map_size) \
* w_syn_temporal_to_som_max / 100000.0
# Visualization
som_spike_mon = SpikeMonitor(som_layer)
u_spike_mon = SpikeMonitor(temporal_layer)
# u_state_mon_v = StateMonitor(temporal_layer, 'v', record=True)
# u_state_mon_time = StateMonitor(temporal_layer, 'time', record=True)
# u_state_mon_w = StateMonitor(temporal_layer, 'w_inh2u', record=True)
inh_spike_mon = SpikeMonitor(inhibition_neuron)
# inh_state_mon = StateMonitor(inhibition_neuron, 'v', record=True)
# w_exc_neu_state = StateMonitor(inhibition_neuron, 'w_exc', record=True)
# w_inh_neu_state = StateMonitor(inhibition_neuron, 'w_inh', record=True)
#
# w_syn_u2inh_exc = StateMonitor(u2inh_excitation, 'w_syn', record=True)
defaultclock.dt = time_step * ms
step = 2
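# freeze STDP on the temporal-layer and inhibition synapses below; only the afferent
# (temporal layer -> SOM) connection keeps plasticity enabled in this run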
plasticity_state = False
u2inh_excitation.plasticity = plasticity_state
u2inh_inhibition.plasticity = plasticity_state
inh2u_inhibition.plasticity = plasticity_state
temporal_to_som_synapse.plasticity = True # plasticity_state
# simulation_time = 200
# run(simulation_time * ms, report='text')
# weight visualization
# simulation
simulation_time = 50
attempts = 5
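# each input vector is presented 'attempts' times in a row; the network state is restored before and stored after every presentation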
dataset = ArtificialDataSet(500, int(N/10))
dataset = dataset.generate_set()
np.savetxt('dataset.txt', dataset, delimiter=';')
plt.scatter(dataset[:, 0], dataset[:, 1], s=5)
plt.show()
net_model = Network(collect())
net_model.store()
for vector in dataset:
for it in range(attempts):
net_model.restore()
print("Input vector: {0}, attempt: {1}".format(vector, it))
potential_input = rf.float_to_membrane_potential(vector)
potential_input = potential_input.flatten()
temporal_layer.I_ext = potential_input
net_model.run(simulation_time * ms, report='text')
net_model.store()
# visual
app = QtGui.QApplication([])
win = pg.GraphicsWindow(title="som")
win.resize(1000, 600)
win.setWindowTitle('brain')
# Enable antialiasing for prettier plots
pg.setConfigOptions(antialias=True)
p1 = win.addPlot(title="Region Selection")
p1.plot(u_spike_mon.t / ms, u_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p1.showGrid(x=True, y=True)
lr = pg.LinearRegionItem([0, simulation_time])
lr.setZValue(0)
p1.addItem(lr)
p2 = win.addPlot(title="Zoom on selected region")
p2.plot(u_spike_mon.t / ms, u_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p2.showGrid(x=True, y=True)
def updatePlot():
p2.setXRange(*lr.getRegion(), padding=0)
def updateRegion():
lr.setRegion(p2.getViewBox().viewRange()[0])
lr.sigRegionChanged.connect(updatePlot)
p2.sigXRangeChanged.connect(updateRegion)
updatePlot()
win.nextRow()
p3 = win.addPlot(title="Region Selection")
p3.plot(som_spike_mon.t / ms, som_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p3.showGrid(x=True, y=True)
lr1 = pg.LinearRegionItem([0, 10])
lr1.setZValue(0)
p3.addItem(lr1)
p4 = win.addPlot(title="Zoom on selected region")
p4.plot(som_spike_mon.t / ms, som_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p4.showGrid(x=True, y=True)
def updatePlot2():
p4.setXRange(*lr1.getRegion(), padding=0)
def updateRegion2():
lr1.setRegion(p4.getViewBox().viewRange()[0])
lr1.sigRegionChanged.connect(updatePlot2)
p4.sigXRangeChanged.connect(updateRegion2)
updatePlot2()
u2som_syn_shape = temporal_to_som_synapse.w_syn[:].shape
picture = temporal_to_som_synapse.w_syn[:].reshape(N, int(u2som_syn_shape[0] / N))
np.savetxt('weights.txt', picture, delimiter=';')
win2 = QtGui.QMainWindow()
win2.resize(800, 800)
imv = pg.ImageView()
win2.setCentralWidget(imv)
win2.show()
win2.setWindowTitle("SOM weights")
imv.setImage(picture)
# subplot(421)
# # subplot(111)
# title("Temporal layer spikes")
# plot(u_spike_mon.t / ms, u_spike_mon.i, '.k')
# xlabel('Time (ms)')
# ylabel('Neuron index')
# grid(True)
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-1, N + 1, 1))
#
# # show()
#
# subplot(422)
# title("Inhibition neuron spikes")
# plot(inh_spike_mon.t / ms, inh_spike_mon.i, '.k')
# xlabel('Time (ms)')
# ylabel('Neuron index')
# grid(True)
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-1, 1, 1))
#
# subplot(423)
# title("u membrane potential")
# for item in u_state_mon_v:
# plot(u_state_mon_v.t / ms, item.v)
# # plot(u_state_mon_v.t / ms, u_state_mon_v[0].v)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(424)
# title("Inhibition neuron membrane potential")
# plot(inh_state_mon.t / ms, inh_state_mon[0].v)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(425)
# title("Excitation/inhibition interaction")
# plot(w_exc_neu_state.t / ms, w_exc_neu_state[0].w_exc, w_exc_neu_state.t / ms, w_inh_neu_state[0].w_inh,
# w_exc_neu_state.t / ms, w_exc_neu_state[0].w_exc - w_inh_neu_state[0].w_inh)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(426)
# title("Inhibition to u potential")
# plot(u_state_mon_w.t / ms, u_state_mon_w[0].w_inh2u)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(427)
# title("Synaptic Weight")
# for item in w_syn_u2inh_exc:
# plot(w_syn_u2inh_exc.t / ms, item.w_syn)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-0.1, 1.1, 0.1))
#
# subplot(428)
# title("Synaptic time pre spike")
# for item in u_state_mon_time:
# plot(w_syn_u2inh_exc.t / ms, item.time)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# show()
#
# # subplot(111)
# title("Som layer spikes")
# plot(som_spike_mon.t / ms, som_spike_mon.i, '.k')
# xlabel('Time (ms)')
# ylabel('Neuron index')
# grid(True)
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-1, map_size * map_size + 1, 1))
#
# show()
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| mit |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/tests/test_backend_pdf.py | 2 | 6994 | # -*- encoding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import io
import os
import tempfile
import pytest
import numpy as np
from matplotlib import checkdep_usetex, cm, rcParams
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt
from matplotlib.testing.determinism import (_determinism_source_date_epoch,
_determinism_check)
from matplotlib.testing.decorators import image_comparison
from matplotlib import dviread
from matplotlib.testing.compare import compare_images
import matplotlib as mpl
needs_usetex = pytest.mark.xfail(
not checkdep_usetex(True),
reason="This test needs a TeX installation")
@image_comparison(baseline_images=['pdf_use14corefonts'],
extensions=['pdf'])
def test_use14corefonts():
rcParams['pdf.use14corefonts'] = True
rcParams['font.family'] = 'sans-serif'
rcParams['font.size'] = 8
rcParams['font.sans-serif'] = ['Helvetica']
rcParams['pdf.compression'] = 0
text = '''A three-line text positioned just above a blue line
and containing some French characters and the euro symbol:
"Merci pépé pour les 10 €"'''
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_title('Test PDF backend with option use14corefonts=True')
ax.text(0.5, 0.5, text, horizontalalignment='center',
verticalalignment='bottom',
fontsize=14)
ax.axhline(0.5, linewidth=0.5)
def test_type42():
rcParams['pdf.fonttype'] = 42
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
fig.savefig(io.BytesIO())
def test_multipage_pagecount():
with PdfPages(io.BytesIO()) as pdf:
assert pdf.get_pagecount() == 0
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
fig.savefig(pdf, format="pdf")
assert pdf.get_pagecount() == 1
pdf.savefig()
assert pdf.get_pagecount() == 2
def test_multipage_keep_empty():
from matplotlib.backends.backend_pdf import PdfPages
from tempfile import NamedTemporaryFile
# test empty pdf files
# test that an empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
assert os.path.exists(filename)
os.remove(filename)
# test if an empty pdf is deleting itself afterwards with keep_empty=False
with PdfPages(filename, keep_empty=False) as pdf:
pass
assert not os.path.exists(filename)
# test pdf files with content, they should never be deleted
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
# test that a non-empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
# test that a non-empty pdf is left behind with keep_empty=False
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp, keep_empty=False) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
def test_composite_image():
# Test that figures can be saved with and without combining multiple images
# (on a single set of axes) into a single composite image.
X, Y = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1))
Z = np.sin(Y ** 2)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(0, 3)
ax.imshow(Z, extent=[0, 1, 0, 1])
ax.imshow(Z[::-1], extent=[2, 3, 0, 1])
plt.rcParams['image.composite_image'] = True
with PdfPages(io.BytesIO()) as pdf:
fig.savefig(pdf, format="pdf")
assert len(pdf._file._images) == 1
plt.rcParams['image.composite_image'] = False
with PdfPages(io.BytesIO()) as pdf:
fig.savefig(pdf, format="pdf")
assert len(pdf._file._images) == 2
def test_source_date_epoch():
"""Test SOURCE_DATE_EPOCH support for PDF output"""
_determinism_source_date_epoch("pdf", b"/CreationDate (D:20000101000000Z)")
def test_determinism_plain():
"""Test for reproducible PDF output: simple figure"""
_determinism_check('', format="pdf")
def test_determinism_images():
"""Test for reproducible PDF output: figure with different images"""
_determinism_check('i', format="pdf")
def test_determinism_hatches():
"""Test for reproducible PDF output: figure with different hatches"""
_determinism_check('h', format="pdf")
def test_determinism_markers():
"""Test for reproducible PDF output: figure with different markers"""
_determinism_check('m', format="pdf")
def test_determinism_all():
"""Test for reproducible PDF output"""
_determinism_check(format="pdf")
@image_comparison(baseline_images=['hatching_legend'],
extensions=['pdf'])
def test_hatching_legend():
"""Test for correct hatching on patches in legend"""
fig = plt.figure(figsize=(1, 2))
a = plt.Rectangle([0, 0], 0, 0, facecolor="green", hatch="XXXX")
b = plt.Rectangle([0, 0], 0, 0, facecolor="blue", hatch="XXXX")
fig.legend([a, b, a, b], ["", "", "", ""])
@image_comparison(baseline_images=['grayscale_alpha'],
extensions=['pdf'])
def test_grayscale_alpha():
"""Masking images with NaN did not work for grayscale images"""
x, y = np.ogrid[-2:2:.1, -2:2:.1]
dd = np.exp(-(x**2 + y**2))
dd[dd < .1] = np.nan
fig, ax = plt.subplots()
ax.imshow(dd, interpolation='none', cmap='gray_r')
ax.set_xticks([])
ax.set_yticks([])
# This tests tends to hit a TeX cache lock on AppVeyor.
@pytest.mark.flaky(reruns=3)
@needs_usetex
def test_missing_psfont(monkeypatch):
"""An error is raised if a TeX font lacks a Type-1 equivalent"""
def psfont(*args, **kwargs):
return dviread.PsFont(texname='texfont', psname='Some Font',
effects=None, encoding=None, filename=None)
monkeypatch.setattr(dviread.PsfontsMap, '__getitem__', psfont)
rcParams['text.usetex'] = True
fig, ax = plt.subplots()
ax.text(0.5, 0.5, 'hello')
with tempfile.TemporaryFile() as tmpfile, pytest.raises(ValueError):
fig.savefig(tmpfile, format='pdf')
@pytest.mark.style('default')
def test_pdf_savefig_when_color_is_none(tmpdir):
fig, ax = plt.subplots()
plt.axis('off')
ax.plot(np.sin(np.linspace(-5, 5, 100)), 'v', c='none')
actual_image = tmpdir.join('figure.pdf')
expected_image = tmpdir.join('figure.eps')
fig.savefig(str(actual_image), format='pdf')
fig.savefig(str(expected_image), format='eps')
result = compare_images(str(actual_image), str(expected_image), 0)
assert result is None
| mit |
miguelzuma/montepython_zuma | montepython/analyze.py | 1 | 95056 | """
.. module:: analyze
:synopsis: Extract data from chains and produce plots
.. moduleauthor:: Karim Benabed <benabed@iap.fr>
.. moduleauthor:: Benjamin Audren <benjamin.audren@epfl.ch>
Collection of functions needed to analyze the Markov chains.
This module defines as well a class :class:`Information`, that stores useful
quantities, and shortens the argument passing between the functions.
.. note::
Some of the methods used in this module are directly adapted from the
`CosmoPmc <http://www.cosmopmc.info>`_ code from Kilbinger et. al.
"""
import os
import math
import numpy as np
from itertools import count
# The root plotting module, to change options like font sizes, etc...
import matplotlib
# The following line suppresses the need for an X server
matplotlib.use("Agg")
# Module for handling display
import matplotlib.pyplot as plt
# Module to handle warnings from matplotlib
import warnings
import importlib
import io_mp
from itertools import ifilterfalse
from itertools import ifilter
import scipy.ndimage
# Defined to remove the burnin for all the points that were produced before the
# first time where -log-likelihood <= min-minus-log-likelihood+LOG_LKL_CUTOFF
LOG_LKL_CUTOFF = 3
NUM_COLORS = 6
def analyze(command_line):
"""
Main function, does the entire analysis.
It calls in turn all the other routines from this module. To limit the
arguments of each function to a reasonable size, a :class:`Information`
instance is used. This instance is initialized in this function, then
appended by the other routines.
"""
# Check if the scipy module has the interpolate method correctly
# installed (should be the case on every linux distribution with
# standard numpy)
try:
from scipy.interpolate import interp1d
Information.has_interpolate_module = True
except ImportError:
Information.has_interpolate_module = False
warnings.warn(
'No cubic interpolation done (no interpolate method found ' +
'in scipy), only linear')
# Determine how many different folders are asked through the 'info'
# command, and create as many Information instances
files = separate_files(command_line.files)
# Create an instance of the Information class for each subgroup found in
# the previous function. They will each hold all relevant information, and
# be used as a compact way of exchanging information between functions
information_instances = []
for item in files:
info = Information(command_line)
information_instances.append(info)
# Prepare the files, according to the case, load the log.param, and
# prepare the output (plots folder, .covmat, .info and .log files).
# After this step, info.files will contain all chains.
status = prepare(item, info)
# If the preparation step generated new files (for instance,
# translating from NS or CH to Markov Chains) this routine should stop
# now.
if not status:
return
# Compute the mean, maximum of likelihood, 1-sigma variance for this
# main folder. This will create the info.chain object, which contains
# all the points computed stacked in one big array.
convergence(info)
# check if analyze() is called directly by the user, or by the mcmc loop during an updating phase
try:
# command_line.update is defined when called by the mcmc loop
command_line.update
except:
# in case it was not defined (i.e. when analyze() is called directly by user), set it to False
command_line.update = 0
# compute covariance matrix, except when we are in update mode and convergence is too bad or too good
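# (here info.R holds R-1 values: above 3 the chains are too poorly mixed for a meaningful
# covariance, below 0.4 convergence is already good enough that updating it is not needed)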
if command_line.update and (np.amax(info.R) > 3. or np.amax(info.R) < 0.4):
print '--> Not computing covariance matrix'
else:
try:
if command_line.want_covmat:
print '--> Computing covariance matrix'
info.covar = compute_covariance_matrix(info)
# Writing it out in name_of_folder.covmat
io_mp.write_covariance_matrix(
info.covar, info.backup_names, info.cov_path)
except:
print '--> Computing covariance matrix failed'
pass
# Store an array, sorted_indices, containing the list of indices
# corresponding to the line with the highest likelihood as the first
# element, and then as decreasing likelihood
info.sorted_indices = info.chain[:, 1].argsort(0)
# Writing the best-fit model in name_of_folder.bestfit
bestfit_line = [elem*info.scales[i, i] for i, elem in
enumerate(info.chain[info.sorted_indices[0], 2:])]
io_mp.write_bestfit_file(bestfit_line, info.backup_names,
info.best_fit_path)
if not command_line.minimal:
# Computing 1,2 and 3-sigma errors, and plot. This will create the
# triangle and 1d plot by default.
compute_posterior(information_instances)
print '--> Writing .info and .tex files'
for info in information_instances:
info.write_information_files()
# when called by MCMC in update mode, return R values so that they can be written for information in the chains
if command_line.update:
return info.R
def prepare(files, info):
"""
Scan the whole input folder, and include all chains in it.
Since you can decide to analyze some file(s), or a complete folder, this
function first needs to separate between the two cases.
.. warning::
If someday you change the way the chains are named, remember to change
here too, because this routine assumes the chains have a double
underscore in their names.
.. note::
Only files ending with .txt will be selected, to keep compatibility
with CosmoMC format
.. note::
New in version 2.0.0: if you ask to analyze a Nested Sampling
sub-folder (i.e. something that ends in `NS` with capital letters), the
analyze module will translate the output from Nested Sampling to
standard chains for Monte Python, and stops. You can then run the
`-- info` flag on the whole folder. **This procedure is not necessary
if the run was complete, but only if the Nested Sampling run was killed
before completion**.
Parameters
----------
files : list
list of potentially only one element, containing the files to analyze.
This can be only one file, or the encompassing folder, files
info : Information instance
Used to store the result
"""
# First test if the folder is a Nested Sampling or CosmoHammer folder. If
# so, call the module's own routine through the clean conversion function,
# which will translate the output of this other sampling into MCMC chains
# that can then be analyzed.
modules = ['nested_sampling', 'cosmo_hammer']
tags = ['NS', 'CH']
for module_name, tag in zip(modules, tags):
action_done = clean_conversion(module_name, tag, files[0])
if action_done:
return False
# If the input command was an entire folder, then grab everything in it.
# Too small files (below 600 octets) and subfolders are automatically
# removed.
folder, files, basename = recover_folder_and_files(files)
info.files = files
info.folder = folder
info.basename = basename
# Check if the log.param file exists
parameter_file_path = os.path.join(folder, 'log.param')
if os.path.isfile(parameter_file_path):
if os.path.getsize(parameter_file_path) == 0:
raise io_mp.AnalyzeError(
"The log param file %s " % os.path.join(folder, 'log.param') +
"seems empty")
else:
raise io_mp.AnalyzeError(
"The log param file %s " % os.path.join(folder, 'log.param') +
"is missing in the analyzed folder?")
# If the folder has no subdirectory, then go for a simple infoname,
# otherwise, call it with the last name
basename = (os.path.basename(folder) if os.path.basename(folder) != '.'
else os.path.basename(os.path.abspath(
os.path.join(folder, '..'))))
info.v_info_path = os.path.join(folder, basename+'.v_info')
info.h_info_path = os.path.join(folder, basename+'.h_info')
info.tex_path = os.path.join(folder, basename+'.tex')
info.cov_path = os.path.join(folder, basename+'.covmat')
info.log_path = os.path.join(folder, basename+'.log')
info.best_fit_path = os.path.join(folder, basename+'.bestfit')
info.param_path = parameter_file_path
return True
def convergence(info):
"""
Compute convergence for the desired chains, using Gelman-Rubin diagnostic
Chains have been stored in the info instance of :class:`Information`. Note
that the G-R diagnostic can be computed for a single chain, albeit it will
most probably give absurd results. To do so, it separates the chain into
three subchains.
"""
# Recovering parameter names and scales, creating tex names,
extract_parameter_names(info)
# Now that the number of parameters is known, the array containing bounds
# can be initialised
info.bounds = np.zeros((len(info.ref_names), len(info.levels), 2))
# Circle through all files to find the global maximum of likelihood
#print '--> Finding global maximum of likelihood'
find_maximum_of_likelihood(info)
# Restarting the circling through files, this time removing the burnin,
# given the maximum of likelihood previously found and the global variable
# LOG_LKL_CUTOFF. spam now contains all the accepted points that were
# explored once the chain moved within min_minus_lkl - LOG_LKL_CUTOFF.
# If the user asks for a keep_fraction <1, this is also the place where
# a fraction (1-keep_fraction) is removed at the beginning of each chain.
#print '--> Removing burn-in'
spam = remove_bad_points(info)
info.remap_parameters(spam)
# Now that the list spam contains all the different chains removed of
# their respective burn-in, proceed to the convergence computation
# 2D arrays for mean and var, one column will contain the total (over
# all chains) mean (resp. variance), and each other column the
# respective chain mean (resp. chain variance). R only contains the
# values for each parameter. Therefore, mean and var will have len(spam)+1
# as a first dimension
mean = np.zeros((len(spam)+1, info.number_parameters))
var = np.zeros((len(spam)+1, info.number_parameters))
R = np.zeros(info.number_parameters)
# Store the total number of points, and the total in each chain
total = np.zeros(len(spam)+1)
for j in xrange(len(spam)):
total[j+1] = spam[j][:, 0].sum()
total[0] = total[1:].sum()
# Compute mean and variance for each chain
print '--> Computing mean values'
compute_mean(mean, spam, total)
print '--> Computing variance'
compute_variance(var, mean, spam, total)
print '--> Computing convergence criterion (Gelman-Rubin)'
# Gelman Rubin Diagnostic:
# Computes a quantity linked to the ratio of the mean of the variances of
# the different chains (within), and the variance of the means (between)
# Note: This is not strictly speaking the Gelman Rubin test, defined for
# same-length MC chains. Our quantity is defined without the square root,
# which should not change much the result: a small sqrt(R) will still be a
# small R. The same convention is used in CosmoMC, except for the weighted
# average: we decided to do the average taking into account that longer
# chains should count more
within = 0
between = 0
for i in xrange(np.shape(mean)[1]):
for j in xrange(len(spam)):
within += total[j+1]*var[j+1, i]
between += total[j+1]*(mean[j+1, i]-mean[0, i])**2
within /= total[0]
between /= (total[0]-1)
R[i] = between/within
if i == 0:
print ' -> R-1 is %.6f' % R[i], '\tfor ', info.ref_names[i]
else:
print ' %.6f' % R[i], '\tfor ', info.ref_names[i]
# Log finally the total number of steps, and absolute loglikelihood
with open(info.log_path, 'a') as log:
log.write("--> Total number of steps: %d\n" % (
info.steps))
log.write("--> Total number of accepted steps: %d\n" % (
info.accepted_steps))
log.write("--> Minimum of -logLike : %.2f" % (
info.min_minus_lkl))
# Store the remaining members in the info instance, for further writing to
# files, storing only the mean and total of all the chains taken together
info.mean = mean[0]
info.R = R
info.total = total[0]
# Create the main chain, which consists in all elements of spam
# put together. This will serve for the plotting.
info.chain = np.vstack(spam)
def compute_posterior(information_instances):
"""
computes the marginalized posterior distributions, and optionally plots
them
Parameters
----------
information_instances : list
list of information objects, initialised on the given folders, or list
of files, in input. For each of these instances, plot the 1d and 2d
posterior distribution, depending on the flags stored in the instances,
coming from command line arguments or read from a file.
"""
# For convenience, store as `conf` the first element of the list
# information_instances, since it will be called often to check for
# configuration parameters
conf = information_instances[0]
# Pre configuration of the output, note that changes to the font size
# will occur later on as well, to obtain a nice scaling.
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=11)
matplotlib.rc('xtick', labelsize='8')
matplotlib.rc('ytick', labelsize='8')
# Recover max and min values for each instance, defining the a priori place
# of ticks (in case of a comparison, this should change)
for info in information_instances:
info.define_ticks()
# If plots/ folder in output folder does not exist, create it
if os.path.isdir(os.path.join(info.folder, 'plots')) is False:
os.mkdir(os.path.join(info.folder, 'plots'))
# Determine the total number of parameters to plot, based on the list
# without duplicates of the plotted parameters of all information instances
plotted_parameters = []
# For printing not in latex
ref_names = []
for info in information_instances:
for index, name in enumerate(info.plotted_parameters):
if name not in plotted_parameters:
plotted_parameters.append(name)
ref_names.append(info.ref_names[index])
if len(plotted_parameters) == 0:
raise io_mp.AnalyzeError(
"You provided no parameters to analyze, probably by selecting"
" wrong parameters names in the '--extra' file.")
# Find the appropriate number of columns and lines for the 1d posterior
# plot
if conf.num_columns_1d == None:
num_columns = int(round(math.sqrt(len(plotted_parameters))))
else:
num_columns = conf.num_columns_1d
num_lines = int(math.ceil(len(plotted_parameters)*1.0/num_columns))
# For special needs, you can impose here a different number of columns and lines in the 1d plot
# Here is a commented example:
# if (len(plotted_parameters) == 10):
# num_columns = 5
# num_lines = 2
# Create the figures
# which will be 3*3 inches per subplot, quickly growing!
if conf.plot:
fig1d = plt.figure(num=1, figsize=(
3*num_columns,
3*num_lines), dpi=80)
if conf.plot_2d:
fig2d = plt.figure(num=2, figsize=(
3*len(plotted_parameters),
3*len(plotted_parameters)), dpi=80)
# Create the name of the files, concatenating the basenames with
# underscores.
file_name = "_".join(
[info.basename for info in information_instances])
# Loop over all the plotted parameters
# There will be two indices at all time, the one running over the plotted
# parameters, `index`, and the one corresponding to the actual column in
# the actual file, `native_index`. For instance, if you try to plot only
# two columns of a several columns file, index will vary from 0 to 1, but
# the corresponding native indices might be anything.
# Obviously, since plotted parameters contain potentially names not
# contained in some files (in case of a comparison), native index might be
# undefined.
# Defined the legends object, which will store the plot style, to display
# at the level of the figure
legends = [None for _ in range(len(information_instances))]
if not conf.legendnames:
legend_names = [info.basename.replace('_', ' ')
for info in information_instances]
else:
legend_names = conf.legendnames
print '-----------------------------------------------'
for index, name in enumerate(plotted_parameters):
# Adding the subplots to the respective figures, this will correspond
# to the diagonal on the triangle plot.
if conf.plot_2d:
ax2d = fig2d.add_subplot(
len(plotted_parameters),
len(plotted_parameters),
index*(len(plotted_parameters)+1)+1,
yticks=[])
if conf.plot:
ax1d = fig1d.add_subplot(
num_lines, num_columns, index+1, yticks=[])
# check for each instance if the name is part of the list of plotted
# parameters, and if yes, store the native_index. If not, store a flag
# to ignore any further plotting or computing issues concerning this
# particular instance.
for info in information_instances:
try:
info.native_index = info.ref_names.index(name)
info.ignore_param = False
standard_name = info.backup_names[info.native_index]
except ValueError:
info.ignore_param = True
# The limits might have been enforced by the user
if name in conf.force_limits.iterkeys():
x_span = conf.force_limits[name][1]-conf.force_limits[name][0]
tick_min = conf.force_limits[name][0] +0.1*x_span
tick_max = conf.force_limits[name][1] -0.1*x_span
ticks = np.linspace(tick_min,
tick_max,
info.ticknumber)
for info in information_instances:
if not info.ignore_param:
info.x_range[info.native_index] = conf.force_limits[name]
info.ticks[info.native_index] = ticks
# otherwise, find them automatically
else:
adjust_ticks(name, information_instances)
print ' -> Computing histograms for ', name
for info in information_instances:
if not info.ignore_param:
# 1D posterior normalised to P_max=1 (first step)
#
# simply the histogram from the chains, with few bins
#
info.hist, info.bin_edges = np.histogram(
info.chain[:, info.native_index+2], bins=info.bins,
weights=info.chain[:, 0], normed=False, density=False)
info.hist = info.hist/info.hist.max()
info.bincenters = 0.5*(info.bin_edges[1:]+info.bin_edges[:-1])
# 1D posterior normalised to P_max=1 (second step)
#
# returns a histogram still normalised to one, but with a ten times finer sampling;
# >> first, tries a method with spline interpolation between bin centers and extrapolation at the edges
# >> if it fails, a simpler and more robust method of linear interpolation between bin centers is used
# >> if the interpolation module is not installed, this step keeps the same posterior
#
info.interp_hist, info.interp_grid = cubic_interpolation(
info, info.hist, info.bincenters)
# minimum credible interval (method by Jan Haman). Fails for
# multimodal histograms
bounds = minimum_credible_intervals(info)
info.bounds[info.native_index] = bounds
# plotting
for info in information_instances:
if not info.ignore_param:
# 1D posterior normalised to P_max=1 (third step, used only for plotting)
#
# apply gaussian smoothing
#
# factor by which the grid has been made thinner (10 means 10 times more bins)
interpolation_factor = float(len(info.interp_grid))/float(len(info.bincenters))
# factor for gaussian smoothing
sigma = interpolation_factor*info.gaussian_smoothing
# smooth
smoothed_interp_hist = scipy.ndimage.filters.gaussian_filter(info.interp_hist,sigma)
# re-normalised
smoothed_interp_hist = smoothed_interp_hist/smoothed_interp_hist.max()
if conf.plot_2d:
##################################################
# plot 1D posterior in diagonal of triangle plot #
##################################################
plot = ax2d.plot(
info.interp_grid,
smoothed_interp_hist,
linewidth=info.line_width, ls='-',
color = info.MP_color_cycle[info.id][1],
# the [1] picks up the color of the 68% contours
# with [0] you would get that of the 95% contours
alpha = info.alphas[info.id])
legends[info.id] = plot[0]
ax2d.set_xticks(info.ticks[info.native_index])
if conf.legend_style == 'top':
ax2d.set_title(
'%s=$%.{0}g^{{+%.{0}g}}_{{%.{0}g}}$'.format(
info.decimal) % (
info.tex_names[info.native_index],
info.mean[info.native_index],
info.bounds[info.native_index, 0, -1],
info.bounds[info.native_index, 0, 0]),
fontsize=info.fontsize)
ax2d.set_xticklabels(
['%.{0}g'.format(info.decimal) % s
for s in info.ticks[info.native_index]],
fontsize=info.ticksize)
elif conf.legend_style == 'sides':
# Except for the last 1d plot (bottom line), don't
# print ticks
if index == len(plotted_parameters)-1:
ax2d.set_xticklabels(
['%.{0}g'.format(info.decimal) % s
for s in info.ticks[info.native_index]],
fontsize=info.ticksize)
ax2d.tick_params('x',direction='inout')
ax2d.set_xlabel(
info.tex_names[info.native_index],
fontsize=info.fontsize)
else:
ax2d.set_xticklabels([])
ax2d.axis([info.x_range[info.native_index][0],
info.x_range[info.native_index][1],
0, 1.05])
if conf.plot:
if conf.short_title_1d:
ax1d.set_title(
'%s'.format(info.decimal) % (
info.tex_names[info.native_index]),
fontsize=info.fontsize)
else:
# Note the use of double curly brackets {{ }} to produce
# the desired LaTeX output. This is necessary because the
# format function would otherwise understand single
# brackets as fields.
ax1d.set_title(
'%s=$%.{0}g^{{+%.{0}g}}_{{%.{0}g}}$'.format(
info.decimal) % (
info.tex_names[info.native_index],
info.mean[info.native_index],
info.bounds[info.native_index, 0, -1],
info.bounds[info.native_index, 0, 0]),
fontsize=info.fontsize)
ax1d.set_xticks(info.ticks[info.native_index])
ax1d.set_xticklabels(
['%.{0}g'.format(info.decimal) % s
for s in info.ticks[info.native_index]],
fontsize=info.ticksize)
ax1d.axis([info.x_range[info.native_index][0],
info.x_range[info.native_index][1],
0, 1.05])
# Execute some customisation scripts for the 1d plots
if (info.custom1d != []):
for elem in info.custom1d:
execfile('plot_files/'+elem)
##################################################
# plot 1D posterior in 1D plot #
##################################################
ax1d.plot(
info.interp_grid,
# gaussian filtered 1d posterior:
smoothed_interp_hist,
# raw 1d posterior:
#info.interp_hist,
lw=info.line_width, ls='-',
color = info.MP_color_cycle[info.id][1],
# the [1] picks up the color of the 68% contours
# with [0] you would get that of the 95% contours
alpha = info.alphas[info.id])
# uncomment if you want to see the raw points from the histogram
                # (to check whether the interpolation and smoothing generated artefacts)
#ax1d.plot(
# info.bincenters,
# info.hist,
# 'ro')
if conf.mean_likelihood:
for info in information_instances:
if not info.ignore_param:
try:
# 1D mean likelihood normalised to P_max=1 (first step)
#
                    # simply the histogram from the chains, weighted by multiplicity*likelihood
#
lkl_mean, _ = np.histogram(
info.chain[:, info.native_index+2],
bins=info.bin_edges,
normed=False,
weights=np.exp(
conf.min_minus_lkl-info.chain[:, 1])*info.chain[:, 0])
lkl_mean /= lkl_mean.max()
# 1D mean likelihood normalised to P_max=1 (second step)
#
# returns a histogram still normalised to one, but with a ten times finer sampling;
# >> first, tries a method with spline interpolation between bin centers and extrapolation at the edges
# >> if it fails, a simpler and more robust method of linear interpolation between bin centers is used
# >> if the interpolation module is not installed, this step keeps the same posterior
#
interp_lkl_mean, interp_grid = cubic_interpolation(
info, lkl_mean, info.bincenters)
# 1D mean likelihood normalised to P_max=1 (third step, used only for plotting)
#
# apply gaussian smoothing
#
# smooth
smoothed_interp_lkl_mean = scipy.ndimage.filters.gaussian_filter(interp_lkl_mean,sigma)
# re-normalised
smoothed_interp_lkl_mean = smoothed_interp_lkl_mean/smoothed_interp_lkl_mean.max()
# Execute some customisation scripts for the 1d plots
if (info.custom1d != []):
for elem in info.custom1d:
execfile('plot_files/'+elem)
########################################################
# plot 1D mean likelihood in diagonal of triangle plot #
########################################################
if conf.plot_2d:
# raw mean likelihoods:
#ax2d.plot(info.bincenter, lkl_mean,
# ls='--', lw=conf.line_width,
# color = info.MP_color_cycle[info.id][1],
# alpha = info.alphas[info.id])
# smoothed and interpolated mean likelihoods:
ax2d.plot(interp_grid, smoothed_interp_lkl_mean,
ls='--', lw=conf.line_width,
color = info.MP_color_cycle[info.id][1],
alpha = info.alphas[info.id])
########################################################
# plot 1D mean likelihood in 1D plot #
########################################################
if conf.plot:
# raw mean likelihoods:
#ax1d.plot(info.bincenters, lkl_mean,
# ls='--', lw=conf.line_width,
# color = info.MP_color_cycle[info.id][1],
# alpha = info.alphas[info.id])
# smoothed and interpolated mean likelihoods:
ax1d.plot(interp_grid, smoothed_interp_lkl_mean,
ls='--', lw=conf.line_width,
color = info.MP_color_cycle[info.id][1],
alpha = info.alphas[info.id])
except:
print 'could not find likelihood contour for ',
print info.ref_parameters[info.native_index]
if conf.subplot is True:
if conf.plot_2d:
extent2d = ax2d.get_window_extent().transformed(
fig2d.dpi_scale_trans.inverted())
fig2d.savefig(os.path.join(
conf.folder, 'plots', file_name+'.'+conf.extension),
bbox_inches=extent2d.expanded(1.1, 1.4))
if conf.plot:
extent1d = ax1d.get_window_extent().transformed(
fig1d.dpi_scale_trans.inverted())
fig1d.savefig(os.path.join(
conf.folder, 'plots', file_name+'.'+conf.extension),
bbox_inches=extent1d.expanded(1.1, 1.4))
# Store the function in a file
for info in information_instances:
if not info.ignore_param:
hist_file_name = os.path.join(
info.folder, 'plots',
info.basename+'_%s.hist' % (
standard_name))
write_histogram(hist_file_name,
info.interp_grid, info.interp_hist)
# Now do the rest of the triangle plot
if conf.plot_2d:
for second_index in xrange(index):
second_name = plotted_parameters[second_index]
for info in information_instances:
if not info.ignore_param:
try:
info.native_second_index = info.ref_names.index(
plotted_parameters[second_index])
info.has_second_param = True
second_standard_name = info.backup_names[
info.native_second_index]
except ValueError:
info.has_second_param = False
else:
info.has_second_param = False
ax2dsub = fig2d.add_subplot(
len(plotted_parameters),
len(plotted_parameters),
(index)*len(plotted_parameters)+second_index+1)
for info in information_instances:
if info.has_second_param:
ax2dsub.axis([info.x_range[info.native_second_index][0],
info.x_range[info.native_second_index][1],
info.x_range[info.native_index][0],
info.x_range[info.native_index][1]])
# 2D likelihood (first step)
#
# simply the histogram from the chains, with few bins only
#
info.n, info.xedges, info.yedges = np.histogram2d(
info.chain[:, info.native_index+2],
info.chain[:, info.native_second_index+2],
weights=info.chain[:, 0],
bins=(info.bins, info.bins),
normed=False)
info.extent = [
info.x_range[info.native_second_index][0],
info.x_range[info.native_second_index][1],
info.x_range[info.native_index][0],
info.x_range[info.native_index][1]]
info.x_centers = 0.5*(info.xedges[1:]+info.xedges[:-1])
info.y_centers = 0.5*(info.yedges[1:]+info.yedges[:-1])
# 2D likelihood (second step)
#
# like for 1D, interpolate to get a finer grid
# TODO: we should not only interpolate between bin centers, but also extrapolate between side bin centers and bin edges
#
interp_y_centers = scipy.ndimage.zoom(info.y_centers,info.interpolation_smoothing, mode='reflect')
interp_x_centers = scipy.ndimage.zoom(info.x_centers,info.interpolation_smoothing, mode='reflect')
interp_likelihood = scipy.ndimage.zoom(info.n,info.interpolation_smoothing, mode='reflect')
# 2D likelihood (third step)
#
# gaussian smoothing
#
sigma = info.interpolation_smoothing*info.gaussian_smoothing
interp_smoothed_likelihood = scipy.ndimage.filters.gaussian_filter(interp_likelihood,[sigma,sigma], mode='reflect')
# Execute some customisation scripts for the 2d contour plots
if (info.custom2d != []):
for elem in info.custom2d:
execfile('plot_files/'+elem)
# plotting contours, using the ctr_level method (from Karim
# Benabed). Note that only the 1 and 2 sigma contours are
# displayed (due to the line with info.levels[:2])
try:
###########################
# plot 2D filled contours #
###########################
if not info.contours_only:
contours = ax2dsub.contourf(
interp_y_centers,
interp_x_centers,
interp_smoothed_likelihood,
extent=info.extent,
levels=ctr_level(
interp_smoothed_likelihood,
info.levels[:2]),
zorder=4,
colors = info.MP_color_cycle[info.id],
alpha=info.alphas[info.id])
# now add a thin darker line
# around the 95% contour
ax2dsub.contour(
interp_y_centers,
interp_x_centers,
interp_smoothed_likelihood,
extent=info.extent,
levels=ctr_level(
interp_smoothed_likelihood,
info.levels[1:2]),
zorder=4,
colors = info.MP_color_cycle[info.id][1],
alpha = info.alphas[info.id],
linewidths=1)
###########################
# plot 2D contours #
###########################
if info.contours_only:
contours = ax2dsub.contour(
interp_y_centers,
interp_x_centers,
interp_smoothed_likelihood,
extent=info.extent, levels=ctr_level(
interp_smoothed_likelihood,
info.levels[:2]),
zorder=4,
colors = info.MP_color_cycle[info.id],
alpha = info.alphas[info.id],
linewidths=info.line_width)
except Warning:
warnings.warn(
"The routine could not find the contour of the " +
"'%s-%s' 2d-plot" % (
info.plotted_parameters[info.native_index],
info.plotted_parameters[info.native_second_index]))
except ValueError as e:
if str(e) == "Contour levels must be increasing":
warnings.warn(
"The routine could not find the contour of the " +
"'%s-%s' 2d-plot. \n " % (
info.plotted_parameters[info.native_index],
info.plotted_parameters[info.native_second_index]) +
'The error is: "Contour levels must be increasing"' +
" but " + str(ctr_level(info.n, info.levels[:2])) +
" were found. This may happen when most" +
" points fall in the same bin.")
else:
warnings.warn(
"The routine could not find the contour of the " +
"'%s-%s' 2d-plot" % (
info.plotted_parameters[info.native_index],
info.plotted_parameters[info.native_second_index]))
ax2dsub.set_xticks(info.ticks[info.native_second_index])
ax2dsub.set_yticks(info.ticks[info.native_index])
ax2dsub.tick_params('both',direction='inout',top=True,bottom=True,left=True,right=True)
if index == len(plotted_parameters)-1:
ax2dsub.set_xticklabels(
['%.{0}g'.format(info.decimal) % s for s in
info.ticks[info.native_second_index]],
fontsize=info.ticksize)
if conf.legend_style == 'sides':
ax2dsub.set_xlabel(
info.tex_names[info.native_second_index],
fontsize=info.fontsize)
else:
ax2dsub.set_xticklabels([''])
ax2dsub.set_yticks(info.ticks[info.native_index])
if second_index == 0:
ax2dsub.set_yticklabels(
['%.{0}g'.format(info.decimal) % s for s in
info.ticks[info.native_index]],
fontsize=info.ticksize)
else:
ax2dsub.set_yticklabels([''])
if conf.legend_style == 'sides':
if second_index == 0:
ax2dsub.set_ylabel(
info.tex_names[info.native_index],
fontsize=info.fontsize)
if conf.subplot is True:
# Store the individual 2d plots.
if conf.plot_2d:
area = ax2dsub.get_window_extent().transformed(
fig2d.dpi_scale_trans.inverted())
# Pad the saved area by 10% in the x-direction and 20% in
# the y-direction
fig2d.savefig(os.path.join(
conf.folder, 'plots',
file_name+'_2d_%s-%s.%s' % (
standard_name, second_standard_name,
conf.extension)),
bbox_inches=area.expanded(1.4, 1.4))
# store the coordinates of the points for further
# plotting.
store_contour_coordinates(
conf, standard_name, second_standard_name, contours)
for info in information_instances:
if not info.ignore_param and info.has_second_param:
info.hist_file_name = os.path.join(
info.folder, 'plots',
'{0}_2d_{1}-{2}.hist'.format(
info.basename,
standard_name,
second_standard_name))
write_histogram_2d(
info.hist_file_name, info.x_centers, info.y_centers,
info.extent, info.n)
print '-----------------------------------------------'
if conf.plot:
print '--> Saving figures to .{0} files'.format(info.extension)
plot_name = '-vs-'.join([os.path.split(elem.folder)[-1]
for elem in information_instances])
if conf.plot_2d:
# Legend of triangle plot
if ((conf.plot_legend_2d == None) and (len(legends) > 1)) or (conf.plot_legend_2d == True):
# Create a virtual subplot in the top right corner,
# just to be able to anchor the legend nicely
ax2d = fig2d.add_subplot(
len(plotted_parameters),
len(plotted_parameters),
len(plotted_parameters),
)
ax2d.axis('off')
try:
ax2d.legend(legends, legend_names,
loc='upper right',
borderaxespad=0.,
fontsize=info.legendsize)
except TypeError:
ax2d.legend(legends, legend_names,
loc='upper right',
borderaxespad=0.,
prop={'fontsize': info.legendsize})
fig2d.subplots_adjust(wspace=0, hspace=0)
fig2d.savefig(
os.path.join(
conf.folder, 'plots', '{0}_triangle.{1}'.format(
plot_name, info.extension)),
bbox_inches='tight')
# Legend of 1D plot
if conf.plot:
if ((conf.plot_legend_1d == None) and (len(legends) > 1)) or (conf.plot_legend_1d == True):
            # no space left: add the legend to the right
if len(plotted_parameters)<num_columns*num_lines:
fig1d.legend(legends, legend_names,
loc= ((num_columns-0.9)/num_columns,0.1/num_columns),
fontsize=info.legendsize)
# space left in lower right part: add legend there
else:
fig1d.legend(legends, legend_names,
loc= 'center right',
bbox_to_anchor = (1.2,0.5),
fontsize=info.legendsize)
fig1d.tight_layout()
fig1d.savefig(
os.path.join(
conf.folder, 'plots', '{0}_1d.{1}'.format(
plot_name, info.extension)),
bbox_inches='tight')
def ctr_level(histogram2d, lvl, infinite=False):
"""
Extract the contours for the 2d plots (Karim Benabed)
"""
hist = histogram2d.flatten()*1.
hist.sort()
cum_hist = np.cumsum(hist[::-1])
cum_hist /= cum_hist[-1]
alvl = np.searchsorted(cum_hist, lvl)[::-1]
clist = [0]+[hist[-i] for i in alvl]+[hist.max()]
if not infinite:
return clist[1:]
return clist
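# Illustrative usage sketch (commented out, toy data only, not part of the
# analysis pipeline): for any 2d numpy histogram, ctr_level returns the
# histogram heights enclosing the requested fractions of the weighted
# samples, in the increasing order expected by matplotlib's contourf
# `levels` argument:
#
#   toy_hist, _, _ = np.histogram2d(
#       np.random.randn(1000), np.random.randn(1000), bins=20)
#   toy_levels = ctr_level(toy_hist, [0.68, 0.95])
#   # toy_levels = [height of 95% contour, height of 68% contour, max height]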
def minimum_credible_intervals(info):
"""
Extract minimum credible intervals (method from Jan Haman) FIXME
"""
histogram = info.hist
bincenters = info.bincenters
levels = info.levels
bounds = np.zeros((len(levels), 2))
j = 0
delta = bincenters[1]-bincenters[0]
left_edge = max(histogram[0] - 0.5*(histogram[1]-histogram[0]), 0.)
right_edge = max(histogram[-1] + 0.5*(histogram[-1]-histogram[-2]), 0.)
failed = False
for level in levels:
norm = float(
(np.sum(histogram)-0.5*(histogram[0]+histogram[-1]))*delta)
norm += 0.25*(left_edge+histogram[0])*delta
norm += 0.25*(right_edge+histogram[-1])*delta
water_level_up = np.max(histogram)*1.0
water_level_down = np.min(histogram)*1.0
top = 0.
iterations = 0
while (abs((top/norm)-level) > 0.0001) and not failed:
top = 0.
water_level = (water_level_up + water_level_down)/2.
#ontop = [elem for elem in histogram if elem > water_level]
indices = [i for i in range(len(histogram))
if histogram[i] > water_level]
# check for multimodal posteriors
if ((indices[-1]-indices[0]+1) != len(indices)):
warnings.warn(
"could not derive minimum credible intervals " +
"for this multimodal posterior")
warnings.warn(
"please try running longer chains or reducing " +
"the number of bins with --bins BINS (default: 20)")
failed = True
break
top = (np.sum(histogram[indices]) -
0.5*(histogram[indices[0]]+histogram[indices[-1]]))*(delta)
# left
if indices[0] > 0:
top += (0.5*(water_level+histogram[indices[0]]) *
delta*(histogram[indices[0]]-water_level) /
(histogram[indices[0]]-histogram[indices[0]-1]))
else:
if (left_edge > water_level):
top += 0.25*(left_edge+histogram[indices[0]])*delta
else:
top += (0.25*(water_level + histogram[indices[0]]) *
delta*(histogram[indices[0]]-water_level) /
(histogram[indices[0]]-left_edge))
# right
if indices[-1] < (len(histogram)-1):
top += (0.5*(water_level + histogram[indices[-1]]) *
delta*(histogram[indices[-1]]-water_level) /
(histogram[indices[-1]]-histogram[indices[-1]+1]))
else:
if (right_edge > water_level):
top += 0.25*(right_edge+histogram[indices[-1]])*delta
else:
top += (0.25*(water_level + histogram[indices[-1]]) *
delta * (histogram[indices[-1]]-water_level) /
(histogram[indices[-1]]-right_edge))
if top/norm >= level:
water_level_down = water_level
else:
water_level_up = water_level
# safeguard, just in case
iterations += 1
if (iterations > 1000):
warnings.warn(
"the loop to check for sigma deviations was " +
"taking too long to converge")
failed = True
break
# min
if failed:
bounds[j][0] = np.nan
elif indices[0] > 0:
bounds[j][0] = bincenters[indices[0]] - delta*(histogram[indices[0]]-water_level)/(histogram[indices[0]]-histogram[indices[0]-1])
else:
if (left_edge > water_level):
bounds[j][0] = bincenters[0]-0.5*delta
else:
bounds[j][0] = bincenters[indices[0]] - 0.5*delta*(histogram[indices[0]]-water_level)/(histogram[indices[0]]-left_edge)
# max
if failed:
bounds[j][1] = np.nan
elif indices[-1] < (len(histogram)-1):
bounds[j][1] = bincenters[indices[-1]] + delta*(histogram[indices[-1]]-water_level)/(histogram[indices[-1]]-histogram[indices[-1]+1])
else:
if (right_edge > water_level):
bounds[j][1] = bincenters[-1]+0.5*delta
else:
bounds[j][1] = bincenters[indices[-1]] + \
0.5*delta*(histogram[indices[-1]]-water_level) / \
(histogram[indices[-1]]-right_edge)
j += 1
for elem in bounds:
for j in (0, 1):
elem[j] -= info.mean[info.native_index]
return bounds
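# Illustrative sketch (commented out, made-up attributes): the water-level
# construction above can be pictured on a toy gaussian histogram. Lowering
# the "water level" until the enclosed area reaches each requested fraction
# gives the two crossing points, returned relative to the mean:
#
#   class _Toy(object):
#       pass
#   toy = _Toy()
#   toy.bincenters = np.linspace(-3., 3., 20)
#   toy.hist = np.exp(-0.5*toy.bincenters**2)
#   toy.levels = np.array([68.26, 95.4, 99.7])/100.
#   toy.mean = np.zeros(1)
#   toy.native_index = 0
#   # minimum_credible_intervals(toy) should then give bounds close to
#   # [[-1, 1], [-2, 2], [-3, 3]] for this nearly gaussian shape.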
def write_h(info_file, indices, name, string, quantity, modifiers=None):
"""
Write one horizontal line of output
"""
info_file.write('\n '+name+'\t: ')
for i in indices:
info_file.write(string % quantity[i]+'\t')
def cubic_interpolation(info, hist, bincenters):
"""
    Interpolate the 1d histogram on a finer grid, with fallbacks to accommodate the absence of the scipy interpolate module
"""
    # we start from a try because, if anything goes wrong, we want to return the raw histogram rather than nothing
try:
# test that all elements are strictly positive, otherwise we could not take the log, and we must switch to the robust method
for i,elem in enumerate(hist):
if elem == 0.:
hist[i] = 1.e-99
            elif elem < 0:
                print hist[i]
                raise Exception('encountered a negative histogram value')
# One of our methods (using polyfit) does assume that the input histogram has a maximum value of 1.
        # If in a future version this is not guaranteed anymore, we should renormalise it here.
# This is important for computing weights and thresholds.
        # The threshold below which the likelihood will be
        # approximated as zero is hard-coded here (could become an
        # input parameter, but that would not clearly be useful):
threshold = 1.e-3
# prepare the interpolation on log(Like):
ln_hist = np.log(hist)
# define a finer grid on a wider range (assuming that the following method is fine both for inter- and extra-polation)
left = max(info.boundaries[info.native_index][0],bincenters[0]-2.5*(bincenters[1]-bincenters[0]))
right = min(info.boundaries[info.native_index][1],bincenters[-1]+2.5*(bincenters[-1]-bincenters[-2]))
interp_grid = np.linspace(left, right, (len(bincenters)+4)*10+1)
######################################
# polynomial fit method (default): #
        ######################################
if info.posterior_smoothing >= 2:
            # the points in the histogram with a very low likelihood (i.e. hist[i] << 1, since hist is normalised to a maximum of one)
# have a lot of Poisson noise and are unreliable. However, if we do nothing, they may dominate the outcome of the fitted polynomial.
# Hence we can:
# 1) give them less weight (weight = sqrt(hist) seems to work well)
# 2) cut them at some threshold value and base the fit only on higher points
# 3) both
            # Option 2) seems to work best. Option 1) is also written below, but commented out.
# method 1):
#f = np.poly1d(np.polyfit(bincenters,ln_hist,info.posterior_smoothing,w=np.sqrt(hist)))
#interp_hist = f(interp_grid)
# method 2):
            # find index values such that hist is negligible everywhere except between hist[sub_indices[0]] and hist[sub_indices[-1]]
sub_indices = [i for i,elem in enumerate(hist) if elem > threshold]
# The interpolation is done precisely in this range: hist[sub_indices[0]] < x < hist[sub_indices[-1]]
g = np.poly1d(np.polyfit(bincenters[sub_indices],ln_hist[sub_indices],info.posterior_smoothing)) #,w=np.sqrt(hist[sub_indices])))
            # The extrapolation is done in a range including one more bin on each side, except when the boundary is hit
extrapolation_range_left = [info.boundaries[info.native_index][0] if sub_indices[0] == 0 else bincenters[sub_indices[0]-1]]
extrapolation_range_right = [info.boundaries[info.native_index][1] if sub_indices[-1] == len(hist)-1 else bincenters[sub_indices[-1]+1]]
            # outside of this range, log(L) is brutally set to a negligible value, log(1.e-10)
interp_hist = [g(elem) if (elem > extrapolation_range_left and elem < extrapolation_range_right) else np.log(1.e-10) for elem in interp_grid]
elif info.posterior_smoothing<0:
raise io_mp.AnalyzeError(
"You passed --posterior-smoothing %d, this value is not understood"%info.posterior_smoothing)
############################################################
# other methods: #
# - linear inter/extra-polation if posterior_smoothing = 0 #
        # - cubic inter/extra-polation if posterior_smoothing = 1  #
############################################################
else:
# try first inter/extra-polation
try:
# prepare to interpolate and extrapolate:
if info.posterior_smoothing == 0:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='linear', fill_value='extrapolate')
else:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='cubic', fill_value='extrapolate')
interp_hist = f(interp_grid)
            # failure probably caused by old scipy not having the fill_value='extrapolate' argument. Then, only interpolate.
except:
# define a finer grid but not a wider one
left = max(info.boundaries[info.native_index][0],bincenters[0])
right = min(info.boundaries[info.native_index][1],bincenters[-1])
interp_grid = np.linspace(left, right, len(bincenters)*10+1)
# prepare to interpolate only:
if info.posterior_smoothing == 0:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='linear')
else:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='cubic')
interp_hist = f(interp_grid)
        # final steps used by all methods
# go back from ln_Like to Like
interp_hist = np.exp(interp_hist)
# re-normalise the interpolated curve
interp_hist = interp_hist / interp_hist.max()
return interp_hist, interp_grid
except:
# we will end up here if anything went wrong before
# do nothing (raw histogram)
warnings.warn(
"The 1D posterior could not be processed normally, probably" +
"due to incomplete or obsolete numpy and/or scipy versions." +
"So the raw histograms will be plotted.")
return hist, bincenters
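# Illustrative usage (commented out): starting from a coarse 1d posterior
# normalised to a maximum of one, cubic_interpolation returns the same curve
# re-sampled on a roughly ten times finer grid, provided the Information
# instance carries `boundaries`, `native_index` and `posterior_smoothing`:
#
#   coarse_hist = info.hist/info.hist.max()
#   fine_hist, fine_grid = cubic_interpolation(info, coarse_hist, info.bincenters)
#   # fine_hist is again normalised to a maximum of one, and fine_grid has
#   # about ten points per original bin.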
def write_histogram(hist_file_name, x_centers, hist):
"""
Store the posterior distribution to a file
"""
with open(hist_file_name, 'w') as hist_file:
hist_file.write("# 1d posterior distribution\n")
hist_file.write("\n# x_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in x_centers])+"\n")
hist_file.write("\n# Histogram\n")
hist_file.write(", ".join(
[str(elem) for elem in hist])+"\n")
print 'wrote ', hist_file_name
def read_histogram(histogram_path):
"""
Recover a stored 1d posterior
"""
with open(histogram_path, 'r') as hist_file:
for line in hist_file:
if line:
if line.find("# x_centers") != -1:
x_centers = [float(elem) for elem in
hist_file.next().split(",")]
elif line.find("# Histogram") != -1:
hist = [float(elem) for elem in
hist_file.next().split(",")]
x_centers = np.array(x_centers)
hist = np.array(hist)
return x_centers, hist
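# Illustrative round trip (commented out, arbitrary file name): the two
# functions above are symmetric, so a stored 1d posterior can be recovered
# later for re-plotting without re-analysing the chains:
#
#   write_histogram('toy.hist', info.interp_grid, info.interp_hist)
#   x_centers, hist = read_histogram('toy.hist')
#   # x_centers and hist are numpy arrays matching (up to float formatting)
#   # the interp_grid and interp_hist that were written out.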
def write_histogram_2d(hist_file_name, x_centers, y_centers, extent, hist):
"""
Store the histogram information to a file, to plot it later
"""
with open(hist_file_name, 'w') as hist_file:
hist_file.write("# Interpolated histogram\n")
hist_file.write("\n# x_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in x_centers])+"\n")
hist_file.write("\n# y_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in y_centers])+"\n")
hist_file.write("\n# Extent\n")
hist_file.write(", ".join(
[str(elem) for elem in extent])+"\n")
hist_file.write("\n# Histogram\n")
for line in hist:
hist_file.write(", ".join(
[str(elem) for elem in line])+"\n")
def read_histogram_2d(histogram_path):
"""
Read the histogram information that was stored in a file.
To use it, call something like this:
.. code::
        x_centers, y_centers, extent, hist = read_histogram_2d(path)
fig, ax = plt.subplots()
ax.contourf(
y_centers, x_centers, hist, extent=extent,
levels=ctr_level(hist, [0.68, 0.95]),
            zorder=5, cmap=plt.cm.autumn_r)
plt.show()
"""
with open(histogram_path, 'r') as hist_file:
length = 0
for line in hist_file:
if line:
if line.find("# x_centers") != -1:
x_centers = [float(elem) for elem in
hist_file.next().split(",")]
length = len(x_centers)
elif line.find("# y_centers") != -1:
y_centers = [float(elem) for elem in
hist_file.next().split(",")]
elif line.find("# Extent") != -1:
extent = [float(elem) for elem in
hist_file.next().split(",")]
elif line.find("# Histogram") != -1:
hist = []
for index in range(length):
hist.append([float(elem) for elem in
hist_file.next().split(",")])
x_centers = np.array(x_centers)
y_centers = np.array(y_centers)
extent = np.array(extent)
hist = np.array(hist)
return x_centers, y_centers, extent, hist
def clean_conversion(module_name, tag, folder):
"""
Execute the methods "convert" from the different sampling algorithms
Returns True if something was made, False otherwise
"""
has_module = False
subfolder_name = tag+"_subfolder"
try:
module = importlib.import_module(module_name)
subfolder = getattr(module, subfolder_name)
has_module = True
except ImportError:
# The module is not installed, the conversion can not take place
pass
if has_module and os.path.isdir(folder):
# Remove any potential trailing slash
folder = os.path.join(
*[elem for elem in folder.split(os.path.sep) if elem])
if folder.split(os.path.sep)[-1] == subfolder:
try:
getattr(module, 'from_%s_output_to_chains' % tag)(folder)
except IOError:
raise io_mp.AnalyzeError(
"You asked to analyze a %s folder which " % tag +
"seems to come from an unfinished run, or to be empty " +
"or corrupt. Please make sure the run went smoothly " +
"enough.")
warnings.warn(
"The content of the %s subfolder has been " % tag +
"translated for Monte Python. Please run an "
"analysis of the entire folder now.")
return True
else:
return False
def separate_files(files):
"""
    Separate the input files by folder
Given all input arguments to the command line files entry, separate them in
a list of lists, grouping them by folders. The number of identified folders
will determine the number of information instances to create
"""
final_list = []
temp = [files[0]]
folder = (os.path.dirname(files[0]) if os.path.isfile(files[0])
else files[0])
if len(files) > 1:
for elem in files[1:]:
new_folder = (os.path.dirname(elem) if os.path.isfile(elem)
else elem)
if new_folder == folder:
temp.append(elem)
else:
folder = new_folder
final_list.append(temp)
temp = [elem]
final_list.append(temp)
return final_list
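# Illustrative example (commented out, hypothetical chain files assumed to
# exist on disk): chains living in the same folder are grouped together, and
# each group later becomes one Information instance:
#
#   separate_files(['runA/2019-01-01_100__1.txt',
#                   'runA/2019-01-01_100__2.txt',
#                   'runB/2019-02-01_100__1.txt'])
#   # -> [['runA/2019-01-01_100__1.txt', 'runA/2019-01-01_100__2.txt'],
#   #     ['runB/2019-02-01_100__1.txt']]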
def recover_folder_and_files(files):
"""
Distinguish the cases when analyze is called with files or folder
Note that this takes place chronologically after the function
`separate_files`"""
# The following list defines the substring that a chain should contain for
# the code to recognise it as a proper chain.
substrings = ['.txt', '__']
# The following variable defines the substring that identify error_log
    # files, which therefore must not be taken into account in the analysis.
substring_err = 'error_log'
limit = 10
# If the first element is a folder, grab all chain files inside
if os.path.isdir(files[0]):
folder = os.path.normpath(files[0])
files = [os.path.join(folder, elem) for elem in os.listdir(folder)
if not os.path.isdir(os.path.join(folder, elem))
and not os.path.getsize(os.path.join(folder, elem)) < limit
and (substring_err not in elem)
and all([x in elem for x in substrings])]
# Otherwise, extract the folder from the chain file-name.
else:
# If the name is completely wrong, say it
if not os.path.exists(files[0]):
raise io_mp.AnalyzeError(
"You provided a non-existant folder/file to analyze")
folder = os.path.relpath(
os.path.dirname(os.path.realpath(files[0])), os.path.curdir)
files = [os.path.join(folder, elem) for elem in os.listdir(folder)
if os.path.join(folder, elem) in np.copy(files)
and not os.path.isdir(os.path.join(folder, elem))
and not os.path.getsize(os.path.join(folder, elem)) < limit
and (substring_err not in elem)
and all([x in elem for x in substrings])]
basename = os.path.basename(folder)
return folder, files, basename
def extract_array(line):
"""
Return the array on the RHS of the line
>>> extract_array("toto = ['one', 'two']\n")
['one', 'two']
>>> extract_array('toto = ["one", 0.2]\n')
['one', 0.2]
"""
# Recover RHS of the equal sign, and remove surrounding spaces
rhs = line.split('=')[-1].strip()
# Remove array signs
rhs = rhs.strip(']').lstrip('[')
# Recover each element of the list
sequence = [e.strip().strip('"').strip("'") for e in rhs.split(',')]
for index, elem in enumerate(sequence):
try:
sequence[index] = int(elem)
except ValueError:
try:
sequence[index] = float(elem)
except ValueError:
pass
return sequence
def extract_dict(line):
"""
Return the key and value of the dictionary element contained in line
>>> extract_dict("something['toto'] = [0, 1, 2, -2, 'cosmo']")
'toto', [0, 1, 2, -2, 'cosmo']
"""
# recovering the array
sequence = extract_array(line)
# Recovering only the LHS
lhs = line.split('=')[0].strip()
# Recovering the name from the LHS
name = lhs.split('[')[-1].strip(']')
name = name.strip('"').strip("'")
return name, sequence
def extract_parameter_names(info):
"""
    Read the log.param file and store the parameter names in the Information instance
"""
backup_names = []
plotted_parameters = []
boundaries = []
ref_names = []
tex_names = []
scales = []
with open(info.param_path, 'r') as param:
for line in param:
if line.find('#') == -1:
if line.find('data.experiments') != -1:
info.experiments = extract_array(line)
if line.find('data.parameters') != -1:
name, array = extract_dict(line)
original = name
                    # Rename the names according to the .extra file (optional)
if name in info.to_change.iterkeys():
name = info.to_change[name]
                    # If the name corresponds to a varying parameter (fourth
                    # entry of the initial array being non-zero) or to a
                    # derived parameter (which could be declared as fixed, it
                    # does not make any difference), then continue the analysis.
if array[3] != 0 or array[5] == 'derived':
# The real name is always kept, to have still the class
# names in the covmat
backup_names.append(original)
# With the list "to_plot", we can potentially restrict
# the variables plotted. If it is empty, though, simply
# all parameters will be plotted.
if info.to_plot == []:
plotted_parameters.append(name)
else:
if name in info.to_plot:
plotted_parameters.append(name)
# Append to the boundaries array
boundaries.append([
None if elem == 'None' or (isinstance(elem, int)
and elem == -1)
else elem for elem in array[1:3]])
ref_names.append(name)
# Take care of the scales
scale = array[4]
rescale = 1.
if name in info.new_scales.iterkeys():
scale = info.new_scales[name]
rescale = info.new_scales[name]/array[4]
scales.append(rescale)
# Given the scale, decide for the pretty tex name
number = 1./scale
tex_names.append(
io_mp.get_tex_name(name, number=number))
scales = np.diag(scales)
info.ref_names = ref_names
info.tex_names = tex_names
info.boundaries = boundaries
info.backup_names = backup_names
info.scales = scales
# Beware, the following two numbers are different. The first is the total
# number of parameters stored in the chain, whereas the second is for
# plotting purpose only.
info.number_parameters = len(ref_names)
info.plotted_parameters = plotted_parameters
def find_maximum_of_likelihood(info):
"""
Finding the global maximum of likelihood
min_minus_lkl will be appended with all the maximum likelihoods of files,
then will be replaced by its own maximum. This way, the global
maximum likelihood will be used as a reference, and not each chain's
maximum.
"""
min_minus_lkl = []
for chain_file in info.files:
# cheese will brutally contain everything (- log likelihood) in the
# file chain_file being scanned.
# This could potentially be faster with pandas, but is already quite
# fast
#
# This would read the chains including comment lines:
#cheese = (np.array([float(line.split()[1].strip())
# for line in open(chain_file, 'r')]))
#
# This reads the chains excluding comment lines:
with open(chain_file, 'r') as f:
cheese = (np.array([float(line.split()[1].strip())
for line in ifilterfalse(iscomment,f)]))
try:
min_minus_lkl.append(cheese[:].min())
except ValueError:
pass
# beware, it is the min because we are talking about
# '- log likelihood'
# Selecting only the true maximum.
try:
min_minus_lkl = min(min_minus_lkl)
except ValueError:
raise io_mp.AnalyzeError(
"No decently sized chain was found in the desired folder. " +
"Please wait to have more accepted point before trying " +
"to analyze it.")
info.min_minus_lkl = min_minus_lkl
def remove_bad_points(info):
"""
    Create an array with all the points from the chains, after removing the non-Markovian points, the burn-in and a fixed fraction
"""
# spam will brutally contain all the chains with sufficient number of
# points, after the burn-in was removed.
spam = list()
# Recover the longest file name, for pleasing display
max_name_length = max([len(e) for e in info.files])
# Total number of steps done:
steps = 0
accepted_steps = 0
# Open the log file
log = open(info.log_path, 'w')
for index, chain_file in enumerate(info.files):
# To improve presentation, and print only once the full path of the
# analyzed folder, we recover the length of the path name, and
# create an empty complementary string of this length
total_length = 18+max_name_length
empty_length = 18+len(os.path.dirname(chain_file))+1
basename = os.path.basename(chain_file)
if index == 0:
exec "print '--> Scanning file %-{0}s' % chain_file,".format(
max_name_length)
else:
exec "print '%{0}s%-{1}s' % ('', basename),".format(
empty_length, total_length-empty_length)
# cheese will brutally contain everything in the chain chain_file being
# scanned
#
# This would read the chains including comment lines:
#cheese = (np.array([[float(elem) for elem in line.split()]
# for line in open(chain_file, 'r')]))
#
        # This reads the chains excluding comment lines:
with open(chain_file, 'r') as f:
cheese = (np.array([[float(elem) for elem in line.split()]
for line in ifilterfalse(iscomment,f)]))
# If the file contains a broken line with a different number of
# elements, the previous array generation might fail, and will not have
# the correct shape. Hence the following command will fail. To avoid
# that, the error is caught.
try:
local_min_minus_lkl = cheese[:, 1].min()
except IndexError:
raise io_mp.AnalyzeError(
"Error while scanning %s." % chain_file +
" This file most probably contains "
"an incomplete line, rendering the analysis impossible. "
"I think that the following line(s) is(are) wrong:\n %s" % (
'\n '.join(
['-> %s' % line for line in
open(chain_file, 'r') if
len(line.split()) != len(info.backup_names)+2])))
line_count = float(sum(1 for line in open(chain_file, 'r')))
# Logging the information obtained until now.
number_of_steps = cheese[:, 0].sum()
log.write("%s\t " % os.path.basename(chain_file))
log.write(" Number of steps:%d\t" % number_of_steps)
log.write(" Steps accepted:%d\t" % line_count)
log.write(" acc = %.2g\t" % (float(line_count)/number_of_steps))
log.write("min(-loglike) = %.2f\n" % local_min_minus_lkl)
steps += number_of_steps
accepted_steps += line_count
# check if analyze() is called directly by the user, or by the mcmc loop during an updating phase
try:
# command_line.update is defined when called by the mcmc loop
info.update
except:
            # in case it was not defined (i.e. when analyze() is called directly by the user), set it to 0
info.update = 0
        # Removing the non-markovian part, the burn-in, and a fraction (1 - keep-fraction) of the remaining points
start = 0
markovian=0
try:
# Read all comments in chains about times when proposal was updated
# The last of these comments gives the number of lines to be skipped in the files
if info.markovian and not info.update:
with open(chain_file, 'r') as f:
for line in ifilter(iscomment,f):
start = int(line.split()[2])
markovian = start
            # Remove burn-in, defined as all points until the likelihood reaches min_minus_lkl+LOG_LKL_CUTOFF
while cheese[start, 1] > info.min_minus_lkl+LOG_LKL_CUTOFF:
start += 1
burnin = start-markovian
# Remove fixed fraction as requested by user (usually not useful if non-markovian is also removed)
if info.keep_fraction < 1:
start = start + int((1.-info.keep_fraction)*(line_count - start))
print ": Removed",
if info.markovian:
print "%d non-markovian points," % markovian,
print "%d points of burn-in," % burnin,
if info.keep_fraction < 1:
print "and first %.0f percent," % (100.*(1-info.keep_fraction)),
print "keep %d steps" % (line_count-start)
except IndexError:
print ': Removed everything: chain not converged'
# ham contains cheese without the burn-in, if there are any points
# left (more than 5)
if np.shape(cheese)[0] > start+5:
ham = np.copy(cheese[int(start)::])
# Deal with single file case
if len(info.files) == 1:
warnings.warn("Convergence computed for a single file")
bacon = np.copy(cheese[::3, :])
egg = np.copy(cheese[1::3, :])
sausage = np.copy(cheese[2::3, :])
spam.append(bacon)
spam.append(egg)
spam.append(sausage)
continue
# Adding resulting table to spam
spam.append(ham)
# Test the length of the list
if len(spam) == 0:
raise io_mp.AnalyzeError(
"No decently sized chain was found. " +
"Please wait a bit to analyze this folder")
# Applying now new rules for scales, if the name is contained in the
# referenced names
for name in info.new_scales.iterkeys():
try:
index = info.ref_names.index(name)
for i in xrange(len(spam)):
spam[i][:, index+2] *= 1./info.scales[index, index]
except ValueError:
# there is nothing to do if the name is not contained in ref_names
pass
info.steps = steps
info.accepted_steps = accepted_steps
return spam
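# Reminder of the chain layout used by the functions below (it follows from
# the indexing above): every row of the arrays stored in spam, and later in
# info.chain, reads
#   column 0          -> multiplicity (weight) of the point
#   column 1          -> -log(likelihood) of the point
#   columns 2 onwards -> parameter values, in the order of info.ref_names
# which is why means, variances and covariances always weight by
# spam[j][:, 0] and access parameter i through column i+2.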
def compute_mean(mean, spam, total):
"""
"""
for i in xrange(np.shape(mean)[1]):
for j in xrange(len(spam)):
submean = np.sum(spam[j][:, 0]*spam[j][:, i+2])
mean[j+1, i] = submean / total[j+1]
mean[0, i] += submean
mean[0, i] /= total[0]
def compute_variance(var, mean, spam, total):
"""
"""
for i in xrange(np.shape(var)[1]):
for j in xrange(len(spam)):
var[0, i] += np.sum(
spam[j][:, 0]*(spam[j][:, i+2]-mean[0, i])**2)
var[j+1, i] = np.sum(
spam[j][:, 0]*(spam[j][:, i+2]-mean[j+1, i])**2) / \
(total[j+1]-1)
var[0, i] /= (total[0]-1)
def compute_covariance_matrix(info):
"""
"""
covar = np.zeros((len(info.ref_names), len(info.ref_names)))
for i in xrange(len(info.ref_names)):
for j in xrange(i, len(info.ref_names)):
covar[i, j] = (
info.chain[:, 0]*(
(info.chain[:, i+2]-info.mean[i]) *
(info.chain[:, j+2]-info.mean[j]))).sum()
if i != j:
covar[j, i] = covar[i, j]
covar /= info.total
# Removing scale factors in order to store true parameter covariance
covar = np.dot(info.scales.T, np.dot(covar, info.scales))
return covar
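# In equation form, the loop above computes the multiplicity-weighted
# covariance
#   C_ij = sum_n w_n (x_n,i - mean_i)(x_n,j - mean_j) / info.total
# with w_n the multiplicity in column 0, before the scale factors are undone
# through C -> scales^T C scales.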
def adjust_ticks(param, information_instances):
"""
"""
if len(information_instances) == 1:
return
# Recovering all x_range and ticks entries from the concerned information
# instances
x_ranges = []
ticks = []
for info in information_instances:
if not info.ignore_param:
x_ranges.append(info.x_range[info.native_index])
ticks.append(info.ticks[info.native_index])
# The new x_range and tick should min/max all the existing ones
new_x_range = np.array(
[min([e[0] for e in x_ranges]), max([e[1] for e in x_ranges])])
temp_ticks = np.array(
[min([e[0] for e in ticks]), max([e[-1] for e in ticks])])
new_ticks = np.linspace(temp_ticks[0],
temp_ticks[1],
info.ticknumber)
for info in information_instances:
if not info.ignore_param:
info.x_range[info.native_index] = new_x_range
info.ticks[info.native_index] = new_ticks
def store_contour_coordinates(info, name1, name2, contours):
"""docstring"""
file_name = os.path.join(
info.folder, 'plots', '{0}_2d_{1}-{2}.dat'.format(
info.basename, name1, name2))
with open(file_name, 'w') as plot_file:
plot_file.write(
'# contour for confidence level {0}\n'.format(
info.levels[1]))
for elem in contours.collections[0].get_paths():
points = elem.vertices
for k in range(np.shape(points)[0]):
plot_file.write("%.8g\t %.8g\n" % (
points[k, 0], points[k, 1]))
# stop to not include the inner contours
if k != 0:
if all(points[k] == points[0]):
plot_file.write("\n")
break
plot_file.write("\n\n")
plot_file.write(
'# contour for confidence level {0}\n'.format(
info.levels[0]))
for elem in contours.collections[1].get_paths():
points = elem.vertices
for k in range(np.shape(points)[0]):
plot_file.write("%.8g\t %.8g\n" % (
points[k, 0], points[k, 1]))
if k != 0:
if all(points[k] == points[0]):
plot_file.write("\n")
break
plot_file.write("\n\n")
def iscomment(s):
"""
Define what we call a comment in MontePython chain files
"""
return s.startswith('#')
class Information(object):
"""
Hold all information for analyzing runs
"""
# Counting the number of instances, to choose the color map
_ids = count(0)
# Flag checking the absence or presence of the interp1d function
has_interpolate_module = False
# Actual pairs of colors used by MP.
# For each pair, the first color is for the 95% contour,
# and the second for the 68% contour + the 1d probability.
# Note that, as with the other customisation options, you can specify new
# values for this in the extra plot_file.
MP_color = {
'Red':['#E37C80','#CE121F'],
'Blue':['#7A98F6','#1157EF'],
'Green':['#88B27A','#297C09'],
'Orange':['#F3BE82','#ED920F'],
'Grey':['#ABABAB','#737373'],
'Purple':['#B87294','#88004C']
}
# order used when several directories are analysed
MP_color_cycle = [
MP_color['Red'],
MP_color['Blue'],
MP_color['Green'],
MP_color['Orange'],
MP_color['Grey'],
MP_color['Purple']
]
# in the same order, list of transparency levels
alphas = [0.9, 0.9, 0.9, 0.9, 0.9, 0.9]
def __init__(self, command_line, other=None):
"""
The following initialization creates the three tables that can be
customized in an extra plot_file (see :mod:`parser_mp`).
Parameters
----------
command_line : Namespace
it contains the initialised command line arguments
"""
self.to_change = {}
"""
Dictionary whose keys are the old parameter names, and values are the
new ones. For instance :code:`{'beta_plus_lambda':'beta+lambda'}`
"""
self.to_plot = []
"""
Array of names of parameters to plot. If left empty, all will be
plotted.
.. warning::
If you changed a parameter name with :attr:`to_change`, you need to
give the new name to this array
"""
self.new_scales = {}
"""
Dictionary that redefines some scales. The keys will be the parameter
name, and the value its scale.
"""
# Assign a unique id to this instance
self.id = self._ids.next()
# Defining the sigma contours (1, 2 and 3-sigma)
self.levels = np.array([68.26, 95.4, 99.7])/100.
# Follows a bunch of initialisation to provide default members
self.ref_names, self.backup_names = [], []
self.scales, self.plotted_parameters = [], []
self.spam = []
# Store directly all information from the command_line object into this
# instance, except the protected members (begin and end with __)
for elem in dir(command_line):
if elem.find('__') == -1:
setattr(self, elem, getattr(command_line, elem))
# initialise the legend flags
self.plot_legend_1d = None
self.plot_legend_2d = None
# initialize the legend size to be the same as fontsize, but can be
# altered in the extra file
self.legendsize = self.fontsize
self.legendnames = []
# initialize the customisation script flags
self.custom1d = []
self.custom2d = []
        # initialise the dictionary enforcing limits
self.force_limits = {}
# Read a potential file describing changes to be done for the parameter
        # names, and the number of parameters plotted (can be left empty, all will
# then be plotted), but also the style of the plot. Note that this
# overrides the command line options
if command_line.optional_plot_file:
plot_file_vars = {'info': self,'plt': plt}
execfile(command_line.optional_plot_file, plot_file_vars)
# check and store keep_fraction
if command_line.keep_fraction<=0 or command_line.keep_fraction>1:
raise io_mp.AnalyzeError("after --keep-fraction you should pass a float >0 and <=1")
self.keep_fraction = command_line.keep_fraction
def remap_parameters(self, spam):
"""
Perform substitutions of parameters for analyzing
.. note::
for arbitrary combinations of parameters, the prior will not
necessarily be flat.
"""
if hasattr(self, 'redefine'):
for key, value in self.redefine.iteritems():
# Check that the key was an original name
if key in self.backup_names:
print ' /|\ Transforming', key, 'into', value
# We recover the indices of the key
index_to_change = self.backup_names.index(key)+2
print('/_o_\ The new variable will be called ' +
self.ref_names[self.backup_names.index(key)])
# Recover all indices of all variables present in the
# remapping
variable_names = [elem for elem in self.backup_names if
value.find(elem) != -1]
indices = [self.backup_names.index(name)+2 for name in
variable_names]
# Now loop over all files in spam
for i in xrange(len(spam)):
# Assign variables to their values
for index, name in zip(indices, variable_names):
exec("%s = spam[i][:, %i]" % (name, index))
# Assign to the desired index the combination
exec("spam[i][:, %i] = %s" % (index_to_change, value))
def define_ticks(self):
"""
"""
self.max_values = self.chain[:, 2:].max(axis=0)
self.min_values = self.chain[:, 2:].min(axis=0)
self.span = (self.max_values-self.min_values)
# Define the place of ticks, given the number of ticks desired, stored
# in conf.ticknumber
self.ticks = np.array(
[np.linspace(self.min_values[i]+self.span[i]*0.1,
self.max_values[i]-self.span[i]*0.1,
self.ticknumber) for i in range(len(self.span))])
# Define the x range (ticks start not exactly at the range boundary to
# avoid display issues)
self.x_range = np.array((self.min_values, self.max_values)).T
# In case the exploration hit a boundary (as defined in the parameter
# file), at the level of precision defined by the number of bins, the
# ticks and x_range should be altered in order to display this
# meaningful number instead.
for i in range(np.shape(self.ticks)[0]):
x_range = self.x_range[i]
bounds = self.boundaries[i]
# Left boundary
if bounds[0] is not None:
if abs(x_range[0]-bounds[0]) < self.span[i]/self.bins:
self.ticks[i][0] = bounds[0]
self.x_range[i][0] = bounds[0]
# Right boundary
if bounds[-1] is not None:
if abs(x_range[-1]-bounds[-1]) < self.span[i]/self.bins:
self.ticks[i][-1] = bounds[-1]
self.x_range[i][-1] = bounds[-1]
def write_information_files(self):
# Store in info_names only the tex_names that were plotted, for this
# instance, and in indices the corresponding list of indices. It also
# removes the $ signs, for clarity
self.info_names = [
name for index, name in enumerate(self.tex_names) if
self.ref_names[index] in self.plotted_parameters]
self.indices = [self.tex_names.index(name) for name in self.info_names]
self.tex_names = [name for index, name in enumerate(self.tex_names) if
self.ref_names[index] in self.plotted_parameters]
self.info_names = [name.replace('$', '') for name in self.info_names]
# Define the bestfit array
self.bestfit = np.zeros(len(self.ref_names))
for i in xrange(len(self.ref_names)):
self.bestfit[i] = self.chain[self.sorted_indices[0], :][2+i]
# Write down to the .h_info file all necessary information
self.write_h_info()
self.write_v_info()
self.write_tex()
def write_h_info(self):
with open(self.h_info_path, 'w') as h_info:
h_info.write(' param names\t: ')
for name in self.info_names:
h_info.write("%-14s" % name)
write_h(h_info, self.indices, 'R-1 values', '% .6f', self.R)
write_h(h_info, self.indices, 'Best Fit ', '% .6e', self.bestfit)
write_h(h_info, self.indices, 'mean ', '% .6e', self.mean)
write_h(h_info, self.indices, 'sigma ', '% .6e',
(self.bounds[:, 0, 1]-self.bounds[:, 0, 0])/2.)
h_info.write('\n')
write_h(h_info, self.indices, '1-sigma - ', '% .6e',
self.bounds[:, 0, 0])
write_h(h_info, self.indices, '1-sigma + ', '% .6e',
self.bounds[:, 0, 1])
write_h(h_info, self.indices, '2-sigma - ', '% .6e',
self.bounds[:, 1, 0])
write_h(h_info, self.indices, '2-sigma + ', '% .6e',
self.bounds[:, 1, 1])
write_h(h_info, self.indices, '3-sigma - ', '% .6e',
self.bounds[:, 2, 0])
write_h(h_info, self.indices, '3-sigma + ', '% .6e',
self.bounds[:, 2, 1])
# bounds
h_info.write('\n')
write_h(h_info, self.indices, '1-sigma > ', '% .6e',
self.mean+self.bounds[:, 0, 0])
write_h(h_info, self.indices, '1-sigma < ', '% .6e',
self.mean+self.bounds[:, 0, 1])
write_h(h_info, self.indices, '2-sigma > ', '% .6e',
self.mean+self.bounds[:, 1, 0])
write_h(h_info, self.indices, '2-sigma < ', '% .6e',
self.mean+self.bounds[:, 1, 1])
write_h(h_info, self.indices, '3-sigma > ', '% .6e',
self.mean+self.bounds[:, 2, 0])
write_h(h_info, self.indices, '3-sigma < ', '% .6e',
self.mean+self.bounds[:, 2, 1])
def write_v_info(self):
"""Write vertical info file"""
with open(self.v_info_path, 'w') as v_info:
v_info.write('%-15s\t: %-11s' % ('param names', 'R-1'))
v_info.write(' '.join(['%-11s' % elem for elem in [
'Best fit', 'mean', 'sigma', '1-sigma -', '1-sigma +',
'2-sigma -', '2-sigma +', '1-sigma >', '1-sigma <',
'2-sigma >', '2-sigma <']]))
for index, name in zip(self.indices, self.info_names):
v_info.write('\n%-15s\t: % .4e' % (name, self.R[index]))
v_info.write(' '.join(['% .4e' % elem for elem in [
self.bestfit[index], self.mean[index],
(self.bounds[index, 0, 1]-self.bounds[index, 0, 0])/2.,
self.bounds[index, 0, 0], self.bounds[index, 0, 1],
self.bounds[index, 1, 0], self.bounds[index, 1, 1],
self.mean[index]+self.bounds[index, 0, 0],
self.mean[index]+self.bounds[index, 0, 1],
self.mean[index]+self.bounds[index, 1, 0],
self.mean[index]+self.bounds[index, 1, 1]]]))
def write_tex(self):
"""Write a tex table containing the main results """
with open(self.tex_path, 'w') as tex:
tex.write("\\begin{tabular}{|l|c|c|c|c|} \n \\hline \n")
tex.write("Param & best-fit & mean$\pm\sigma$ ")
tex.write("& 95\% lower & 95\% upper \\\\ \\hline \n")
for index, name in zip(self.indices, self.tex_names):
tex.write("%s &" % name)
tex.write("$%.4g$ & $%.4g_{%.2g}^{+%.2g}$ " % (
self.bestfit[index], self.mean[index],
self.bounds[index, 0, 0], self.bounds[index, 0, 1]))
tex.write("& $%.4g$ & $%.4g$ \\\\ \n" % (
self.mean[index]+self.bounds[index, 1, 0],
self.mean[index]+self.bounds[index, 1, 1]))
tex.write("\\hline \n \\end{tabular} \\\\ \n")
tex.write("$-\ln{\cal L}_\mathrm{min} =%.6g$, " % (
self.min_minus_lkl))
tex.write("minimum $\chi^2=%.4g$ \\\\ \n" % (
self.min_minus_lkl*2.))
| mit |
budnyjj/bsuir_magistracy | disciplines/OTOS/lab_1/lab.py | 1 | 1813 | #!/usr/bin/env python
import functools
import math
import random
import numpy as np
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# 1D model
def model(x):
a = 2.7; d = 0.1; y_0 = 2
sigma = 0.001
result = y_0 - 0.04 * (x - a) - d * (x - a)**2
return result + random.gauss(0, sigma)
def search_asymmetric(model, start_x, num_iter=100):
next_x = cur_x = start_x
vals_x = [cur_x]
for k in range(num_iter):
alpha = (k + 1) ** (-1/3)
factor = (k + 1) ** (-2/3)
next_x = cur_x + factor * (model(cur_x + alpha) - model(cur_x))
cur_x = next_x
vals_x.append(cur_x)
return vals_x
def search_symmetric(model, start_x, num_iter=100):
next_x = cur_x = start_x
vals_x = [cur_x]
for k in range(num_iter):
alpha = (k + 1) ** (-1/3)
factor = (k + 1) ** (-2/3)
next_x = cur_x + factor * (model(cur_x + alpha) - model(cur_x - alpha))
cur_x = next_x
vals_x.append(cur_x)
return vals_x
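# Both searches above are Kiefer-Wolfowitz-type stochastic approximation
# schemes: the slope of the noisy model is probed with a finite difference of
# shrinking width alpha_k = (k+1)^(-1/3) and applied with a shrinking gain
# (k+1)^(-2/3), so the iterates drift towards the maximiser of the underlying
# quadratic despite the gaussian measurement noise. A minimal sketch of how
# the two variants compare (illustrative only, mirroring the plotting code
# further below):
#
#   final_asym = search_asymmetric(model, 10, 1000)[-1]
#   final_sym = search_symmetric(model, 10, 1000)[-1]
#   # both sequences should settle near the maximiser of the quadratic, the
#   # symmetric (central) difference usually with the smaller bias.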
NUM_ITER = 1000
MIN_X = 1; MAX_X = 10; NUM_X = 100
VALS_X = np.linspace(MIN_X, MAX_X, NUM_X)
model_vec = np.vectorize(model)
plt.plot(VALS_X, model_vec(VALS_X),
color='r', linestyle=' ',
marker='.', markersize=5,
label='model')
search_asymmetric_x = search_asymmetric(model, MAX_X, NUM_ITER)
plt.plot(search_asymmetric_x, model_vec(search_asymmetric_x),
color='g', marker='x', markersize=5,
label='asymmetric')
search_symmetric_x = search_symmetric(model, MAX_X, NUM_ITER)
plt.plot(search_symmetric_x, model_vec(search_symmetric_x),
color='b', marker='x', markersize=5,
label='symmetric')
plt.xlabel('$ x $')
plt.ylabel('$ y $')
plt.grid(True)
# plt.legend(loc=2)
plt.savefig('plot.png', dpi=200)
| gpl-3.0 |
u3099811/BaxterTictacToe | src/baxter_interface/src/joint_trajectory_action/bezier.py | 3 | 13110 | #! /usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Copyright (c) 2011, Ian McMahon
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Ian McMahon nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
"""
The Bezier library was implemented as a class project in CIS515,
Fundamentals of Linear Algebra, taught by Professor Jean Gallier
in the summer of 2011 at the University of Pennsylvania. For an
excellent explanation of Cubic Bezier Curves, and the math
represented in this library, see
http://www.cis.upenn.edu/~cis515/proj1-12.pdf
~~~~~~~~~~~~~~~~~~~~~~~~ Bezier ~~~~~~~~~~~~~~~~~~~~~~~~
A library for computing Bezier Cubic Splines for an arbitrary
set of control points in R2, R3, up to RN space.
Cubic Segment:
C(t) = (1 - t)^3*b0 + 3(1 - t)^2*t*b1 + 3(1 - t)*t^2*b2 + t^3*b3
Bezier Spline of Cubic Segments:
B(t) = C_(i)(t-i+1), i-1 <= t <= i
where C0 continuity exists: C_(i)(1) = C_(i+1)(0)
where C1 continuity exists: C'_(i)(1) = C'_(i+1)(0)
and where C2 continuity exists: C"_(i)(1) = C"_(i+1)(0)
ex. usage:
import numpy
import bezier
points_array = numpy.array([[1, 2, 3], [4, 4, 4],
[6, 4, 6], [2, 5, 6],
[5, 6, 7]])
d_pts = bezier.de_boor_control_pts(points_array)
b_coeffs = bezier.bezier_coefficients(points_array, d_pts)
b_curve = bezier.bezier_curve(b_coeffs, 50)
# plotting example
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
#plot bezier curve
ax.plot(b_curve[:,0], b_curve[:,1], b_curve[:,2])
#plot specified points
ax.plot(points_array[:,0], points_array[:,1], points_array[:,2], 'g*')
ax.set_title("Cubic Bezier Spline")
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
ax.legend(["Bezier Curve", "Control Points"], loc=2)
plt.show()
"""
import numpy as np
def de_boor_control_pts(points_array, d0=None,
dN=None, natural=True):
"""
Compute the de Boor control points for a given
set for control points
params:
points_array: array of user-supplied control points
numpy.array of size N by k
N is the number of input control points
k is the number of dimensions for each point
d0: the first control point - None if "natural"
numpy.array of size 1 by k
dN: the last control point - None if "natural"
numpy.array of size 1 by k
natural: flag to signify natural start/end conditions
bool
returns:
d_pts: array of de Boor control points
numpy.array of size N+3 by k
"""
# N+3 auxiliary points required to compute d_pts
# dpts_(-1) = x_(0)
# dpts_(N+1) = x_(N)
    # so it is only necessary to find N+1 pts, dpts_(0) to dpts_(N)
(rows, k) = np.shape(points_array)
N = rows - 1 # minus 1 because list includes x_(0)
# Compute A matrix
if natural:
if N > 2:
A = np.zeros((N-1, N-1))
A[np.ix_([0], [0, 1])] = [4, 1]
A[np.ix_([N-2], [N-3, N-2])] = [1, 4]
else:
A = 4.0
else:
if N > 2:
A = np.zeros((N-1, N-1))
A[np.ix_([0], [0, 1])] = [3.5, 1]
A[np.ix_([N-2], [N-3, N-2])] = [1, 3.5]
else:
A = 3.5
for i in range(1, N-2):
A[np.ix_([i], [i-1, i, i+1])] = [1, 4, 1]
# Construct de Boor Control Points from A matrix
d_pts = np.zeros((N+3, k))
for col in range(0, k):
x = np.zeros((max(N-1, 1), 1))
if N > 2:
# Compute start / end conditions
if natural:
x[N-2, 0] = 6*points_array[-2, col] - points_array[-1, col]
x[0, 0] = 6*points_array[1, col] - points_array[0, col]
else:
x[N-2, 0] = 6*points_array[-2, col] - 1.5*dN[0, col]
x[0, 0] = 6*points_array[1, col] - 1.5*d0[0, col]
x[range(1, N-3+1), 0] = 6*points_array[range(2, N-2+1), col]
# Solve bezier interpolation
d_pts[2:N+1, col] = np.linalg.solve(A, x).T
else:
# Compute start / end conditions
if natural:
x[0, 0] = 6*points_array[1, col] - points_array[0, col]
else:
x[0, 0] = 6*points_array[1, col] - 1.5*d0[col]
# Solve bezier interpolation
d_pts[2, col] = x / A
# Store off start and end positions
d_pts[0, :] = points_array[0, :]
d_pts[-1, :] = points_array[-1, :]
# Compute the second to last de Boor point based on end conditions
if natural:
one_third = (1.0/3.0)
two_thirds = (2.0/3.0)
d_pts[1, :] = (two_thirds)*points_array[0, :] + (one_third)*d_pts[2, :]
d_pts[N+1, :] = ((one_third)*d_pts[-3, :] +
(two_thirds)*points_array[-1, :])
else:
d_pts[1, :] = d0
d_pts[N+1, :] = dN
return d_pts
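# Illustrative note (commented out, values follow the module docstring
# example): with natural=True only the interior control points are needed,
# since free/natural end conditions are applied; passing explicit d0 and dN
# with natural=False clamps the first and last de Boor points instead:
#
#   d0 = np.array([[1.0, 2.0, 3.0]])
#   dN = np.array([[5.0, 6.0, 7.0]])
#   d_pts_clamped = de_boor_control_pts(points_array, d0=d0, dN=dN,
#                                       natural=False)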
def bezier_coefficients(points_array, d_pts):
"""
Compute the Bezier coefficients for a given
set for user-supplied control pts and
de Boor control pts.
These B coeffs are used to compute the cubic
splines for each cubic spline segment as
follows (where t is a percentage of time between
b_coeff segments):
        C(t) = (1 - t)^3*b0 + 3(1 - t)^2*t*b1
+ 3(1 - t)*t^2*b2 + t^3*b3
params:
points_array: array of user-supplied control points
numpy.array of size N by k
N is the number of control points
k is the number of dimensions for each point
d_pts: array of de Boor control points
numpy.array of size N+3 by k
returns:
b_coeffs: k-dimensional array of 4 Bezier coefficients
for every control point
numpy.array of size N by 4 by k
"""
(rows, k) = np.shape(points_array)
N = rows - 1 # N minus 1 because points array includes x_0
b_coeffs = np.zeros(shape=(k, N, 4))
for i in range(0, N):
points_array_i = i+1
d_pts_i = i + 2
if i == 0:
for axis_pos in range(0, k):
b_coeffs[axis_pos, i, 0] = points_array[points_array_i - 1,
axis_pos]
b_coeffs[axis_pos, i, 1] = d_pts[d_pts_i - 1, axis_pos]
b_coeffs[axis_pos, i, 2] = (0.5 * d_pts[d_pts_i - 1, axis_pos]
+ 0.5 * d_pts[d_pts_i, axis_pos])
b_coeffs[axis_pos, i, 3] = points_array[points_array_i,
axis_pos]
elif i == N-1:
for axis_pos in range(0, k):
b_coeffs[axis_pos, i, 0] = points_array[points_array_i - 1,
axis_pos]
b_coeffs[axis_pos, i, 1] = (0.5 * d_pts[d_pts_i - 1, axis_pos]
+ 0.5 * d_pts[d_pts_i, axis_pos])
b_coeffs[axis_pos, i, 2] = d_pts[d_pts_i, axis_pos]
b_coeffs[axis_pos, i, 3] = points_array[points_array_i,
axis_pos]
else:
for axis_pos in range(0, k):
b_coeffs[axis_pos, i, 0] = points_array[points_array_i - 1,
axis_pos]
b_coeffs[axis_pos, i, 1] = (2.0/3.0 * d_pts[d_pts_i - 1,
axis_pos]
+ 1.0/3.0 * d_pts[d_pts_i,
axis_pos])
b_coeffs[axis_pos, i, 2] = (1.0/3.0 * d_pts[d_pts_i - 1,
axis_pos]
+ 2.0/3.0 * d_pts[d_pts_i,
axis_pos])
b_coeffs[axis_pos, i, 3] = points_array[points_array_i,
axis_pos]
return b_coeffs
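# Hypothetical helper (defined for illustration only, never called): spells out
# the coefficient layout produced above. b_coeffs is indexed as
# (axis, segment, b0..b3), one cubic Bezier segment per pair of consecutive
# user-supplied waypoints.
def _bezier_coefficients_layout_example(points_array, d_pts):
    b_coeffs = bezier_coefficients(points_array, d_pts)
    k = points_array.shape[1]               # dimensions per point
    n_segments = points_array.shape[0] - 1  # N
    assert b_coeffs.shape == (k, n_segments, 4)
    return b_coeffs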
def _cubic_spline_point(b_coeff, t):
"""
Internal convenience function for calculating
a k-dimensional point defined by the supplied
Bezier coefficients. Finds the point that
describes the current position along the bezier
segment for k dimensions.
params:
b_coeff => b0...b3: Four k-dimensional Bezier
coefficients each one is a numpy.array
of size k by 1, so
b_coeff is a numpy array of size k by 4
k is the number of dimensions for each
coefficient
t: percentage of time elapsed for this segment
        0.0 <= t <= 1.0
returns:
current position in k dimensions
numpy.array of size 1 by k
"""
return (pow((1-t), 3)*b_coeff[:, 0] +
3*pow((1-t), 2)*t*b_coeff[:, 1] +
3*(1-t)*pow(t, 2)*b_coeff[:, 2] +
pow(t, 3)*b_coeff[:, 3]
)
def bezier_point(b_coeffs, b_index, t):
"""
Finds the k values that describe the current
position along the bezier curve for k dimensions.
params:
b_coeffs: k-dimensional array
for every control point with 4 Bezier coefficients
numpy.array of size k by N by 4
N is the number of control points
k is the number of dimensions for each point
b_index: index position out between two of
the N b_coeffs for this point in time
int
t: percentage of time that has passed between
the two control points
        0.0 <= t <= 1.0
returns:
b_point: current position in k dimensions
numpy.array of size 1 by k
"""
if b_index <= 0:
b_point = b_coeffs[:, 0, 0]
elif b_index > b_coeffs.shape[1]:
b_point = b_coeffs[:, -1, -1]
else:
t = 0.0 if t < 0.0 else t
t = 1.0 if t > 1.0 else t
b_coeff_set = b_coeffs[:, b_index-1, range(4)]
b_point = _cubic_spline_point(b_coeff_set, t)
return b_point
def bezier_curve(b_coeffs, num_intervals):
"""
    Interpolation of the entire Bezier curve at once,
using a specified number of intervals between
control points (encapsulated by b_coeffs).
params:
b_coeffs: k-dimensional array of 4 Bezier coefficients
for every control point
        numpy.array of size k by N by 4
N is the number of control points
k is the number of dimensions for each point
num_intervals: the number of intervals between
control points
int > 0
returns:
b_curve: positions along the bezier curve in k-dimensions
        numpy.array of size N*num_intervals+1 by k
(the +1 is to include the start position on the curve)
"""
assert num_intervals > 0,\
"Invalid number of intervals chosen (must be greater than 0)"
interval = 1.0 / num_intervals
(num_axes, num_bpts, _) = np.shape(b_coeffs)
b_curve = np.zeros((num_bpts*num_intervals+1, num_axes))
# Copy out initial point
b_curve[0, :] = b_coeffs[:, 0, 0]
for current_bpt in range(num_bpts):
b_coeff_set = b_coeffs[:, current_bpt, range(4)]
for iteration, t in enumerate(np.linspace(interval, 1,
num_intervals)):
b_curve[(current_bpt *
num_intervals +
iteration+1), :] = _cubic_spline_point(b_coeff_set, t)
return b_curve
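# Minimal end-to-end sampling sketch (hypothetical helper, never called): given
# b_coeffs of shape (k, N, 4) from bezier_coefficients(), evaluate single points
# with bezier_point() or the whole spline with bezier_curve().
def _bezier_sampling_example(b_coeffs):
    # Midpoint of the first segment; b_index counts segments starting at 1.
    mid_first_segment = bezier_point(b_coeffs, 1, 0.5)
    # Ten samples per segment; result has shape (N*10 + 1, k), start point included.
    full_curve = bezier_curve(b_coeffs, num_intervals=10)
    return mid_first_segment, full_curve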
| apache-2.0 |
jdavidrcamacho/Tests_GP | 08 - Thesis results/speed_test6.py | 1 | 5414 | import Gedi as gedi
import george
import numpy as np;
import matplotlib.pylab as pl; pl.close('all')
from time import time,sleep
import scipy.optimize as op
import sys
##### INITIAL DATA ###########################################################
nrep = 1
pontos=[]
temposQP=[]
temposmulti=[]
georgeQP=[]
sleeptime=10
lista=[10,20,50,100,200,500]
#for i in np.arange(100,650,200):
#for i in np.arange(100,1400,350):
### Functions george
# Define the objective function (negative log-likelihood in this case).
def nll(p):
# Update the kernel parameters and compute the likelihood.
gp.kernel[:] = p
ll = gp.lnlikelihood(y, quiet=True)
# The scipy optimizer doesn't play well with infinities.
return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll(p):
# Update the kernel parameters and compute the likelihood.
gp.kernel[:] = p
return -gp.grad_lnlikelihood(y, quiet=True)
### Functions gedi
def nll_gedi(p):
global kernel
# Update the kernel parameters and compute the likelihood.
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(p))
ll = gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
# The scipy optimizer doesn't play well with infinities.
return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll_gedi(p):
global kernel
# Update the kernel parameters and compute the likelihood.
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(p))
return -np.array(gedi.kernel_likelihood.gradient_likelihood(kernel,x,y,yerr))
###############################################################################
### Things to run
for i0, i in enumerate(lista):
f=open("{0}.txt".format(i),"w")
sys.stdout = f
print i
pontos.append(i)
print 'pontos', pontos
x = 10 * np.sort(np.random.rand(2*i))
yerr = 0.2 * np.ones_like(x)
y = np.sin(x) + yerr * np.random.randn(len(x))
av = []
for _ in range(nrep):
start= time()
kernel= gedi.kernel.QuasiPeriodic(15.0,2.0,1.0,10.0)
print 'Initial gedi kernel =',kernel
print 'Initial gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
# Run the optimization routine.
p0_gedi = np.log(kernel.pars)
results_gedi = op.minimize(nll_gedi, p0_gedi, jac=grad_nll_gedi)
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(results_gedi.x))
print 'Final gedi kernel =',kernel
print 'Final gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
print
tempo1= time() - start
av.append(tempo1)
temposQP.append(sum(av) / float(nrep))
print 'temposQP', temposQP
sleep(sleeptime*i0)
###############################################################################
av = []
for _ in range(nrep):
start= time()
kernel= gedi.kernel.ExpSineSquared(15.0, 2.0, 10.0)* \
gedi.kernel.ExpSquared(1.0,1.0)
print 'Initial gedi kernel =',kernel
print 'Initial gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
# Run the optimization routine.
p0_gedi = np.log(kernel.pars)
results_gedi = op.minimize(nll_gedi, p0_gedi, jac=grad_nll_gedi)
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(results_gedi.x))
print 'Final gedi kernel =',kernel
print 'Final gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
print
tempo1= time() - start
av.append(tempo1)
temposmulti.append(sum(av) / float(nrep))
print 'temposmult', temposmulti
sleep(sleeptime*i0)
av = []
for _ in range(nrep):
start = time() # Calculation using george
kernelg1 = 15.0**2*george.kernels.ExpSine2Kernel(2/2.0**2,10.0)* \
george.kernels.ExpSquaredKernel(1.0**2)
# You need to compute the GP once before starting the optimization.
gp = george.GP(kernelg1, mean=np.mean(y))
gp.compute(x,yerr)
# Print the initial ln-likelihood.
print 'Initial george kernel', kernelg1
print 'Initial george likelihood', gp.lnlikelihood(y)
# Run the optimization routine.
p0 = gp.kernel.vector
results = op.minimize(nll, p0, jac=grad_nll)
# Update the kernel and print the final log-likelihood.
gp.kernel[:] = results.x
print 'Final george kernel =',gp.kernel
print 'Final george likelihood= ', gp.lnlikelihood(y)
print
tempog1= time() - start
av.append(tempog1)
georgeQP.append(sum(av) / float(nrep))
print 'georgeQP', georgeQP
###########################################################################
sys.stdout = sys.__stdout__
f.close()
sleep(sleeptime*i0)
N = pontos
pl.figure()
pl.loglog(N, temposQP, 'r-')
pl.loglog(N, temposmulti, 'b-o')
pl.loglog(N, georgeQP, 'b--')
pl.xlim(0.9*N[0], 1.1*N[-1])
pl.xlabel('Number of points')
pl.ylabel('Time')
#pl.title('Covariance matrix calculations')
pl.legend(['gedi QP', 'gedi ESS*ES','george ESS*ES'],loc='upper left')
pl.xticks(fontsize = 18);pl.yticks(fontsize=18)
pl.savefig('speedtest_6.png')
#pl.close('all') | mit |
sujithvm/internationality-journals | src/get_journal_list_Aminer.py | 3 | 1586 | __author__ = 'Sukrit'
import bson
import pandas as pd
import numpy as np
#import matplotlib.pyplot as plt
#from scipy.optimize import curve_fit
ELElist = []
with open('../data/Elsevier_journal_list.csv', 'r') as file :
x = file.readlines()
for line in x :
#print line
line = line.replace('&','and') #converting & to 'and' [UGH]
ELElist.append(line.rstrip()) #remove whitespaces
import pymongo
client = pymongo.MongoClient("localhost", 27017)
# db name - aminer
db = client.aminer
# collection
db.publications
jlist = []
i = 0
flag = False
for jname in ELElist :
flag = False
try :
if db.publications.find_one(filter = {'publication' : jname},limit = 1) != None :
flag = True
except bson.errors.InvalidStringData :
print "[ERROR] Could not insert value: " + jname
else :
if flag == True :
jlist.append(jname)
print "[INFO] Value found: " + jname
i += 1
print i
with open ("../output/both_journal_list.txt","w")as file:
for line in jlist:
file.write(line+"\n")
'''
cursor = db.publications.find()
for document in cursor :
if document['publication'] not in jlist :
if document['publication'] in ELElist :
jlist.append(document['publication'])
print document['publication']
'''
'''
citable_items = list(db.publications.find({"publication" : P})
citable_items_ids = []
for cite in citable_items : citable_items_ids.append(cite['index'])
'''
#print "[DEBUG] Number of papers ", len(papers)
#print papers
| mit |
hainm/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
MagnusS/mirage-bench | test-jitsu/plot.py | 1 | 1208 | #!/usr/bin/env python
import sys
print "# Creating graphs from stdin (requires matplotlib)"
results = {}
for filename in sys.argv[1:]:
results[filename] = []
with open(filename) as f:
for l in f:
line = l.strip()
if len(line) == 0 or line[0] == '#':
continue
if l[0] == "!":
print "Warning: Some results are invalid:"
print l
continue
results[filename].append(float(l) * 1000)
print results
import matplotlib.pyplot as plt
import numpy as np
#fig,ax = plt.subplots()
name = {}
name["processed_results_warm.dat"] = "Jitsu warm start"
name["processed_results_cold.dat"] = "Jitsu cold start wo/synjitsu"
name["processed_results_http_warm.dat"] = "Jitsu warm start (http)"
name["processed_results_http_cold.dat"] = "Jitsu cold start wo/synjitsu (http)"
plt.title('Time from DNS query to first packet of HTTP response')
for t in results:
title = t
if t in name:
title = name[t]
r = results[t]
print "Plotting",r,"==",len(r)
maxval = 1500
bins = 20
binwidth = maxval / bins
plt.hist(r, bins=range(1, maxval+binwidth, binwidth), label=title)
plt.legend(loc="best")
plt.ylabel("Results")
plt.xlabel("Time in milliseconds")
plt.savefig("jitsu.pdf")
plt.show()
| isc |
fredhusser/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
mworks/mworks | examples/Examples/FindTheCircle/analysis/Python/selection_counts.py | 1 | 1241 | import sys
from matplotlib import pyplot
import numpy
sys.path.insert(0, '/Library/Application Support/MWorks/Scripting/Python')
from mworks.data import MWKFile
def selection_counts(filename):
with MWKFile(filename) as f:
r_codec = f.reverse_codec
red_code = r_codec['red_selected']
green_code = r_codec['green_selected']
blue_code = r_codec['blue_selected']
red_count = 0
green_count = 0
blue_count = 0
for evt in f.get_events_iter(codes=[red_code, green_code, blue_code]):
if evt.data:
if evt.code == red_code:
red_count += 1
elif evt.code == green_code:
green_count += 1
else:
assert evt.code == blue_code
blue_count += 1
index = numpy.arange(3)
pyplot.bar(index,
[red_count, green_count, blue_count],
0.5,
color = ['r', 'g', 'b'],
align = 'center')
pyplot.xticks(index, ['Red', 'Green', 'Blue'])
pyplot.title('Selection Counts')
pyplot.show()
if __name__ == '__main__':
selection_counts(sys.argv[1])
| mit |
vtsuperdarn/davitpy | davitpy/pydarn/proc/music/music.py | 2 | 85275 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""music processing module
A module for running the MUltiple SIgnal Classification (MUSIC) algorithm for the detection of
MSTIDs and wave-like structures in SuperDARN data.
For usage examples, please see the iPython notebooks included in the docs folder of the DaViTPy distribution.
References
----------
See Samson et al. [1990] and Bristow et al. [1994] for details regarding the MUSIC algorithm and SuperDARN-observed MSTIDs.
Bristow, W. A., R. A. Greenwald, and J. C. Samson (1994), Identification of high-latitude acoustic gravity wave sources
using the Goose Bay HF Radar, J. Geophys. Res., 99(A1), 319-331, doi:10.1029/93JA01470.
Samson, J. C., R. A. Greenwald, J. M. Ruohoniemi, A. Frey, and K. B. Baker (1990), Goose Bay radar observations of Earth-reflected,
atmospheric gravity waves in the high-latitude ionosphere, J. Geophys. Res., 95(A6), 7693-7709, doi:10.1029/JA095iA06p07693.
Module author:: Nathaniel A. Frissell, Fall 2013
Functions
--------------------------------------------------------------------------------------------------------------------------
getDataSet get music data object from music array object
stringify_signal convert dictionary to a string
stringify_signal_list convert list of dictionaries into strings
beamInterpolation interpolate music array object along beams
defineLimits set limits for chosen data set
checkDataQuality mark data as bad base on radar operations
applyLimits remove data outside of limits
determineRelativePosition find center of cell in music array object
timeInterpolation interpolate music array object along time
filterTimes calculate time range for data set
detrend linear detrend of music array/data object
nan_to_num convert undefined numbers to finite numbers
windowData apply window to music array object
calculateFFT calculate spectrum of an object
calculateDlm calculate the cross-spectral matrix of a musicArray/musicDataObj object.
calculateKarr calculate the two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
simulator insert a simulated MSTID into the processing chain.
scale_karr scale/normalize kArr for plotting and signal detection.
detectSignals detect local maxima of signals
add_signal add signal to detected signal list
del_signal remove signal from detected signal list
--------------------------------------------------------------------------------------------------------------------------
Classes
-----------------------------------------------------------
emptyObj create an empty object
SigDetect information about detected signals
musicDataObj basic container for holding MUSIC data.
musicArray container object for holding musicDataObj's
filter a filter object for VT sig/siStruct objects
-----------------------------------------------------------
"""
import numpy as np
import datetime
import time
import copy
import logging
Re = 6378 #Earth radius
def getDataSet(dataObj,dataSet='active'):
"""Returns a specified musicDataObj from a musicArray object. If the musicArray object has the exact attribute
specified in the dataSet keyword, then that attribute is returned. If not, all attributes of the musicArray object
will be searched for attributes which contain the string specified in the dataSet keyword. If more than one are
found, the last attribute of a sorted list will be returned. If no attributes are found which contain the specified
string, the 'active' dataSet is returned.
Parameters
----------
dataObj : musicArray
dataSet : Optional[str]
which dataSet in the musicArray object to process
Returns
-------
currentData : musicDataObj object
Written by Nathaniel A. Frissell, Fall 2013
"""
lst = dir(dataObj)
if dataSet not in lst:
tmp = []
for item in lst:
if dataSet in item:
tmp.append(item)
if len(tmp) == 0:
dataSet = 'active'
else:
tmp.sort()
dataSet = tmp[-1]
currentData = getattr(dataObj,dataSet)
return currentData
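# Hypothetical helper (never called) sketching typical use of getDataSet(): any
# substring of a data set name is accepted, so 'originalFit' matches
# 'DS000_originalFit', and an unrecognised string falls back to the 'active' set.
def _example_getDataSet_usage(dataObj):
    return getDataSet(dataObj, 'originalFit')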
class emptyObj(object):
"""Create an empty object.
"""
def __init__(self):
pass
def stringify_signal(sig):
"""Method to convert a signal information dictionary into a string.
Parameters
----------
sig : dict
Information about a detected signal.
Returns
-------
sigInfo : str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
sigInfo = {}
if sig.has_key('order'):
sigInfo['order'] = '%d' % sig['order'] #Order of signals by strength as detected by image detection algorithm
if sig.has_key('kx'):
sigInfo['kx'] = '%.5f' % sig['kx']
if sig.has_key('ky'):
sigInfo['ky'] = '%.5f' % sig['ky']
if sig.has_key('k'):
sigInfo['k'] = '%.3f' % sig['k']
if sig.has_key('lambda'):
if np.isinf(sig['lambda']):
sigInfo['lambda'] = 'inf'
else:
sigInfo['lambda'] = '%d' % np.round(sig['lambda']) # km
if sig.has_key('lambda_x'):
if np.isinf(sig['lambda_x']):
sigInfo['lambda_x'] = 'inf'
else:
sigInfo['lambda_x'] = '%d' % np.round(sig['lambda_x']) # km
if sig.has_key('lambda_y'):
if np.isinf(sig['lambda_y']):
sigInfo['lambda_y'] = 'inf'
else:
sigInfo['lambda_y'] = '%d' % np.round(sig['lambda_y']) # km
if sig.has_key('azm'):
sigInfo['azm'] = '%d' % np.round(sig['azm']) # degrees
if sig.has_key('freq'):
sigInfo['freq'] = '%.2f' % (sig['freq']*1000.) # mHz
if sig.has_key('period'):
sigInfo['period'] = '%d' % np.round(sig['period']/60.) # minutes
if sig.has_key('vel'):
if np.isinf(np.round(sig['vel'])):
sigInfo['vel'] = 'Inf'
else:
sigInfo['vel'] = '%d' % np.round(sig['vel']) # km/s
if sig.has_key('area'):
sigInfo['area'] = '%d' % sig['area'] # Pixels
if sig.has_key('max'):
sigInfo['max'] = '%.4f' % sig['max'] # Value from kArr in arbitrary units, probably with some normalization
if sig.has_key('maxpos'):
sigInfo['maxpos'] = str(sig['maxpos']) # Index position in kArr of maximum value.
if sig.has_key('labelInx'):
sigInfo['labelInx'] = '%d' % sig['labelInx'] # Label value from image processing
if sig.has_key('serialNr'):
sigInfo['serialNr'] = '%d' % sig['serialNr'] # Label value from image processing
return sigInfo
def stringify_signal_list(signal_list,sort_key='order'):
"""Method to convert a list of signal dictionaries into strings.
Parameters
----------
signal_list : list of dict
Information about a detected signal.
sort_key : Optional[string]
Dictionary key to sort on, or None for no sort. 'order' will sort the signal list
from strongest signal to weakest, as determined by the MUSIC algorithm.
Returns
-------
stringInfo : list of str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
string_info = []
if sort_key is not None:
orders = [x[sort_key] for x in signal_list]
orders.sort()
for order in orders:
for sig in signal_list:
if sig[sort_key] == order:
string_info.append(stringify_signal(sig))
signal_list.remove(sig)
else:
for sig in signal_list:
string_info.append(stringify_signal(sig))
return string_info
class SigDetect(object):
"""Class to hold information about detected signals.
Methods
-------
string
reorder
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self):
pass
def string(self):
"""Method to convert a list of signal dictionaries into strings.
Returns
-------
stringInfo : list of str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
return stringify_signal_list(self.info)
def reorder(self):
"""Method to sort items in .info by signal maximum value (from the scaled kArr) and update nrSignals.
Written by Nathaniel A. Frissell, Fall 2013
"""
#Do the sorting...
from operator import itemgetter
newlist = sorted(self.info,key=itemgetter('max'),reverse=True)
#Put in the order numbers...
order = 1
for item in newlist:
item['order'] = order
order = order + 1
#Save the list to the dataObj...
self.info = newlist
#Update the nrSigs
self.nrSigs = len(newlist)
class musicDataObj(object):
"""This class is the basic container for holding MUSIC data.
Parameters
----------
time : list of datetime.datetime
list of times corresponding to data
data : numpy.array
3-dimensional array of data
fov : Optional[pydarn.radar.radFov.fov]
Radar field-of-view object.
comment : Optional[str]
String to be appended to the history of this object
parent : Optional[musicArray]
reference to parent musicArray object
**metadata
keywords sent to matplot lib, etc.
Attributes
----------
time : numpy.array of datetime.datetime
numpy array of times corresponding to data
data : numpy.array
3-dimensional array of data
fov : Optional[pydarn.radar.radFov.fov]
Radar field-of-view object.
metadata : dict
keywords sent to matplot lib, etc.
history : dict
Methods
---------
copy
setActive
nyquistFrequency
samplePeriod
applyLimits
setMetadata
printMetadata
appendHistory
printHistory
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self, time, data, fov=None, comment=None, parent=0, **metadata):
self.parent = parent
self.time = np.array(time)
self.data = np.array(data)
self.fov = fov
self.metadata = {}
for key in metadata: self.metadata[key] = metadata[key]
self.history = {datetime.datetime.now():comment}
def copy(self,newsig,comment):
"""Copy a musicDataObj object. This deep copies data and metadata, updates the serial
number, and logs a comment in the history. Methods such as plot are kept as a reference.
Parameters
----------
newsig : str
Name for the new musicDataObj object.
comment : str
Comment describing the new musicDataObj object.
Returns
-------
newsigobj : musicDataObj
Copy of the original musicDataObj with new name and history entry.
Written by Nathaniel A. Frissell, Fall 2013
"""
serial = self.metadata['serial'] + 1
newsig = '_'.join(['DS%03d' % serial,newsig])
setattr(self.parent,newsig,copy.copy(self))
newsigobj = getattr(self.parent,newsig)
newsigobj.time = copy.deepcopy(self.time)
newsigobj.data = copy.deepcopy(self.data)
newsigobj.fov = copy.deepcopy(self.fov)
newsigobj.metadata = copy.deepcopy(self.metadata)
newsigobj.history = copy.deepcopy(self.history)
newsigobj.metadata['dataSetName'] = newsig
newsigobj.metadata['serial'] = serial
newsigobj.history[datetime.datetime.now()] = '['+newsig+'] '+comment
return newsigobj
def setActive(self):
"""Sets this signal as the currently active signal.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.parent.active = self
def nyquistFrequency(self,timeVec=None):
"""Calculate the Nyquist frequency of a vt sigStruct signal.
Parameters
----------
timeVec : Optional[list of datetime.datetime]
List of datetime.datetime to use instead of self.time.
Returns
-------
nq : float
Nyquist frequency of the signal in Hz.
Written by Nathaniel A. Frissell, Fall 2013
"""
dt = self.samplePeriod(timeVec=timeVec)
nyq = float(1. / (2*dt))
return nyq
def samplePeriod(self,timeVec=None):
"""Calculate the sample period of a vt sigStruct signal.
Parameters
----------
timeVec : Optional[list of datetime.datetime]
List of datetime.datetime to use instead of self.time.
Returns
-------
samplePeriod : float
samplePeriod: sample period of signal in seconds.
Written by Nathaniel A. Frissell, Fall 2013
"""
if timeVec == None: timeVec = self.time
diffs = np.diff(timeVec)
diffs_unq = np.unique(diffs)
self.diffs = diffs_unq
if len(diffs_unq) == 1:
samplePeriod = diffs[0].total_seconds()
else:
diffs_sec = np.array([x.total_seconds() for x in diffs])
maxDt = np.max(diffs_sec)
avg = np.mean(diffs_sec)
md = self.metadata
warn = 'WARNING'
if md.has_key('title'): warn = ' '.join([warn,'FOR','"'+md['title']+'"'])
logging.warning(warn + ':')
logging.warning(' Date time vector is not regularly sampled!')
logging.warning(' Maximum difference in sampling rates is ' + str(maxDt) + ' sec.')
logging.warning(' Using average sampling period of ' + str(avg) + ' sec.')
samplePeriod = avg
return samplePeriod
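    # Quick numerical illustration (comments only): for a data set sampled
    # every 120 s, samplePeriod() returns 120.0 and nyquistFrequency()
    # returns 1.0/(2*120.0) ~= 0.00417 Hz, so oscillations with periods
    # shorter than about four minutes cannot be resolved.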
def applyLimits(self,rangeLimits=None,gateLimits=None,timeLimits=None,newDataSetName='limitsApplied',comment='Limits Applied'):
"""Removes data outside of the rangeLimits, gateLimits, and timeLimits boundaries.
Parameters
----------
        rangeLimits : Optional[iterable]
            Two-element array defining the maximum and minimum slant ranges to use. [km]
        gateLimits : Optional[iterable]
            Two-element array defining the maximum and minimum gates to use.
        timeLimits : Optional[iterable]
            Two-element array of datetime.datetime objects defining the maximum and minimum times to use.
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
Returns
-------
newMusicDataObj : musicDataObj
New musicDataObj. The musicDataObj is also stored in it's parent musicArray object.
Written by Nathaniel A. Frissell, Fall 2013
"""
return applyLimits(self.parent,self.metadata['dataSetName'],rangeLimits=rangeLimits,gateLimits=gateLimits,timeLimits=timeLimits,newDataSetName=newDataSetName,comment=comment)
def setMetadata(self,**metadata):
"""Adds information to the current musicDataObj's metadata dictionary.
Metadata affects various plotting parameters and signal processing routinges.
Parameters
----------
**metadata :
keywords sent to matplot lib, etc.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.metadata = dict(self.metadata.items() + metadata.items())
def printMetadata(self):
"""Nicely print all of the metadata associated with the current musicDataObj object.
Written by Nathaniel A. Frissell, Fall 2013
"""
keys = self.metadata.keys()
keys.sort()
for key in keys:
print key+':',self.metadata[key]
def appendHistory(self,comment):
"""Add an entry to the processing history dictionary of the current musicDataObj object.
Parameters
----------
comment : string
            Information to add to the history dictionary.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.history[datetime.datetime.now()] = '['+self.metadata['dataSetName']+'] '+comment
def printHistory(self):
"""Nicely print all of the processing history associated with the current musicDataObj object.
Written by Nathaniel A. Frissell, Fall 2013
"""
keys = self.history.keys()
keys.sort()
for key in keys:
print key,self.history[key]
class musicArray(object):
"""This class is the basic container for holding MUSIC data.
Parameters
----------
myPtr : pydarn.sdio.radDataTypes.radDataPtr
contains the pipeline to the data we are after
sTime : Optional[datetime.datetime]
start time UT (if None myPtr.sTime is used)
eTime : Optional[datetime.datetime]
end time UT (if None myPtr.eTime is used)
param : Optional[str]
Radar FIT parameter to load and process. Any appropriate attribute of the
FIT data structure is allowed.
gscat : Optional[int]
Ground scatter flag.
0: all backscatter data
1: ground backscatter only
2: ionospheric backscatter only
3: all backscatter data with a ground backscatter flag.
fovElevation : Optional[float]
Passed directly to pydarn.radar.radFov.fov()
fovModel : Optional[str]
Scatter mapping model.
GS : Ground Scatter Mapping Model. See Bristow et al. [1994] (default)
IS : Standard SuperDARN scatter mapping model.
S : Standard projection model
E1 : for Chisham E-region 1/2-hop ionospheric projection model
F1 : for Chisham F-region 1/2-hop ionospheric projection model
F3 : for Chisham F-region 1 1/2-hop ionospheric projection model
C : Chisham projection model
None : if you trust your elevation or altitude values
fovCoords : Optional[str]
Map coordinate system. WARNING: 'geo' is curently only tested coordinate system.
full_array : Optional[bool]
If True, make the data array the full beam, gate dimensions listed in the hdw.dat file.
If False, truncate the array to the maximum dimensions that there is actually data.
False will save space without throwing out any data, but sometimes it is easier to work
with the full-size array.
Attributes
----------
messages : list
    prm : emptyObj
        Radar operating parameters recorded for each beam sounding.
Methods
-------
get_data_sets
Example
-------
#Set basic event parameters.
rad ='wal'
sTime = datetime.datetime(2011,5,9,8,0)
eTime = datetime.datetime(2011,5,9,19,0)
#Connect to a SuperDARN data source.
myPtr = pydarn.sdio.radDataOpen(sTime,rad,eTime=eTime)
#Create the musicArray Object.
dataObj = music.musicArray(myPtr,fovModel='GS')
References
----------
Bristow, W. A., R. A. Greenwald, and J. C. Samson (1994), Identification of high-latitude acoustic gravity wave sources
using the Goose Bay HF Radar, J. Geophys. Res., 99(A1), 319-331, doi:10.1029/93JA01470.
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self,myPtr,sTime=None,eTime=None,param='p_l',gscat=1,
fovElevation=None,fovModel='GS',fovCoords='geo',full_array=False):
from davitpy import pydarn
# Create a list that can be used to store top-level messages.
self.messages = []
no_data_message = 'No data for this time period.'
# If no data, report and return.
if myPtr is None:
self.messages.append(no_data_message)
return
if sTime == None: sTime = myPtr.sTime
if eTime == None: eTime = myPtr.eTime
scanTimeList = []
dataList = []
cpidList = []
#Subscripts of columns in the dataList/dataArray
scanInx = 0
dateInx = 1
beamInx = 2
gateInx = 3
dataInx = 4
beamTime = sTime
scanNr = np.uint64(0)
fov = None
# Create a place to store the prm data.
prm = emptyObj()
prm.time = []
prm.mplgs = []
prm.nave = []
prm.noisesearch = []
prm.scan = []
prm.smsep = []
prm.mplgexs = []
prm.xcf = []
prm.noisesky = []
prm.rsep = []
prm.mppul = []
prm.inttsc = []
prm.frang = []
prm.bmazm = []
prm.lagfr = []
prm.ifmode = []
prm.noisemean = []
prm.tfreq = []
prm.inttus = []
prm.rxrise = []
prm.mpinc = []
prm.nrang = []
while beamTime < eTime:
#Load one scan into memory.
# myScan = pydarn.sdio.radDataRead.radDataReadScan(myPtr)
myScan = myPtr.readScan()
if myScan == None: break
goodScan = False # This flag turns to True as soon as good data is found for the scan.
for myBeam in myScan:
#Calculate the field of view if it has not yet been calculated.
if fov == None:
radStruct = pydarn.radar.radStruct.radar(radId=myPtr.stid)
site = pydarn.radar.radStruct.site(radId=myPtr.stid,dt=sTime)
fov = pydarn.radar.radFov.fov(frang=myBeam.prm.frang, rsep=myBeam.prm.rsep, site=site,elevation=fovElevation,model=fovModel,coords=fovCoords)
#Get information from each beam in the scan.
beamTime = myBeam.time
bmnum = myBeam.bmnum
# Save all of the radar operational parameters.
prm.time.append(beamTime)
prm.mplgs.append(myBeam.prm.mplgs)
prm.nave.append(myBeam.prm.nave)
prm.noisesearch.append(myBeam.prm.noisesearch)
prm.scan.append(myBeam.prm.scan)
prm.smsep.append(myBeam.prm.smsep)
prm.mplgexs.append(myBeam.prm.mplgexs)
prm.xcf.append(myBeam.prm.xcf)
prm.noisesky.append(myBeam.prm.noisesky)
prm.rsep.append(myBeam.prm.rsep)
prm.mppul.append(myBeam.prm.mppul)
prm.inttsc.append(myBeam.prm.inttsc)
prm.frang.append(myBeam.prm.frang)
prm.bmazm.append(myBeam.prm.bmazm)
prm.lagfr.append(myBeam.prm.lagfr)
prm.ifmode.append(myBeam.prm.ifmode)
prm.noisemean.append(myBeam.prm.noisemean)
prm.tfreq.append(myBeam.prm.tfreq)
prm.inttus.append(myBeam.prm.inttus)
prm.rxrise.append(myBeam.prm.rxrise)
prm.mpinc.append(myBeam.prm.mpinc)
prm.nrang.append(myBeam.prm.nrang)
#Get the fitData.
fitDataList = getattr(myBeam.fit,param)
slist = getattr(myBeam.fit,'slist')
gflag = getattr(myBeam.fit,'gflg')
if len(slist) > 1:
for (gate,data,flag) in zip(slist,fitDataList,gflag):
#Get information from each gate in scan. Skip record if the chosen ground scatter option is not met.
if (gscat == 1) and (flag == 0): continue
if (gscat == 2) and (flag == 1): continue
tmp = (scanNr,beamTime,bmnum,gate,data)
dataList.append(tmp)
goodScan = True
elif len(slist) == 1:
gate,data,flag = (slist[0],fitDataList[0],gflag[0])
#Get information from each gate in scan. Skip record if the chosen ground scatter option is not met.
if (gscat == 1) and (flag == 0): continue
if (gscat == 2) and (flag == 1): continue
tmp = (scanNr,beamTime,bmnum,gate,data)
dataList.append(tmp)
goodScan = True
else:
continue
if goodScan:
#Determine the start time for each scan and save to list.
scanTimeList.append(min([x.time for x in myScan]))
#Advance to the next scan number.
scanNr = scanNr + 1
#Convert lists to numpy arrays.
timeArray = np.array(scanTimeList)
dataListArray = np.array(dataList)
# If no data, report and return.
if dataListArray.size == 0:
self.messages.append(no_data_message)
return
#Figure out what size arrays we need and initialize the arrays...
nrTimes = int(np.max(dataListArray[:,scanInx]) + 1)
if full_array:
nrBeams = int(fov.beams.max() + 1)
nrGates = int(fov.gates.max() + 1)
else:
nrBeams = int(np.max(dataListArray[:,beamInx]) + 1)
nrGates = int(np.max(dataListArray[:,gateInx]) + 1)
#Make sure the FOV is the same size as the data array.
if len(fov.beams) != nrBeams:
fov.beams = fov.beams[0:nrBeams]
fov.latCenter = fov.latCenter[0:nrBeams,:]
fov.lonCenter = fov.lonCenter[0:nrBeams,:]
fov.slantRCenter = fov.slantRCenter[0:nrBeams,:]
fov.latFull = fov.latFull[0:nrBeams+1,:]
fov.lonFull = fov.lonFull[0:nrBeams+1,:]
fov.slantRFull = fov.slantRFull[0:nrBeams+1,:]
if len(fov.gates) != nrGates:
fov.gates = fov.gates[0:nrGates]
fov.latCenter = fov.latCenter[:,0:nrGates]
fov.lonCenter = fov.lonCenter[:,0:nrGates]
fov.slantRCenter = fov.slantRCenter[:,0:nrGates]
fov.latFull = fov.latFull[:,0:nrGates+1]
fov.lonFull = fov.lonFull[:,0:nrGates+1]
fov.slantRFull = fov.slantRFull[:,0:nrGates+1]
#Convert the dataListArray into a 3 dimensional array.
dataArray = np.ndarray([nrTimes,nrBeams,nrGates])
dataArray[:] = np.nan
for inx in range(len(dataListArray)):
dataArray[int(dataListArray[inx,scanInx]),int(dataListArray[inx,beamInx]),int(dataListArray[inx,gateInx])] = dataListArray[inx,dataInx]
#Make metadata block to hold information about the processing.
metadata = {}
metadata['dType'] = myPtr.dType
metadata['stid'] = myPtr.stid
metadata['name'] = radStruct.name
metadata['code'] = radStruct.code
metadata['fType'] = myPtr.fType
metadata['cp'] = myPtr.cp
metadata['channel'] = myPtr.channel
metadata['sTime'] = sTime
metadata['eTime'] = eTime
metadata['param'] = param
metadata['gscat'] = gscat
metadata['elevation'] = fovElevation
metadata['model'] = fovModel
metadata['coords'] = fovCoords
dataSet = 'DS000_originalFit'
metadata['dataSetName'] = dataSet
metadata['serial'] = 0
comment = '['+dataSet+'] '+ 'Original Fit Data'
#Save data to be returned as self.variables
setattr(self,dataSet,musicDataObj(timeArray,dataArray,fov=fov,parent=self,comment=comment))
newSigObj = getattr(self,dataSet)
setattr(newSigObj,'metadata',metadata)
#Set the new data active.
newSigObj.setActive()
#Make prm data part of the object.
self.prm = prm
def get_data_sets(self):
"""Return a sorted list of musicDataObj's contained in this musicArray.
Returns
-------
dataSets : list of str
Names of musicDataObj's contained in this musicArray.
Written by Nathaniel A. Frissell, Fall 2013
"""
attrs = dir(self)
dataSets = []
for item in attrs:
if item.startswith('DS'):
dataSets.append(item)
dataSets.sort()
return dataSets
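# Hypothetical helper (never called) showing how the data sets held by a
# musicArray are usually inspected.
def _example_list_data_sets(dataObj):
    # Returns names such as ['DS000_originalFit', 'DS001_beamInterpolated', ...];
    # dataObj.active always points at the most recently activated data set.
    return dataObj.get_data_sets()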
def beamInterpolation(dataObj,dataSet='active',newDataSetName='beamInterpolated',comment='Beam Linear Interpolation'):
"""Interpolates the data in a musicArray object along the beams of the radar. This method will ensure that no
rangegates are missing data. Ranges outside of metadata['gateLimits'] will be set to 0.
The result is stored as a new musicDataObj in the given musicArray object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
Written by Nathaniel A. Frissell, Fall 2013
"""
from scipy.interpolate import interp1d
currentData = getDataSet(dataObj,dataSet)
nrTimes = len(currentData.time)
nrBeams = len(currentData.fov.beams)
nrGates = len(currentData.fov.gates)
interpArr = np.zeros([nrTimes,nrBeams,nrGates])
for tt in range(nrTimes):
for bb in range(nrBeams):
rangeVec = currentData.fov.slantRCenter[bb,:]
input_x = copy.copy(rangeVec)
input_y = currentData.data[tt,bb,:]
#If metadata['gateLimits'], select only those measurements...
if currentData.metadata.has_key('gateLimits'):
limits = currentData.metadata['gateLimits']
gateInx = np.where(np.logical_and(currentData.fov.gates >= limits[0],currentData.fov.gates <= limits[1]))[0]
if len(gateInx) < 2: continue
input_x = input_x[gateInx]
input_y = input_y[gateInx]
good = np.where(np.isfinite(input_y))[0]
if len(good) < 2: continue
input_x = input_x[good]
input_y = input_y[good]
intFn = interp1d(input_x,input_y,bounds_error=False,fill_value=0)
interpArr[tt,bb,:] = intFn(rangeVec)
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = interpArr
newDataSet.setActive()
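# Hypothetical helper (never called) showing the usual call pattern: interpolate
# the currently active data set along each beam; the result is stored as a new
# 'DSxxx_beamInterpolated' data set and made active.
def _example_beamInterpolation_usage(dataObj):
    beamInterpolation(dataObj, dataSet='active')
    return dataObj.active.history  # now includes the 'Beam Linear Interpolation' entry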
def defineLimits(dataObj,dataSet='active',rangeLimits=None,gateLimits=None,beamLimits=None,timeLimits=None):
"""Sets the range, gate, beam, and time limits for the chosen data set. This method only changes metadata;
it does not create a new data set or alter the data in any way. If you specify rangeLimits, they will be changed to correspond
with the center value of the range cell. Gate limits always override range limits.
Use the applyLimits() method to remove data outside of the data limits.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
rangeLimits : Optional[iterable]
        Two-element array defining the maximum and minimum slant ranges to use. [km]
    gateLimits : Optional[iterable]
        Two-element array defining the maximum and minimum gates to use.
    beamLimits : Optional[iterable]
        Two-element array defining the maximum and minimum beams to use.
    timeLimits : Optional[iterable]
        Two-element array of datetime.datetime objects defining the maximum and minimum times to use.
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
try:
if (rangeLimits != None) or (gateLimits != None):
if (rangeLimits != None) and (gateLimits == None):
inx = np.where(np.logical_and(currentData.fov.slantRCenter >= rangeLimits[0],currentData.fov.slantRCenter <= rangeLimits[1]))
gateLimits = [np.min(inx[1][:]),np.max(inx[1][:])]
if gateLimits != None:
rangeMin = np.int(np.min(currentData.fov.slantRCenter[:,gateLimits[0]]))
rangeMax = np.int(np.max(currentData.fov.slantRCenter[:,gateLimits[1]]))
rangeLimits = [rangeMin,rangeMax]
currentData.metadata['gateLimits'] = gateLimits
currentData.metadata['rangeLimits'] = rangeLimits
if beamLimits != None:
currentData.metadata['beamLimits'] = beamLimits
if timeLimits != None:
currentData.metadata['timeLimits'] = timeLimits
except:
logging.warning("An error occured while defining limits. No limits set. Check your input values.")
def checkDataQuality(dataObj,dataSet='active',max_off_time=10,sTime=None,eTime=None):
"""Mark the data set as bad (metadata['good_period'] = False) if the radar was not operational within the chosen time period
for a specified length of time.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
max_off_time : Optional[int/float]
Maximum length in minutes radar may remain off.
sTime : Optional[datetime.datetime]
Starting time of checking period. If None, min(currentData.time) is used.
eTime : Optional[datetime.datetime]
End time of checking period. If None, max(currentData.time) is used.
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
if sTime is None:
sTime = np.min(currentData.time)
if eTime is None:
eTime = np.max(currentData.time)
time_vec = currentData.time[np.logical_and(currentData.time > sTime, currentData.time < eTime)]
time_vec = np.concatenate(([sTime],time_vec,[eTime]))
max_diff = np.max(np.diff(time_vec))
if max_diff > datetime.timedelta(minutes=max_off_time):
currentData.setMetadata(good_period=False)
else:
currentData.setMetadata(good_period=True)
return dataObj
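# Hypothetical helper (never called): flag an event as unusable if the radar was
# off for more than 10 minutes at a stretch within the period of interest.
def _example_checkDataQuality_usage(dataObj, sTime, eTime):
    checkDataQuality(dataObj, dataSet='active', max_off_time=10,
                     sTime=sTime, eTime=eTime)
    return dataObj.active.metadata['good_period']  # True when coverage is continuous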
def applyLimits(dataObj,dataSet='active',rangeLimits=None,gateLimits=None,timeLimits=None,newDataSetName='limitsApplied',comment=None):
"""Removes data outside of the rangeLimits and gateLimits boundaries.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
rangeLimits : Optional[iterable]
        Two-element array defining the maximum and minimum slant ranges to use. [km]
    gateLimits : Optional[iterable]
        Two-element array defining the maximum and minimum gates to use.
    beamLimits : Optional[iterable]
        Two-element array defining the maximum and minimum beams to use.
    timeLimits : Optional[iterable]
        Two-element array of datetime.datetime objects defining the maximum and minimum times to use.
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Returns
-------
newData : musicDataObj
Processed version of input musicDataObj (if succeeded), or the original musicDataObj (if failed).
Written by Nathaniel A. Frissell, Fall 2013
"""
if (rangeLimits != None) or (gateLimits != None) or (timeLimits != None):
defineLimits(dataObj,dataSet='active',rangeLimits=rangeLimits,gateLimits=gateLimits,timeLimits=timeLimits)
currentData = getDataSet(dataObj,dataSet)
try:
#Make a copy of the current data set.
commentList = []
if (currentData.metadata.has_key('timeLimits') == False and
currentData.metadata.has_key('beamLimits') == False and
currentData.metadata.has_key('gateLimits') == False):
return currentData
newData = currentData.copy(newDataSetName,comment)
#Apply the gateLimits
if currentData.metadata.has_key('gateLimits'):
limits = currentData.metadata['gateLimits']
gateInx = np.where(np.logical_and(currentData.fov.gates >= limits[0],currentData.fov.gates<= limits[1]))[0]
newData.data = newData.data[:,:,gateInx]
newData.fov.gates = newData.fov.gates[gateInx]
newData.fov.latCenter = newData.fov.latCenter[:,gateInx]
newData.fov.lonCenter = newData.fov.lonCenter[:,gateInx]
newData.fov.slantRCenter = newData.fov.slantRCenter[:,gateInx]
#Update the full FOV.
#This works as long as we look at only consecutive gates. If we ever do something where we are not looking at consecutive gates
#(typically for computational speed reasons), we will have to do something else.
gateInxFull = np.append(gateInx,gateInx[-1]+1) #We need that extra gate since this is the full FOV.
newData.fov.latFull = newData.fov.latFull[:,gateInxFull]
newData.fov.lonFull = newData.fov.lonFull[:,gateInxFull]
newData.fov.slantRFull = newData.fov.slantRFull[:,gateInxFull]
commentList.append('gate: %i,%i' % tuple(limits))
rangeLim = (np.min(newData.fov.slantRCenter), np.max(newData.fov.slantRCenter))
commentList.append('range [km]: %i,%i' % rangeLim)
#Remove limiting item from metadata.
newData.metadata.pop('gateLimits')
if newData.metadata.has_key('rangeLimits'): newData.metadata.pop('rangeLimits')
#Apply the beamLimits.
if currentData.metadata.has_key('beamLimits'):
limits = currentData.metadata['beamLimits']
beamInx = np.where(np.logical_and(currentData.fov.beams >= limits[0],currentData.fov.beams <= limits[1]))[0]
newData.data = newData.data[:,beamInx,:]
newData.fov.beams = newData.fov.beams[beamInx]
newData.fov.latCenter = newData.fov.latCenter[beamInx,:]
newData.fov.lonCenter = newData.fov.lonCenter[beamInx,:]
newData.fov.slantRCenter = newData.fov.slantRCenter[beamInx,:]
#Update the full FOV.
#This works as long as we look at only consecutive gates. If we ever do something where we are not looking at consecutive gates
#(typically for computational speed reasons), we will have to do something else.
beamInxFull = np.append(beamInx,beamInx[-1]+1) #We need that extra beam since this is the full FOV.
newData.fov.latFull = newData.fov.latFull[beamInxFull,:]
newData.fov.lonFull = newData.fov.lonFull[beamInxFull,:]
newData.fov.slantRFull = newData.fov.slantRFull[beamInxFull,:]
commentList.append('beam: %i,%i' % tuple(limits))
#Remove limiting item from metadata.
newData.metadata.pop('beamLimits')
#Apply the time limits.
if currentData.metadata.has_key('timeLimits'):
limits = currentData.metadata['timeLimits']
timeInx = np.where(np.logical_and(currentData.time >= limits[0],currentData.time <= limits[1]))[0]
newData.data = newData.data[timeInx,:,:]
newData.time = newData.time[timeInx]
commentList.append('time: '+limits[0].strftime('%Y-%m-%d/%H:%M,')+limits[1].strftime('%Y-%m-%d/%H:%M'))
#Remove limiting item from metadata.
newData.metadata.pop('timeLimits')
#Update the history with what limits were applied.
comment = 'Limits Applied'
commentStr = '['+newData.metadata['dataSetName']+'] '+comment+': '+'; '.join(commentList)
key = max(newData.history.keys())
newData.history[key] = commentStr
logging.debug(commentStr)
newData.setActive()
return newData
except:
if hasattr(dataObj,newDataSetName): delattr(dataObj,newDataSetName)
# print 'Warning! Limits not applied.'
return currentData
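# Hypothetical helper (never called) illustrating the defineLimits/applyLimits
# workflow: limits passed here are written to the metadata first and then used
# to trim the arrays, producing a new 'DSxxx_limitsApplied' data set.
def _example_applyLimits_usage(dataObj):
    return applyLimits(dataObj, dataSet='active',
                       rangeLimits=[500, 1500],  # km; illustrative values only
                       gateLimits=None, timeLimits=None)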
def determineRelativePosition(dataObj,dataSet='active',altitude=250.):
"""Finds the center cell of the field-of-view of a musicArray data object.
The range, azimuth, x-range, and y-range from the center to each cell in the FOV
is calculated and saved to the FOV object. The following objects are added to
dataObj.dataSet:
fov.relative_centerInx: [beam, gate] index of the center cell
fov.relative_azm: Azimuth relative to center cell [deg]
fov.relative_range: Range relative to center cell [km]
fov.relative_x: X-range relative to center cell [km]
fov.relative_y: Y-range relative to center cell [km]
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
altitude : Optional[float]
altitude added to Re = 6378.1 km [km]
Returns
-------
None
Written by Nathaniel A. Frissell, Fall 2013
"""
from davitpy import utils
#Get the chosen dataset.
currentData = getDataSet(dataObj,dataSet)
#Determine center beam.
ctrBeamInx = len(currentData.fov.beams)/2
ctrGateInx = len(currentData.fov.gates)/2
currentData.fov.relative_centerInx = [ctrBeamInx, ctrGateInx]
#Set arrays of lat1/lon1 to the center cell value. Use this to calculate all other positions
#with numpy array math.
lat1 = np.zeros_like(currentData.fov.latCenter)
lon1 = np.zeros_like(currentData.fov.latCenter)
lat1[:] = currentData.fov.latCenter[ctrBeamInx,ctrGateInx]
lon1[:] = currentData.fov.lonCenter[ctrBeamInx,ctrGateInx]
#Make lat2/lon2 the center position array of the dataset.
lat2 = currentData.fov.latCenter
lon2 = currentData.fov.lonCenter
#Calculate the azimuth and distance from the centerpoint to the endpoint.
azm = utils.greatCircleAzm(lat1,lon1,lat2,lon2)
dist = (Re + altitude)*utils.greatCircleDist(lat1,lon1,lat2,lon2)
#Save calculated values to the current data object, as well as calculate the
#X and Y relatvie positions of each cell.
currentData.fov.relative_azm = azm
currentData.fov.relative_range = dist
currentData.fov.relative_x = dist * np.sin(np.radians(azm))
currentData.fov.relative_y = dist * np.cos(np.radians(azm))
return None
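# Hypothetical helper (never called): after this call the FOV carries
# relative_azm, relative_range, relative_x, and relative_y arrays, giving each
# cell's position with respect to the center of the field of view.
def _example_determineRelativePosition_usage(dataObj):
    determineRelativePosition(dataObj, dataSet='active', altitude=250.)
    return dataObj.active.fov.relative_x, dataObj.active.fov.relative_y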
def timeInterpolation(dataObj,dataSet='active',newDataSetName='timeInterpolated',comment='Time Linear Interpolation',timeRes=10,newTimeVec=None):
"""Interpolates the data in a musicArray object to a regular time grid.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
timeRes : Optional[float]
time resolution of new time vector [seconds]
newTimeVec : Optional[list of datetime.datetime]
Sequence of datetime.datetime objects that data will be interpolated to. This overides timeRes.
Written by Nathaniel A. Frissell, Fall 2013
"""
from scipy.interpolate import interp1d
from davitpy import utils
currentData = getDataSet(dataObj,dataSet)
sTime = currentData.time[0]
sTime = datetime.datetime(sTime.year,sTime.month,sTime.day,sTime.hour,sTime.minute) #Make start time a round time.
fTime = currentData.time[-1]
#Create new time vector.
if newTimeVec == None:
newTimeVec = [sTime]
while newTimeVec[-1] < fTime:
newTimeVec.append(newTimeVec[-1] + datetime.timedelta(seconds=timeRes))
#Ensure that the new time vector is within the bounds of the actual data set.
newTimeVec = np.array(newTimeVec)
good = np.where(np.logical_and(newTimeVec > min(currentData.time),newTimeVec < max(currentData.time)))
newTimeVec = newTimeVec[good]
newEpochVec = utils.datetimeToEpoch(newTimeVec)
#Initialize interpolated data.
nrTimes = len(newTimeVec)
nrBeams = len(currentData.fov.beams)
nrGates = len(currentData.fov.gates)
interpArr = np.zeros([nrTimes,nrBeams,nrGates])
for rg in range(nrGates):
for bb in range(nrBeams):
input_x = currentData.time[:]
input_y = currentData.data[:,bb,rg]
good = np.where(np.isfinite(input_y))[0]
if len(good) < 2: continue
input_x = input_x[good]
input_y = input_y[good]
input_x = utils.datetimeToEpoch(input_x)
intFn = interp1d(input_x,input_y,bounds_error=False)#,fill_value=0)
interpArr[:,bb,rg] = intFn(newEpochVec)
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.time = newTimeVec
newDataSet.data = interpArr
newDataSet.setActive()
def filterTimes(sTime,eTime,timeRes,numTaps):
"""The linear filter is going to cause a delay in the signal and also won't get to the end of the signal.
This function will calcuate the full time period of data that needs to be loaded in order to provide filtered data
for the event requested.
Parameters
----------
sTime : datetime.datetime
Start time of event.
eTime : datetime.datetime
End time of event.
timeRes : float
Time resolution in seconds of data to be sent to filter.
numTaps : int
Length of the filter
Returns
-------
newSTime, newETime : datetime.datetime, datetime.datetime
Start and end times of data that needs to be fed into the filter.
Written by Nathaniel A. Frissell, Fall 2013
"""
td = datetime.timedelta(seconds=(numTaps*timeRes/2.))
newSTime = sTime - td
newETime = eTime + td
return (newSTime, newETime)
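#Illustrative usage sketch (the times and filter length below are hypothetical):
#for a 1-hour event sampled at timeRes=10 s with a 101-tap filter, the load
#window is padded by numTaps*timeRes/2 = 505 s on each side.
#
# import datetime
# sTime = datetime.datetime(2013, 11, 1, 12, 0)
# eTime = datetime.datetime(2013, 11, 1, 13, 0)
# loadSTime, loadETime = filterTimes(sTime, eTime, timeRes=10, numTaps=101)
# # loadSTime -> 2013-11-01 11:51:35, loadETime -> 2013-11-01 13:08:25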
class filter(object):
"""Filter a VT sig/sigStruct object and define a FIR filter object.
If only cutoff_low is defined, this is a high pass filter.
If only cutoff_high is defined, this is a low pass filter.
If both cutoff_low and cutoff_high are defined, this is a band pass filter.
Uses scipy.signal.firwin()
High pass and band pass filters inspired by Matti Pastell's page:
http://mpastell.com/2010/01/18/fir-with-scipy/
Metadata keys:
'filter_cutoff_low' --> cutoff_low
'filter_cutoff_high' --> cutoff_high
'filter_numtaps' --> numtaps
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
numtaps : Optional[int]
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
If dataObj.dataSet.metadata['filter_numtaps'] is set and this keyword is None,
the metadata value will be used.
cutoff_low : Optional[float, 1D array_like or None]
High pass cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`. If None, a low-pass filter will not
be applied.
If dataObj.dataSet.metadata['filter_cutoff_low'] is set and this keyword is None,
the metadata value will be used.
cutoff_high : Optional[float, 1D array_like, or None]
Like cutoff_low, but this is the low pass cutoff frequency of the filter.
If dataObj.dataSet.metadata['filter_cutoff_high'] is set and this keyword is None,
the metadata value will be used.
width : Optional[float]
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : Optional[string or tuple of string and parameter values]
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : Optional[bool]
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : Optional[bool]
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
0 (DC) if the first passband starts at 0 (i.e. pass_zero is True);
`nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e. the filter is a single band highpass filter);
center of first passband otherwise.
Attributes
----------
comment : str
cutoff_low : float, 1D array_like or None
High pass cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges).
cutoff_high : float, 1D array_like, or None
Like cutoff_low, but this is the low pass cutoff frequency of the filter.
nyq : float
the Nyquist rate
ir : 1D numpy.array
Impulse response (FIR filter coefficients) of the filter.
Methods
-------
plotTransferFunction
plotImpulseResponse
filter
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self, dataObj, dataSet='active', numtaps=None, cutoff_low=None, cutoff_high=None, width=None, window='blackman', pass_zero=True, scale=True,newDataSetName='filtered'):
import scipy as sp
sigObj = getattr(dataObj,dataSet)
nyq = sigObj.nyquistFrequency()
#Get metadata for cutoffs and numtaps.
md = sigObj.metadata
if cutoff_high == None:
if md.has_key('filter_cutoff_high'):
cutoff_high = md['filter_cutoff_high']
if cutoff_low == None:
if md.has_key('filter_cutoff_low'):
cutoff_low = md['filter_cutoff_low']
if numtaps == None:
if md.has_key('filter_numtaps'):
numtaps = md['filter_numtaps']
else:
logging.warning('You must provide numtaps.')
return
if cutoff_high != None: #Low pass
lp = sp.signal.firwin(numtaps=numtaps, cutoff=cutoff_high, width=width, window=window, pass_zero=pass_zero, scale=scale, nyq=nyq)
d = lp
if cutoff_low != None: #High pass
hp = -sp.signal.firwin(numtaps=numtaps, cutoff=cutoff_low, width=width, window=window, pass_zero=pass_zero, scale=scale, nyq=nyq)
hp[numtaps/2] = hp[numtaps/2] + 1
d = hp
if cutoff_high != None and cutoff_low != None:
d = -(lp+hp)
d[numtaps/2] = d[numtaps/2] + 1
d = -1.*d #Needed to correct 180 deg phase shift.
if cutoff_high == None and cutoff_low == None:
logging.warning("You must define cutoff frequencies!")
return
self.comment = ' '.join(['Filter:',window+',','Nyquist:',str(nyq),'Hz,','Cutoff:','['+str(cutoff_low)+', '+str(cutoff_high)+']','Hz,','Numtaps:',str(numtaps)])
self.cutoff_low = cutoff_low
self.cutoff_high = cutoff_high
self.nyq = nyq
self.ir = d
self.filter(dataObj,dataSet=dataSet,newDataSetName=newDataSetName)
def __str__(self):
return self.comment
def plotTransferFunction(self,xmin=0,xmax=None,ymin_mag=-150,ymax_mag=5,ymin_phase=None,ymax_phase=None,worN=None,fig=None):
"""Plot the frequency and phase response of the filter object.
Parameters
----------
xmin : Optional[float]
Minimum value for x-axis.
xmax : Optional[float]
Maximum value for x-axis.
ymin_mag : Optional[float]
Minimum value for y-axis for the frequency response plot.
ymax_mag : Optional[float]
Maximum value for y-axis for the frequency response plot.
ymin_phase : Optional[float]
Minimum value for y-axis for the phase response plot.
ymax_phase : Optional[float]
Maximum value for y-axis for the phase response plot.
worN : Optional[int]
passed to scipy.signal.freqz()
If None, then compute at 512 frequencies around the unit circle.
If the len(filter) > 512, then compute at len(filter) frequencies around the unit circle.
If a single integer, then compute at that many frequencies.
Otherwise, compute the response at frequencies given in worN
fig : Optional[matplotlib.Figure]
Figure object on which to plot. If None, a figure will be created.
Returns
-------
fig : matplotlib.Figure
Figure object containing the plot.
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
if fig == None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(20,10))
if worN == None:
if len(self.ir) > 512: worN = len(self.ir)
else: worN = None
else: pass
w,h = sp.signal.freqz(self.ir,1,worN=worN)
h_dB = 20 * np.log10(abs(h))
axis = fig.add_subplot(211)
#Compute frequency vector.
w = w/max(w) * self.nyq
axis.plot(w,h_dB,'.-')
#mp.axvline(x=self.fMax,color='r',ls='--',lw=2)
if xmin is not None: axis.set_xlim(xmin=xmin)
if xmax is not None: axis.set_xlim(xmax=xmax)
if ymin_mag is not None: axis.set_ylim(ymin=ymin_mag)
if ymax_mag is not None: axis.set_ylim(ymax=ymax_mag)
axis.set_xlabel(r'Frequency (Hz)')
axis.set_ylabel('Magnitude (db)')
axis.set_title(r'Frequency response')
axis = fig.add_subplot(212)
h_Phase = np.unwrap(np.arctan2(np.imag(h),np.real(h)))
axis.plot(w,h_Phase,'.-')
if xmin is not None: axis.set_xlim(xmin=xmin)
if xmax is not None: axis.set_xlim(xmax=xmax)
if ymin_phase is not None: axis.set_ylim(ymin=ymin_phase)
if ymax_phase is not None: axis.set_ylim(ymax=ymax_phase)
axis.set_ylabel('Phase (radians)')
axis.set_xlabel(r'Frequency (Hz)')
axis.set_title(r'Phase response')
fig.suptitle(self.comment)
fig.subplots_adjust(hspace=0.5)
return fig
def plotImpulseResponse(self,xmin=None,xmax=None,ymin_imp=None,ymax_imp=None,ymin_step=None,ymax_step=None,fig=None):
"""Plot the impulse and step response of the filter object.
Parameters
----------
xmin : Optional[float]
Minimum value for x-axis.
xmax : Optional[float]
Maximum value for x-axis.
ymin_imp : Optional[float]
Minimum value for y-axis for the impulse response plot.
ymax_imp : Optional[float]
Maximum value for y-axis for the impulse response plot.
ymin_step : Optional[float]
Minimum value for y-axis for the step response plot.
ymax_step : Optional[float]
Maximum value for y-axis for the step response plot.
fig : Optional[matplotlib.Figure]
Figure object on which to plot. If None, a figure will be created.
Returns
-------
fig : matplotlib.Figure
Figure object containing the plot.
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
if fig == None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(20,10))
l = len(self.ir)
impulse = np.repeat(0.,l); impulse[0] =1.
x = np.arange(0,l)
response = sp.signal.lfilter(self.ir,1,impulse)
axis = fig.add_subplot(211)
axis.stem(x, response)
axis.set_ylabel('Amplitude')
axis.set_xlabel(r'n (samples)')
axis.set_title(r'Impulse response')
axis = fig.add_subplot(212)
step = np.cumsum(response)
axis.stem(x, step)
axis.set_ylabel('Amplitude')
axis.set_xlabel(r'n (samples)')
axis.set_title(r'Step response')
fig.suptitle(self.comment)
fig.subplots_adjust(hspace=0.5)
return fig
def filter(self,dataObj,dataSet='active',newDataSetName='filtered'):
"""Apply the filter to a vtsig object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
sigobj = getattr(dataObj,dataSet)
vtsig = sigobj.parent
nrTimes,nrBeams,nrGates = np.shape(sigobj.data)
#Filter causes a delay in the signal and also doesn't get the tail end of the signal... Shift signal around, provide info about where the signal is valid.
shift = np.int32(-np.floor(len(self.ir)/2.))
start_line = np.zeros(nrTimes)
start_line[0] = 1
start_line = np.roll(start_line,shift)
tinx0 = abs(shift)
tinx1 = np.where(start_line == 1)[0][0]
val_tm0 = sigobj.time[tinx0]
val_tm1 = sigobj.time[tinx1]
filteredData = np.zeros_like(sigobj.data)
#Apply filter
for bm in range(nrBeams):
for rg in range(nrGates):
tmp = sp.signal.lfilter(self.ir,[1.0],sigobj.data[:,bm,rg])
tmp = np.roll(tmp,shift)
filteredData[:,bm,rg] = tmp[:]
#Create new signal object.
newsigobj = sigobj.copy(newDataSetName,self.comment)
#Put in the filtered data.
newsigobj.data = copy.copy(filteredData)
newsigobj.time = copy.copy(sigobj.time)
#Clear out ymin and ymax from metadata; make sure meta data block exists.
#If not, create it.
if hasattr(newsigobj,'metadata'):
delMeta = ['ymin','ymax','ylim']
for key in delMeta:
if newsigobj.metadata.has_key(key):
del newsigobj.metadata[key]
else:
newsigobj.metadata = {}
newsigobj.metadata['timeLimits'] = (val_tm0,val_tm1)
key = 'title'
if newsigobj.metadata.has_key(key):
newsigobj.metadata[key] = ' '.join(['Filtered',newsigobj.metadata[key]])
else:
newsigobj.metadata[key] = 'Filtered'
newsigobj.metadata['fir_filter'] = (self.cutoff_low,self.cutoff_high)
newsigobj.setActive()
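#Illustrative usage sketch (dataObj, numtaps, and the cutoff values are
#hypothetical and assume the data have already been interpolated to a regular
#time grid): band-pass the active data set between roughly 0.3 mHz and 1.2 mHz.
#
# filt = filter(dataObj, dataSet='active', numtaps=101,
#               cutoff_low=0.0003, cutoff_high=0.0012)
# filt.plotImpulseResponse()
# filt.plotTransferFunction()
# # The filtered data are written to a new data set ('filtered' by default)
# # and made active by the filter() method called from __init__.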
def detrend(dataObj,dataSet='active',newDataSetName='detrended',comment=None,type='linear'):
"""Linearly detrend a data in a musicArray/musicDataObj object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
type : Optional[str]
The type of detrending. If type == 'linear' (default), the result of a linear least-squares fit to data
is subtracted from data. If type == 'constant', only the mean of data is subtracted.
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
newDataArr= np.zeros_like(currentData.data)
for bm in range(nrBeams):
for rg in range(nrGates):
try:
newDataArr[:,bm,rg] = sp.signal.detrend(currentData.data[:,bm,rg],type=type)
except:
newDataArr[:,bm,rg] = np.nan
if comment == None:
comment = type.capitalize() + ' detrend (scipy.signal.detrend)'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = newDataArr
newDataSet.setActive()
def nan_to_num(dataObj,dataSet='active',newDataSetName='nan_to_num',comment=None):
"""Convert all NANs and INFs to finite numbers using numpy.nan_to_num().
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
if comment == None:
comment = 'numpy.nan_to_num'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = np.nan_to_num(currentData.data)
newDataSet.setActive()
def windowData(dataObj,dataSet='active',newDataSetName='windowed',comment=None,window='hann'):
"""Apply a window to a musicArray object. The window is calculated using scipy.signal.get_window().
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
window : Optional[str]
boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen, bohman, blackmanharris, nuttall,
barthann, kaiser (needs beta), gaussian (needs std), general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation)
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
win = sp.signal.get_window(window,nrTimes,fftbins=False)
newDataArr= np.zeros_like(currentData.data)
for bm in range(nrBeams):
for rg in range(nrGates):
newDataArr[:,bm,rg] = currentData.data[:,bm,rg] * win
if comment == None:
comment = window.capitalize() + ' window applied (scipy.signal.get_window)'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = newDataArr
newDataSet.setActive()
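#Illustrative processing-chain sketch (dataObj is a hypothetical musicArray
#object holding time-interpolated data): detrend and window are typically
#applied back-to-back before calculateFFT() so that each beam/gate time series
#is zero-mean and tapered.
#
# detrend(dataObj, dataSet='active', type='linear')
# windowData(dataObj, dataSet='active', window='hann')
# calculateFFT(dataObj, dataSet='active')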
def calculateFFT(dataObj,dataSet='active',comment=None):
"""Calculate the spectrum of an object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
#Determine frequency axis.
nyq = currentData.nyquistFrequency()
freq_ax = np.arange(nrTimes,dtype='f8')
freq_ax = (freq_ax / max(freq_ax)) - 0.5
freq_ax = freq_ax * 2. * nyq
#Use complex64, not complex128! If you use complex128, too much numerical noise will accumulate and the final plot will be bad!
newDataArr= np.zeros((nrTimes,nrBeams,nrGates),dtype=np.complex64)
for bm in range(nrBeams):
for rg in range(nrGates):
newDataArr[:,bm,rg] = sp.fftpack.fftshift(sp.fftpack.fft(currentData.data[:,bm,rg])) / np.size(currentData.data[:,bm,rg])
currentData.freqVec = freq_ax
currentData.spectrum = newDataArr
# Calculate the dominant frequency #############################################
posFreqInx = np.where(currentData.freqVec >= 0)[0]
posFreqVec = currentData.freqVec[posFreqInx]
npf = len(posFreqVec) #Number of positive frequencies
data = np.abs(currentData.spectrum[posFreqInx,:,:]) #Use the magnitude of the positive frequency data.
#Average Power Spectral Density
avg_psd = np.zeros(npf)
for x in range(npf): avg_psd[x] = np.mean(data[x,:,:])
currentData.dominantFreq = posFreqVec[np.argmax(avg_psd)]
currentData.appendHistory('Calculated FFT')
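#Worked example of the frequency axis above (illustrative, assuming data
#interpolated to timeRes = 120 s): the Nyquist frequency is 1/(2*120) ~ 4.2e-3 Hz,
#so freqVec spans roughly -4.2 mHz to +4.2 mHz in nrTimes steps, and
#dominantFreq is the positive frequency whose beam/gate-averaged spectral
#magnitude is largest.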
def calculateDlm(dataObj,dataSet='active',comment=None):
"""Calculate the cross-spectral matrix of a musicaArray object. FFT must already have been calculated.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
nCells = nrBeams * nrGates
currentData.llLookupTable = np.zeros([5,nCells])
currentData.Dlm = np.zeros([nCells,nCells],dtype=np.complex128)
#Only use positive frequencies...
posInx = np.where(currentData.freqVec > 0)[0]
#Explicitly write out gate/range indices...
llList = []
for gg in xrange(nrGates):
for bb in xrange(nrBeams):
llList.append((bb,gg))
for ll in range(nCells):
llAI = llList[ll]
ew_dist = currentData.fov.relative_x[llAI]
ns_dist = currentData.fov.relative_y[llAI]
currentData.llLookupTable[:,ll] = [ll, currentData.fov.beams[llAI[0]], currentData.fov.gates[llAI[1]],ns_dist,ew_dist]
spectL = currentData.spectrum[posInx,llAI[0],llAI[1]]
for mm in range(nCells):
mmAI = llList[mm]
spectM = currentData.spectrum[posInx,mmAI[0],mmAI[1]]
currentData.Dlm[ll,mm] = np.sum(spectL * np.conj(spectM))
currentData.appendHistory('Calculated Cross-Spectral Matrix Dlm')
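#Note (illustrative): each element computed above is
# Dlm[l,m] = sum over positive frequencies of S_l(f) * conj(S_m(f)),
#so the diagonal Dlm[l,l] is the (unnormalized) spectral power in cell l and
#the off-diagonal elements carry the relative phase between cells l and m.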
def calculateKarr(dataObj,dataSet='active',kxMax=0.05,kyMax=0.05,dkx=0.001,dky=0.001,threshold=0.15):
"""Calculate the two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
Cross-spectrum array Dlm must already have been calculated.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
kxMax : Optional[float]
Maximum kx (East-West) wavenumber to calculate [rad/km]
kyMax : Optional[float]
Maximum ky (North-South) wavenumber to calculate [rad/km]
dkx : Optional[float]
kx resolution [rad/km]
dky : Optional[float]
ky resolution [rad/km]
threshold : Optional[float]
threshold of signals to detect as a fraction of the maximum eigenvalue
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
#Calculate eigenvalues, eigenvectors
eVals,eVecs = np.linalg.eig(np.transpose(currentData.Dlm))
nkx = np.ceil(2*kxMax/dkx)
if (nkx % 2) == 0: nkx = nkx+1
kxVec = kxMax * (2*np.arange(nkx)/(nkx-1) - 1)
nky = np.ceil(2*kyMax/dky)
if (nky % 2) == 0: nky = nky+1
kyVec = kyMax * (2*np.arange(nky)/(nky-1) - 1)
nkx = int(nkx)
nky = int(nky)
xm = currentData.llLookupTable[4,:] #x is in the E-W direction.
ym = currentData.llLookupTable[3,:] #y is in the N-S direction.
maxEval = np.max(np.abs(eVals))
minEvalsInx = np.where(eVals <= threshold*maxEval)[0]
cnt = np.size(minEvalsInx)
maxEvalsInx = np.where(eVals > threshold*maxEval)[0]
nSigs = np.size(maxEvalsInx)
if cnt < 3:
logging.warning('Not enough small eigenvalues!')
import ipdb; ipdb.set_trace()
logging.info('K-Array: ' + str(nkx) + ' x ' + str(nky))
logging.info('Kx Max: ' + str(kxMax))
logging.info('Kx Res: ' + str(dkx))
logging.info('Ky Max: ' + str(kyMax))
logging.info('Ky Res: ' + str(dky))
logging.info('')
logging.info('Signal Threshold: ' + str(threshold))
logging.info('Number of Det Signals: ' + str(nSigs))
logging.info('Number of Noise Evals: ' + str(cnt))
logging.info('Starting kArr Calculation...')
t0 = datetime.datetime.now()
def vCalc(um,v):
return np.dot( np.conj(um), v) * np.dot( np.conj(v), um)
vList = [eVecs[:,minEvalsInx[ee]] for ee in xrange(cnt)]
kArr = np.zeros((nkx,nky),dtype=np.complex64)
for kk_kx in xrange(nkx):
kx = kxVec[kk_kx]
for kk_ky in xrange(nky):
ky = kyVec[kk_ky]
um = np.exp(1j*(kx*xm + ky*ym))
kArr[kk_kx,kk_ky]= 1. / np.sum(map(lambda v: vCalc(um,v), vList))
t1 = datetime.datetime.now()
logging.info('Finished kArr Calculation. Total time: ' + str(t1-t0))
currentData.karr = kArr
currentData.kxVec = kxVec
currentData.kyVec = kyVec
currentData.appendHistory('Calculated kArr')
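#Illustrative usage sketch (dataObj is hypothetical and must already contain
#the results of calculateFFT() and calculateDlm()): a typical MSTID-scale
#wavenumber search.
#
# calculateKarr(dataObj, kxMax=0.05, kyMax=0.05, dkx=0.001, dky=0.001)
# kArr = dataObj.active.karr    # complex array of shape (nkx, nky)
# scaled = scale_karr(kArr)     # normalized magnitude, useful for plotting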
def simulator(dataObj, dataSet='active',newDataSetName='simulated',comment=None,keepLocalRange=True,sigs=None,noiseFactor=0):
"""Replace SuperDARN Data with simulated MSTID(s). This is useful for understanding how the signal processing
routines of this module affect ideal data.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
keepLocalRange : Optional[bool]
If true, the locations calculated for the actual radar field of view will be used. If false,
a linearly-spaced grid will replace the true grid.
sigs : Optional[list of tuples]
A list of tuples defining the characteristics of the simulated signal. Sample list is as follows.
If this keyword is None, the values in this sample list are used as the default values.::
sigs = []
# (amp, kx, ky, f, phi, dcOffset)
sigs.append(( 5, 0.01, -0.010, 0.0004, 0, 5.))
sigs.append(( 5, 0.022, -0.023, 0.0004, 0, 5.))
Each signal is evaluated as a cosine and then summed together. The cosine evaluated is::
sig = amp * np.cos(kx*xgrid + ky*ygrid - 2.*np.pi*f*t + phi) + dc
noiseFactor : Optional[float]
Add white gaussian noise to the simulated signal. noiseFactor is a scalar such that:
noise = noiseFactor*np.random.standard_normal(nSteps)
Written by Nathaniel A. Frissell, Fall 2013
"""
from davitpy import utils
currentData = getDataSet(dataObj,dataSet)
#Typical TID Parameters:
# Frequency: 0.0003 Hz (0.3 mHz)
# Period: 55.5 min
# H. Wavelength: 314 km
# k: 0.02 /km
if keepLocalRange == True:
nx, ny = np.shape(currentData.fov.relative_x)
xRange = np.max(currentData.fov.relative_x) - np.min(currentData.fov.relative_x)
yRange = np.max(currentData.fov.relative_y) - np.min(currentData.fov.relative_y)
xgrid = currentData.fov.relative_x
ygrid = currentData.fov.relative_y
else:
nx = 16
xRange = 800.
ny = 25
yRange = 600.
xvec = np.linspace(-xRange/2.,xRange/2.,nx)
yvec = np.linspace(-yRange/2.,yRange/2.,ny)
dx = np.diff(xvec)[0]
dy = np.diff(yvec)[0]
xaxis = np.append(xvec,xvec[-1]+dx)
yaxis = np.append(yvec,yvec[-1]+dy)
xgrid = np.zeros((nx,ny))
ygrid = np.zeros((nx,ny))
for kk in xrange(nx): ygrid[kk,:] = yvec[:]
for kk in xrange(ny): xgrid[:,kk] = xvec[:]
if sigs == None:
#Set some default signals.
sigs = []
# (amp, kx, ky, f, phi, dcOffset)
sigs.append(( 5, 0.01, -0.010, 0.0004, 0, 5.))
sigs.append(( 5, 0.022, -0.023, 0.0004, 0, 5.))
secVec = np.array(utils.datetimeToEpoch(currentData.time))
secVec = secVec - secVec[0]
nSteps = len(secVec)
dt = currentData.samplePeriod()
dataArr = np.zeros((nSteps,nx,ny))
for step in xrange(nSteps):
t = secVec[step]
for kk in xrange(len(sigs)):
amp = sigs[kk][0]
kx = sigs[kk][1]
ky = sigs[kk][2]
f = sigs[kk][3]
phi = sigs[kk][4]
dc = sigs[kk][5]
if 1./dt <= 2.*f:
logging.warning('Nyquist Violation in f.')
logging.warning('Signal #: %i' % kk)
# if 1./dx <= 2.*kx/(2.*np.pi):
# print 'WARNING: Nyquist Violation in kx.'
# print 'Signal #: %i' % kk
#
# if 1./dy <= 2.*ky/(2.*np.pi):
# print 'WARNING: Nyquist Violation in ky.'
# print 'Signal #: %i' % kk
temp = amp * np.cos(kx*xgrid + ky*ygrid - 2.*np.pi*f*t + phi) + dc
dataArr[step,:,:] = dataArr[step,:,:] + temp
#Signal RMS
sig_rms = np.zeros((nx,ny))
for xx in xrange(nx):
for yy in xrange(ny):
sig_rms[xx,yy] = np.sqrt(np.mean((dataArr[:,xx,yy])**2.))
noise_rms = np.zeros((nx,ny))
if noiseFactor > 0:
nf = noiseFactor
#Temporal White Noise
for xx in xrange(nx):
for yy in xrange(ny):
noise = nf*np.random.standard_normal(nSteps)
noise_rms[xx,yy] = np.sqrt(np.mean(noise**2))
dataArr[:,xx,yy] = dataArr[:,xx,yy] + noise
xx = np.arange(ny)
mu = (ny-1.)/2.
sigma2 = 10.0
sigma = np.sqrt(sigma2)
rgDist = 1./(sigma*np.sqrt(2.*np.pi)) * np.exp(-0.5 * ((xx-mu)/sigma)**2)
rgDist = rgDist / np.max(rgDist)
mask = np.zeros((nx,ny))
for nn in xrange(nx): mask[nn,:] = rgDist[:]
mask3d = np.zeros((nSteps,nx,ny))
for nn in xrange(nSteps): mask3d[nn,:,:] = mask[:]
#Apply Range Gate Dependence
dataArr = dataArr * mask3d
snr = (sig_rms/noise_rms)**2
snr_db = 10.*np.log10(snr)
if comment == None:
comment = 'Simulated data injected.'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = dataArr
newDataSet.setActive()
#OPENW,unit,'simstats.txt',/GET_LUN,WIDTH=300
#stats$ = ' Mean: ' + NUMSTR(MEAN(sig_rms),3) $
# + ' STDDEV: ' + NUMSTR(STDDEV(sig_rms),3) $
# + ' Var: ' + NUMSTR(STDDEV(sig_rms)^2,3)
#PRINTF,unit,'SIG_RMS'
#PRINTF,unit,stats$
#PRINTF,unit,sig_rms
#
#PRINTF,unit,''
#PRINTF,unit,'NOISE_RMS'
#stats$ = ' Mean: ' + NUMSTR(MEAN(noise_rms),3) $
# + ' STDDEV: ' + NUMSTR(STDDEV(noise_rms),3) $
# + ' Var: ' + NUMSTR(STDDEV(noise_rms)^2,3)
#PRINTF,unit,stats$
#PRINTF,unit,noise_rms
#
#PRINTF,unit,''
#PRINTF,unit,'SNR_DB'
#stats$ = ' Mean: ' + NUMSTR(MEAN(snr_db),3) $
# + ' STDDEV: ' + NUMSTR(STDDEV(snr_db),3) $
# + ' Var: ' + NUMSTR(STDDEV(snr_db)^2,3)
#PRINTF,unit,stats$
#PRINTF,unit,snr_db
#CLOSE,unit
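#Illustrative usage sketch (dataObj and the signal tuples are hypothetical):
#replace the loaded radar data with two simulated MSTIDs plus noise, then run
#the same FFT/Dlm/kArr chain on the synthetic data.
#
# sigs = []
# #            (amp,   kx,     ky,    f,     phi, dcOffset)
# sigs.append(( 5,   0.010, -0.010, 0.0004,   0,   5.))
# sigs.append(( 5,   0.022, -0.023, 0.0004,   0,   5.))
# simulator(dataObj, sigs=sigs, noiseFactor=2.0)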
def scale_karr(kArr):
"""Scale/normalize kArr for plotting and signal detection.
Parameters
----------
kArr : 2D numpy.array
Two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
Returns
-------
data : 2D numpy.array
Scaled and normalized version of kArr.
Written by Nathaniel A. Frissell, Fall 2013
"""
from scipy import stats
data = np.abs(kArr) - np.min(np.abs(kArr))
#Determine scale for colorbar.
scale = [0.,1.]
sd = stats.nanstd(data,axis=None)
mean = stats.nanmean(data,axis=None)
scMax = mean + 6.5*sd
data = data / scMax
return data
def detectSignals(dataObj,dataSet='active',threshold=0.35,neighborhood=(10,10)):
"""Automatically detects local maxima/signals in a calculated kArr. This routine uses the watershed
algorithm from the skimage image processing library. Results are automatically stored in
dataObj.dataSet.sigDetect.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
threshold : Optional[float]
Scaled input data must be above this value to be detected. A higher number
will reduce the number of signals detected.
neighborhood : Optional[tuple]
Local region in which to search for peaks at every point in the image/array.
(10,10) will search a 10x10 pixel area.
Returns
-------
currentData : musicDataObj
object
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
################################################################################
#Feature detection...
#Now lets do a little image processing...
from scipy import ndimage
from skimage.morphology import watershed
from skimage.feature import peak_local_max
#sudo pip install cython
#sudo pip install scikit-image
data = scale_karr(currentData.karr)
mask = data > threshold
labels, nb = ndimage.label(mask)
distance = ndimage.distance_transform_edt(mask)
local_maxi = peak_local_max(distance,footprint=np.ones(neighborhood),indices=False)
markers,nb = ndimage.label(local_maxi)
labels = watershed(-distance,markers,mask=mask)
areas = ndimage.sum(mask,labels,xrange(1,labels.max()+1))
maxima = ndimage.maximum(data,labels,xrange(1, labels.max()+1))
order = np.argsort(maxima)[::-1] + 1
maxpos = ndimage.maximum_position(data,labels,xrange(1, labels.max()+1))
sigDetect = SigDetect()
sigDetect.mask = mask
sigDetect.labels = labels
sigDetect.nrSigs = nb
sigDetect.info = []
for x in xrange(labels.max()):
info = {}
info['labelInx'] = x+1
info['order'] = order[x]
info['area'] = areas[x]
info['max'] = maxima[x]
info['maxpos'] = maxpos[x]
info['kx'] = currentData.kxVec[info['maxpos'][0]]
info['ky'] = currentData.kyVec[info['maxpos'][1]]
info['k'] = np.sqrt( info['kx']**2 + info['ky']**2 )
info['lambda_x'] = 2*np.pi / info['kx']
info['lambda_y'] = 2*np.pi / info['ky']
info['lambda'] = 2*np.pi / info['k']
info['azm'] = np.degrees(np.arctan2(info['kx'],info['ky']))
info['freq'] = currentData.dominantFreq
info['period'] = 1./currentData.dominantFreq
info['vel'] = (2.*np.pi/info['k']) * info['freq'] * 1000.
sigDetect.info.append(info)
currentData.appendHistory('Detected KArr Signals')
currentData.sigDetect = sigDetect
return currentData
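#Illustrative usage sketch (dataObj is hypothetical): inspect the signals
#stored by detectSignals().
#
# detectSignals(dataObj, threshold=0.35, neighborhood=(10, 10))
# for sig in dataObj.active.sigDetect.info:
#     print sig['order'], sig['k'], sig['azm'], sig['vel']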
def add_signal(kx,ky,dataObj,dataSet='active',frequency=None):
"""Manually add a signal to the detected signal list. All signals will be re-ordered according to value in the
scaled kArr. Added signals can be distinguished from autodetected signals because
'labelInx' and 'area' will both be set to -1.
Parameters
----------
kx : float
Value of kx of new signal.
ky : float
Value of ky of new signal.
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
frequency : Optional[float]
Frequency to use to calculate period, phase velocity, etc. If None,
the calculated dominant frequency will be used.
Returns
-------
currentData : musicDataObj
object
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
data = scale_karr(currentData.karr)
def find_nearest_inx(array,value):
return (np.abs(array-value)).argmin()
kx_inx = find_nearest_inx(currentData.kxVec,kx)
ky_inx = find_nearest_inx(currentData.kyVec,ky)
maxpos = (kx_inx,ky_inx)
value = data[kx_inx,ky_inx]
true_value = currentData.karr[kx_inx,ky_inx] #Get the unscaled kArr value.
if frequency == None:
freq = currentData.dominantFreq
else:
freq = frequency
info = {}
info['labelInx'] = -1
info['area'] = -1
info['order'] = -1
info['max'] = value
info['true_max'] = true_value #Unscaled kArr value
info['maxpos'] = maxpos
info['kx'] = currentData.kxVec[info['maxpos'][0]]
info['ky'] = currentData.kyVec[info['maxpos'][1]]
info['k'] = np.sqrt( info['kx']**2 + info['ky']**2 )
info['lambda_x'] = 2*np.pi / info['kx']
info['lambda_y'] = 2*np.pi / info['ky']
info['lambda'] = 2*np.pi / info['k']
info['azm'] = np.degrees(np.arctan2(info['kx'],info['ky']))
info['freq'] = freq
info['period'] = 1./freq
info['vel'] = (2.*np.pi/info['k']) * info['freq'] * 1000.
currentData.sigDetect.info.append(info)
currentData.sigDetect.reorder()
currentData.appendHistory('Appended Signal to sigDetect List')
return currentData
def del_signal(order,dataObj,dataSet='active'):
"""Remove a signal to the detected signal list.
Parameters
----------
order : int or list of int
Single value or list of signal orders (IDs) to be removed from the list.
dataObj : musicArray
object
dataSet : Optional[str]
which dataSet in the musicArray object to process
Returns
-------
currentData : musicDataObj
object
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
data = scale_karr(currentData.karr)
orderArr = np.array(order)
for item in list(currentData.sigDetect.info):
if item['order'] in orderArr:
currentData.sigDetect.info.remove(item)
currentData.sigDetect.reorder()
currentData.appendHistory('Deleted Signals from sigDetect List')
return currentData
| gpl-3.0 |
acmaheri/sms-tools | lectures/8-Sound-transformations/plots-code/stftFiltering-orchestra.py | 3 | 1648 | import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import utilFunctions as UF
import stftTransformations as STFTT
import stft as STFT
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
w = np.hamming(2048)
N = 2048
H = 512
# design a band-pass filter using a hanning window
startBin = int(N*500.0/fs)
nBins = int(N*2000.0/fs)
bandpass = (np.hanning(nBins) * 65.0) - 60
filt = np.zeros(N/2)-60
filt[startBin:startBin+nBins] = bandpass
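# Note (illustrative): filt is a gain curve in dB, -60 dB everywhere except a
# hanning-shaped bump that rises to (65 - 60) = +5 dB over bins
# startBin..startBin+nBins, i.e. roughly the 500-2500 Hz band by construction.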
y = STFTT.stftFiltering(x, fs, w, N, H, filt)
mX,pX = STFT.stftAnal(x, fs, w, N, H)
mY,pY = STFT.stftAnal(y, fs, w, N, H)
plt.figure(1, figsize=(12, 9))
plt.subplot(311)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(N/2)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX))
plt.title('mX (orchestra.wav)')
plt.autoscale(tight=True)
plt.subplot(312)
plt.plot(fs*np.arange(N/2)/float(N), filt, 'k', lw=1.3)
plt.axis([0, fs/2, -60, 7])
plt.title('filter shape')
plt.subplot(313)
numFrames = int(mY[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(N/2)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mY))
plt.title('mY')
plt.autoscale(tight=True)
plt.tight_layout()
UF.wavwrite(y, fs, 'orchestra-stft-filtering.wav')
plt.savefig('stftFiltering-orchestra.png')
plt.show()
| agpl-3.0 |
JWarmenhoven/seaborn | seaborn/tests/test_linearmodels.py | 4 | 19116 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import nose.tools as nt
import numpy.testing as npt
import pandas.util.testing as pdt
from numpy.testing.decorators import skipif
from nose import SkipTest
try:
import statsmodels.regression.linear_model as smlm
_no_statsmodels = False
except ImportError:
_no_statsmodels = True
from . import PlotTestCase
from .. import linearmodels as lm
from .. import algorithms as algo
from .. import utils
from ..palettes import color_palette
rs = np.random.RandomState(0)
class TestLinearPlotter(PlotTestCase):
rs = np.random.RandomState(77)
df = pd.DataFrame(dict(x=rs.normal(size=60),
d=rs.randint(-2, 3, 60),
y=rs.gamma(4, size=60),
s=np.tile(list("abcdefghij"), 6)))
df["z"] = df.y + rs.randn(60)
df["y_na"] = df.y.copy()
df.loc[[10, 20, 30], 'y_na'] = np.nan
def test_establish_variables_from_frame(self):
p = lm._LinearPlotter()
p.establish_variables(self.df, x="x", y="y")
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
pdt.assert_frame_equal(p.data, self.df)
def test_establish_variables_from_series(self):
p = lm._LinearPlotter()
p.establish_variables(None, x=self.df.x, y=self.df.y)
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
nt.assert_is(p.data, None)
def test_establish_variables_from_array(self):
p = lm._LinearPlotter()
p.establish_variables(None,
x=self.df.x.values,
y=self.df.y.values)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y)
nt.assert_is(p.data, None)
def test_establish_variables_from_mix(self):
p = lm._LinearPlotter()
p.establish_variables(self.df, x="x", y=self.df.y)
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
pdt.assert_frame_equal(p.data, self.df)
def test_establish_variables_from_bad(self):
p = lm._LinearPlotter()
with nt.assert_raises(ValueError):
p.establish_variables(None, x="x", y=self.df.y)
def test_dropna(self):
p = lm._LinearPlotter()
p.establish_variables(self.df, x="x", y_na="y_na")
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y_na, self.df.y_na)
p.dropna("x", "y_na")
mask = self.df.y_na.notnull()
pdt.assert_series_equal(p.x, self.df.x[mask])
pdt.assert_series_equal(p.y_na, self.df.y_na[mask])
class TestRegressionPlotter(PlotTestCase):
rs = np.random.RandomState(49)
grid = np.linspace(-3, 3, 30)
n_boot = 100
bins_numeric = 3
bins_given = [-1, 0, 1]
df = pd.DataFrame(dict(x=rs.normal(size=60),
d=rs.randint(-2, 3, 60),
y=rs.gamma(4, size=60),
s=np.tile(list(range(6)), 10)))
df["z"] = df.y + rs.randn(60)
df["y_na"] = df.y.copy()
bw_err = rs.randn(6)[df.s.values] * 2
df.y += bw_err
p = 1 / (1 + np.exp(-(df.x * 2 + rs.randn(60))))
df["c"] = [rs.binomial(1, p_i) for p_i in p]
df.loc[[10, 20, 30], 'y_na'] = np.nan
def test_variables_from_frame(self):
p = lm._RegressionPlotter("x", "y", data=self.df, units="s")
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
pdt.assert_series_equal(p.units, self.df.s)
pdt.assert_frame_equal(p.data, self.df)
def test_variables_from_series(self):
p = lm._RegressionPlotter(self.df.x, self.df.y, units=self.df.s)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y)
npt.assert_array_equal(p.units, self.df.s)
nt.assert_is(p.data, None)
def test_variables_from_mix(self):
p = lm._RegressionPlotter("x", self.df.y + 1, data=self.df)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y + 1)
pdt.assert_frame_equal(p.data, self.df)
def test_dropna(self):
p = lm._RegressionPlotter("x", "y_na", data=self.df)
nt.assert_equal(len(p.x), pd.notnull(self.df.y_na).sum())
p = lm._RegressionPlotter("x", "y_na", data=self.df, dropna=False)
nt.assert_equal(len(p.x), len(self.df.y_na))
def test_ci(self):
p = lm._RegressionPlotter("x", "y", data=self.df, ci=95)
nt.assert_equal(p.ci, 95)
nt.assert_equal(p.x_ci, 95)
p = lm._RegressionPlotter("x", "y", data=self.df, ci=95, x_ci=68)
nt.assert_equal(p.ci, 95)
nt.assert_equal(p.x_ci, 68)
@skipif(_no_statsmodels)
def test_fast_regression(self):
p = lm._RegressionPlotter("x", "y", data=self.df, n_boot=self.n_boot)
# Fit with the "fast" function, which just does linear algebra
yhat_fast, _ = p.fit_fast(self.grid)
# Fit using the statsmodels function with an OLS model
yhat_smod, _ = p.fit_statsmodels(self.grid, smlm.OLS)
# Compare the vector of y_hat values
npt.assert_array_almost_equal(yhat_fast, yhat_smod)
@skipif(_no_statsmodels)
def test_regress_poly(self):
p = lm._RegressionPlotter("x", "y", data=self.df, n_boot=self.n_boot)
# Fit a first-order polynomial
yhat_poly, _ = p.fit_poly(self.grid, 1)
# Fit using the statsmodels function with an OLS model
yhat_smod, _ = p.fit_statsmodels(self.grid, smlm.OLS)
# Compare the vector of y_hat values
npt.assert_array_almost_equal(yhat_poly, yhat_smod)
def test_regress_logx(self):
x = np.arange(1, 10)
y = np.arange(1, 10)
grid = np.linspace(1, 10, 100)
p = lm._RegressionPlotter(x, y, n_boot=self.n_boot)
yhat_lin, _ = p.fit_fast(grid)
yhat_log, _ = p.fit_logx(grid)
nt.assert_greater(yhat_lin[0], yhat_log[0])
nt.assert_greater(yhat_log[20], yhat_lin[20])
nt.assert_greater(yhat_lin[90], yhat_log[90])
@skipif(_no_statsmodels)
def test_regress_n_boot(self):
p = lm._RegressionPlotter("x", "y", data=self.df, n_boot=self.n_boot)
# Fast (linear algebra) version
_, boots_fast = p.fit_fast(self.grid)
npt.assert_equal(boots_fast.shape, (self.n_boot, self.grid.size))
# Slower (np.polyfit) version
_, boots_poly = p.fit_poly(self.grid, 1)
npt.assert_equal(boots_poly.shape, (self.n_boot, self.grid.size))
# Slowest (statsmodels) version
_, boots_smod = p.fit_statsmodels(self.grid, smlm.OLS)
npt.assert_equal(boots_smod.shape, (self.n_boot, self.grid.size))
@skipif(_no_statsmodels)
def test_regress_without_bootstrap(self):
p = lm._RegressionPlotter("x", "y", data=self.df,
n_boot=self.n_boot, ci=None)
# Fast (linear algebra) version
_, boots_fast = p.fit_fast(self.grid)
nt.assert_is(boots_fast, None)
# Slower (np.polyfit) version
_, boots_poly = p.fit_poly(self.grid, 1)
nt.assert_is(boots_poly, None)
# Slowest (statsmodels) version
_, boots_smod = p.fit_statsmodels(self.grid, smlm.OLS)
nt.assert_is(boots_smod, None)
def test_numeric_bins(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x_binned, bins = p.bin_predictor(self.bins_numeric)
npt.assert_equal(len(bins), self.bins_numeric)
npt.assert_array_equal(np.unique(x_binned), bins)
def test_provided_bins(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x_binned, bins = p.bin_predictor(self.bins_given)
npt.assert_array_equal(np.unique(x_binned), self.bins_given)
def test_bin_results(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x_binned, bins = p.bin_predictor(self.bins_given)
nt.assert_greater(self.df.x[x_binned == 0].min(),
self.df.x[x_binned == -1].max())
nt.assert_greater(self.df.x[x_binned == 1].min(),
self.df.x[x_binned == 0].max())
def test_scatter_data(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x, y = p.scatter_data
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
p = lm._RegressionPlotter(self.df.d, self.df.y)
x, y = p.scatter_data
npt.assert_array_equal(x, self.df.d)
npt.assert_array_equal(y, self.df.y)
p = lm._RegressionPlotter(self.df.d, self.df.y, x_jitter=.1)
x, y = p.scatter_data
nt.assert_true((x != self.df.d).any())
npt.assert_array_less(np.abs(self.df.d - x), np.repeat(.1, len(x)))
npt.assert_array_equal(y, self.df.y)
p = lm._RegressionPlotter(self.df.d, self.df.y, y_jitter=.05)
x, y = p.scatter_data
npt.assert_array_equal(x, self.df.d)
npt.assert_array_less(np.abs(self.df.y - y), np.repeat(.1, len(y)))
def test_estimate_data(self):
p = lm._RegressionPlotter(self.df.d, self.df.y, x_estimator=np.mean)
x, y, ci = p.estimate_data
npt.assert_array_equal(x, np.sort(np.unique(self.df.d)))
npt.assert_array_almost_equal(y, self.df.groupby("d").y.mean())
npt.assert_array_less(np.array(ci)[:, 0], y)
npt.assert_array_less(y, np.array(ci)[:, 1])
def test_estimate_cis(self):
# set known good seed to avoid the test stochastically failing
np.random.seed(123)
p = lm._RegressionPlotter(self.df.d, self.df.y,
x_estimator=np.mean, ci=95)
_, _, ci_big = p.estimate_data
p = lm._RegressionPlotter(self.df.d, self.df.y,
x_estimator=np.mean, ci=50)
_, _, ci_wee = p.estimate_data
npt.assert_array_less(np.diff(ci_wee), np.diff(ci_big))
p = lm._RegressionPlotter(self.df.d, self.df.y,
x_estimator=np.mean, ci=None)
_, _, ci_nil = p.estimate_data
npt.assert_array_equal(ci_nil, [None] * len(ci_nil))
def test_estimate_units(self):
# Seed the RNG locally
np.random.seed(345)
p = lm._RegressionPlotter("x", "y", data=self.df,
units="s", x_bins=3)
_, _, ci_big = p.estimate_data
ci_big = np.diff(ci_big, axis=1)
p = lm._RegressionPlotter("x", "y", data=self.df, x_bins=3)
_, _, ci_wee = p.estimate_data
ci_wee = np.diff(ci_wee, axis=1)
npt.assert_array_less(ci_wee, ci_big)
def test_partial(self):
x = self.rs.randn(100)
y = x + self.rs.randn(100)
z = x + self.rs.randn(100)
p = lm._RegressionPlotter(y, z)
_, r_orig = np.corrcoef(p.x, p.y)[0]
p = lm._RegressionPlotter(y, z, y_partial=x)
_, r_semipartial = np.corrcoef(p.x, p.y)[0]
nt.assert_less(r_semipartial, r_orig)
p = lm._RegressionPlotter(y, z, x_partial=x, y_partial=x)
_, r_partial = np.corrcoef(p.x, p.y)[0]
nt.assert_less(r_partial, r_orig)
@skipif(_no_statsmodels)
def test_logistic_regression(self):
p = lm._RegressionPlotter("x", "c", data=self.df,
logistic=True, n_boot=self.n_boot)
_, yhat, _ = p.fit_regression(x_range=(-3, 3))
npt.assert_array_less(yhat, 1)
npt.assert_array_less(0, yhat)
@skipif(_no_statsmodels)
def test_robust_regression(self):
p_ols = lm._RegressionPlotter("x", "y", data=self.df,
n_boot=self.n_boot)
_, ols_yhat, _ = p_ols.fit_regression(x_range=(-3, 3))
p_robust = lm._RegressionPlotter("x", "y", data=self.df,
robust=True, n_boot=self.n_boot)
_, robust_yhat, _ = p_robust.fit_regression(x_range=(-3, 3))
nt.assert_equal(len(ols_yhat), len(robust_yhat))
@skipif(_no_statsmodels)
def test_lowess_regression(self):
p = lm._RegressionPlotter("x", "y", data=self.df, lowess=True)
grid, yhat, err_bands = p.fit_regression(x_range=(-3, 3))
nt.assert_equal(len(grid), len(yhat))
nt.assert_is(err_bands, None)
def test_regression_options(self):
with nt.assert_raises(ValueError):
lm._RegressionPlotter("x", "y", data=self.df,
lowess=True, order=2)
with nt.assert_raises(ValueError):
lm._RegressionPlotter("x", "y", data=self.df,
lowess=True, logistic=True)
def test_regression_limits(self):
f, ax = plt.subplots()
ax.scatter(self.df.x, self.df.y)
p = lm._RegressionPlotter("x", "y", data=self.df)
grid, _, _ = p.fit_regression(ax)
xlim = ax.get_xlim()
nt.assert_equal(grid.min(), xlim[0])
nt.assert_equal(grid.max(), xlim[1])
p = lm._RegressionPlotter("x", "y", data=self.df, truncate=True)
grid, _, _ = p.fit_regression()
nt.assert_equal(grid.min(), self.df.x.min())
nt.assert_equal(grid.max(), self.df.x.max())
class TestRegressionPlots(PlotTestCase):
rs = np.random.RandomState(56)
df = pd.DataFrame(dict(x=rs.randn(90),
y=rs.randn(90) + 5,
z=rs.randint(0, 1, 90),
g=np.repeat(list("abc"), 30),
h=np.tile(list("xy"), 45),
u=np.tile(np.arange(6), 15)))
bw_err = rs.randn(6)[df.u.values]
df.y += bw_err
def test_regplot_basic(self):
f, ax = plt.subplots()
lm.regplot("x", "y", self.df)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
def test_regplot_selective(self):
f, ax = plt.subplots()
ax = lm.regplot("x", "y", self.df, scatter=False, ax=ax)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 1)
ax.clear()
f, ax = plt.subplots()
ax = lm.regplot("x", "y", self.df, fit_reg=False)
nt.assert_equal(len(ax.lines), 0)
nt.assert_equal(len(ax.collections), 1)
ax.clear()
f, ax = plt.subplots()
ax = lm.regplot("x", "y", self.df, ci=None)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 1)
ax.clear()
def test_regplot_scatter_kws_alpha(self):
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5, 0.5]])
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color})
nt.assert_is(ax.collections[0]._alpha, None)
nt.assert_equal(ax.collections[0]._facecolors[0, 3], 0.5)
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5]])
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color})
nt.assert_equal(ax.collections[0]._alpha, 0.8)
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5]])
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color,
'alpha': 0.4})
nt.assert_equal(ax.collections[0]._alpha, 0.4)
f, ax = plt.subplots()
color = 'r'
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color})
nt.assert_equal(ax.collections[0]._alpha, 0.8)
def test_regplot_binned(self):
ax = lm.regplot("x", "y", self.df, x_bins=5)
nt.assert_equal(len(ax.lines), 6)
nt.assert_equal(len(ax.collections), 2)
def test_lmplot_basic(self):
g = lm.lmplot("x", "y", self.df)
ax = g.axes[0, 0]
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
def test_lmplot_hue(self):
g = lm.lmplot("x", "y", data=self.df, hue="h")
ax = g.axes[0, 0]
nt.assert_equal(len(ax.lines), 2)
nt.assert_equal(len(ax.collections), 4)
def test_lmplot_markers(self):
g1 = lm.lmplot("x", "y", data=self.df, hue="h", markers="s")
nt.assert_equal(g1.hue_kws, {"marker": ["s", "s"]})
g2 = lm.lmplot("x", "y", data=self.df, hue="h", markers=["o", "s"])
nt.assert_equal(g2.hue_kws, {"marker": ["o", "s"]})
with nt.assert_raises(ValueError):
lm.lmplot("x", "y", data=self.df, hue="h", markers=["o", "s", "d"])
def test_lmplot_marker_linewidths(self):
if mpl.__version__ == "1.4.2":
raise SkipTest
g = lm.lmplot("x", "y", data=self.df, hue="h",
fit_reg=False, markers=["o", "+"])
c = g.axes[0, 0].collections
nt.assert_equal(c[0].get_linewidths()[0], 0)
rclw = mpl.rcParams["lines.linewidth"]
nt.assert_equal(c[1].get_linewidths()[0], rclw)
def test_lmplot_facets(self):
g = lm.lmplot("x", "y", data=self.df, row="g", col="h")
nt.assert_equal(g.axes.shape, (3, 2))
g = lm.lmplot("x", "y", data=self.df, col="u", col_wrap=4)
nt.assert_equal(g.axes.shape, (6,))
g = lm.lmplot("x", "y", data=self.df, hue="h", col="u")
nt.assert_equal(g.axes.shape, (1, 6))
def test_lmplot_hue_col_nolegend(self):
g = lm.lmplot("x", "y", data=self.df, col="h", hue="h")
nt.assert_is(g._legend, None)
def test_lmplot_scatter_kws(self):
g = lm.lmplot("x", "y", hue="h", data=self.df, ci=None)
red_scatter, blue_scatter = g.axes[0, 0].collections
red, blue = color_palette(n_colors=2)
npt.assert_array_equal(red, red_scatter.get_facecolors()[0, :3])
npt.assert_array_equal(blue, blue_scatter.get_facecolors()[0, :3])
def test_residplot(self):
x, y = self.df.x, self.df.y
ax = lm.residplot(x, y)
resid = y - np.polyval(np.polyfit(x, y, 1), x)
x_plot, y_plot = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, x_plot)
npt.assert_array_almost_equal(resid, y_plot)
@skipif(_no_statsmodels)
def test_residplot_lowess(self):
ax = lm.residplot("x", "y", self.df, lowess=True)
nt.assert_equal(len(ax.lines), 2)
x, y = ax.lines[1].get_xydata().T
npt.assert_array_equal(x, np.sort(self.df.x))
def test_three_point_colors(self):
x, y = np.random.randn(2, 3)
ax = lm.regplot(x, y, color=(1, 0, 0))
color = ax.collections[0].get_facecolors()
npt.assert_almost_equal(color[0, :3],
(1, 0, 0))
| bsd-3-clause |
AliShug/RoboVis | robovis/load_histogram.py | 1 | 3645 | import numpy as np
# from matplotlib import pyplot as plt
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class RVLoadHistogram(QGraphicsView):
'''A histogram for the maximum load across the reachable area'''
def __init__(self, ik):
width = 330
height = 120
self.scene = QGraphicsScene(0,-15,width,height-15)
super(RVLoadHistogram, self).__init__(self.scene)
self.setBackgroundBrush(QBrush(Qt.white))
self.setRenderHints(QPainter.Antialiasing)
self.setFrameStyle(0)
self.setAlignment(Qt.AlignCenter)
self.setFixedSize(width, height)
self.setSceneRect(0, 0, width, height)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scale(1, -1)
self.subscribers = {
'mouseEnter' : [],
'mouseLeave' : [],
'mouseMove' : []
}
self.lines = []
self.hist = []
self.edges = []
self.config = ik.config
self.update(ik)
self.setMouseTracking(True)
def update(self, ik=None):
if ik is not None:
self.ik = ik
self.min_load = self.config['min_load'].value
for line in self.lines:
self.scene.removeItem(line)
self.lines = []
width = self.width()
height = self.height()
loads = np.ma.masked_invalid(self.ik.loads*self.ik.partial_ok)
loads = np.ma.masked_where(loads == 0, loads).compressed()
self.hist, self.edges = np.histogram(loads, bins='auto')
buckets = len(self.hist)
self.screen_step = width/np.max(self.edges)
max_count = np.max(self.hist)
# Display histogram
for i in range(buckets):
x = self.edges[i] * self.screen_step
w = max(1, (self.edges[i+1] - self.edges[i]) * self.screen_step)
l = (self.edges[i] + self.edges[i + 1]) / 2
count = self.hist[i]
if l < self.min_load:
color = QColor(100,100,100)
else:
color = QColor(200, 180, 100)
# print(count)
line = self.scene.addLine(x, 5, x, 5 + (height-5) * count/max_count, QPen(color, w))
self.lines.append(line)
# Setpoint shows the configuration's minimum load
setpoint = self.config['min_load'].value * self.screen_step
line = self.scene.addLine(setpoint, 0, setpoint, height, QPen(QColor(150, 150, 255), 2))
self.lines.append(line)
def setMinimumLoad(self, val):
self.min_load = val
self.update()
def subscribe(self, event, function):
self.subscribers[event].append(function)
def enterEvent(self, event):
for func in self.subscribers['mouseEnter']:
func(event)
def leaveEvent(self, event):
self.setMinimumLoad(self.config['min_load'].value)
for func in self.subscribers['mouseLeave']:
func(event)
def mouseMoveEvent(self, event):
if event.buttons() == Qt.LeftButton:
self.click(event.pos())
else:
pt = self.mapToScene(event.pos())
self.setMinimumLoad(pt.x()/self.screen_step)
for func in self.subscribers['mouseMove']:
func(event)
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
self.click(event.pos())
def click(self, pos):
pt = self.mapToScene(pos)
self.config['min_load'].value = pt.x()/self.screen_step
self.config.notifyChange()
| mit |
cgrima/rsr | rsr/fit.py | 1 | 4401 | """
Various tools for extracting signal components from a fit of the amplitude
distribution
"""
from . import pdf
from .Classdef import Statfit
import numpy as np
import time
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, report_fit
def param0(sample, method='basic'):
"""Estimate initial parameters for HK fitting
Arguments
---------
sample : sequence
amplitudes
Keywords
--------
method : string
method to compute the initial parameters
"""
if method == 'basic':
a = np.nanmean(sample)
s = np.nanstd(sample)
mu = 1.
return {'a':a, 's':s, 'mu':mu}
def lmfit(sample, fit_model='hk', bins='auto', p0 = None,
xtol=1e-4, ftol=1e-4):
"""Lmfit
Arguments
---------
sample : sequence
amplitudes between 0 and 1.
Keywords
--------
fit_model : string
name of the function (in pdf module) to use for the fit
bins : string
method to compute the bin width (inherited from numpy.histogram)
p0 : dict
Initial parameters. If None, estimated automatically.
xtol : float
Relative tolerance on the fitted parameters passed to the least-squares minimizer.
ftol : float
Relative tolerance on the sum of squared residuals passed to the least-squares minimizer.
Return
------
A Statfit Class
"""
start = time.time()
winsize = len(sample)
bad = False
#--------------------------------------------------------------------------
# Clean sample
#--------------------------------------------------------------------------
sample = np.array(sample)
sample = sample[np.isfinite(sample)]
if len(sample) == 0:
bad = True
sample = np.zeros(10)+1
#--------------------------------------------------------------------------
# Make the histogram
#--------------------------------------------------------------------------
# n, edges, patches = hist(sample, bins=bins, normed=True)
n, edges = np.histogram(sample, bins=bins, density=True)
# plt.clf()
x = ((np.roll(edges, -1) + edges)/2.)[0:-1]
#--------------------------------------------------------------------------
# Initial Parameters for the fit
#--------------------------------------------------------------------------
if p0 is None:
p0 = param0(sample)
prm0 = Parameters()
# (Name, Value, Vary, Min, Max, Expr)
prm0.add('a', p0['a'], True, 0, 1, None)
prm0.add('s', p0['s'], True, 0, 1, None)
prm0.add('mu', p0['mu'], True, .5, 10, None)
prm0.add('pt', np.average(sample)**2,False, 0, 1, 'a**2+2*s**2*mu')
#if fit_model == 'hk':
# # From [Dutt and Greenleaf. 1994, eq.14]
# prm0.add('a4', np.average(sample)**4,False, 0, 1,
# '8*(1+1/mu)*s**4 + 8*s**2*s**2 + a**4')
#--------------------------------------------------------------------------
# Fit
#--------------------------------------------------------------------------
pdf2use = getattr(pdf, fit_model)
# use 'lbfgs' fit if error with 'leastsq' fit
try:
p = minimize(pdf2use, prm0, args=(x, n), method='leastsq',
xtol=xtol, ftol=ftol)
except KeyboardInterrupt:
raise
except:
print('!! Error with LEASTSQ fit, use L-BFGS-B instead')
p = minimize(pdf2use, prm0, args=(x, n), method='lbfgs')
#--------------------------------------------------------------------------
# Output
#--------------------------------------------------------------------------
elapsed = time.time() - start
values = {}
# Create values dict For lmfit >0.9.0 compatibility since it is no longer
# in the minimize output
for i in p.params.keys():
values[i] = p.params[i].value
# Results
result = Statfit(sample, pdf2use, values, p.params,
p.chisqr, p.redchi, elapsed, p.nfev, p.message, p.success,
p.residual, x, n, edges, bins=bins)
# Identify bad results
if bad is True:
result.success = False
result.values['a'] = 0
result.values['s'] = 0
result.values['mu'] = 0
result.values['pt'] = 0
result.chisqr = 0
result.redchi = 0
result.message = 'No valid data in the sample'
result.residual = 0
return result
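# Illustrative usage sketch (the synthetic sample below is hypothetical; any
# sequence of amplitudes between 0 and 1 with enough points will do): fit the
# 'hk' model to a Rayleigh-like amplitude sample.
#
# import numpy as np
# amp = np.abs(np.random.normal(0, .1, 10000) +
#              1j*np.random.normal(0, .1, 10000))
# f = lmfit(amp, fit_model='hk', bins='auto')
# print(f.values)               # fitted {'a', 's', 'mu', 'pt'}
# print(f.success, f.message)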
| mit |
PedroMDuarte/thesis-hubbard-lda_evap | qmc.py | 1 | 16230 |
"""
This file provides a way to obtain thermodynamic quantities from an
interpolation of available QMC solutions
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import rc
rc('font', **{'family':'serif'})
rc('text', usetex=True)
import glob
import os
import ldaconf
basedir = ldaconf.basedir
from scipy.spatial import Delaunay
from scipy.interpolate import CloughTocher2DInterpolator, LinearNDInterpolator
from scipy.interpolate.interpnd import _ndim_coords_from_arrays
import logging
# create logger
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
#logger.disabled = True
def get_qty_mu( dat, mu, MUCOL, COL, **kwargs ):
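    """Interpolate column COL of the QMC data table ``dat`` at the chemical
    potential ``mu`` (read from column MUCOL). Summary inferred from the code
    below: outside the tabulated mu range a quantity-dependent default is
    returned, or the string 'out-of-bounds' when the `careful` check applies.
    """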
    # Control the interpolation between available
    # density points here
#~qtyinterp = 'nearest'
qtyinterp = 'linear'
msg = kwargs.get('msg', None)
DENSCOL = 1
ENTRCOL = 2
SPICOL = 3
CMPRCOL = 4
if COL == SPICOL:
default_minus = 1.0
default_plus = 0.0
elif COL == ENTRCOL:
default_minus = 0.0
default_plus = 0.0
elif COL == DENSCOL:
default_minus = 0.0
default_plus = 2.0
elif COL == CMPRCOL:
default_minus = 0.0
default_plus = 0.0
else:
raise ValueError("Column not defined: COL = {:d}".format(COL) )
CAREFUL = kwargs.get('careful', True)
if CAREFUL and (mu < -10. or mu > 60.):
CAREFUL = False
if qtyinterp == 'nearest':
index = np.argmin( np.abs(dat[:, MUCOL] - mu ))
qtyresult = dat[index,COL]
else:
        # find the two closest chemical potentials that
        # straddle the point
mudat = dat[:,MUCOL]
verbose = False
if np.all(mu < mudat):
qtyresult = default_minus
if COL == DENSCOL or COL == ENTRCOL:
if verbose:
print "QTY=", COL,
print "===>>> mu={:0.2f} ".format(mu), msg
if dat[:,DENSCOL].min() < 0.1 :
qtyresult = default_minus
elif CAREFUL:
return 'out-of-bounds'
#print "====>>> BE CAREFUL : Using default density" + \
# " n=%.2f"%default_minus + \
# " at mu={:0.2f} ".format(mu),
#if msg is not None:
# print msg
#raise ValueError('density error')
elif np.all( mu > mudat):
qtyresult = default_plus
if COL == DENSCOL or COL == ENTRCOL:
if verbose:
print "QTY=", COL,
print "====>>> mu={:0.2f} ".format(mu), msg
if dat[:,DENSCOL].max() > 1.9 :
qtyresult = default_plus
elif CAREFUL:
return 'out-of-bounds'
#print "====>>> BE CAREFUL : Using default density" + \
# " n=%.2f"%default_plus + \
# " at mu={:0.2f} ".format(mu),
#if msg is not None:
# print msg
#raise ValueError('density error')
else:
# since the mu's are ordered we can do:
index0 = np.where( mudat <=mu )[0][-1]
index1 = np.where( mudat > mu )[0][0]
qty0 = dat[ index0, COL ]
qty1 = dat[ index1, COL ]
mu0 = dat[ index0, MUCOL ]
mu1 = dat[ index1, MUCOL ]
qtyresult = qty0 + (mu-mu0) * (qty1-qty0) / (mu1-mu0)
return qtyresult
#print
#print " mu = ", mu
#print "index0 = ", index0
#print "index1 = ", index1
#print "Doing linear interpolation for the qty"
#print " mu0 = ", mu0
#print " mu1 = ", mu1
#print "qty0 = ", qty0
#print "qty1 = ", qty1
#print "qtyresult = ", qtyresult
def find_closest_qmc( U=8, T=0.67, mu=4.0, **kwargs):
"""
This function finds the closest values of U and T in the QMC data
that straddle the values U and T given as arguments.
"""
nUs = 4
nTs = 3
ALLPTS = kwargs.get('ALLPTS', False)
# select which quantity will be returned, options are
# spi and entropy
QTY = kwargs.get('QTY', 'spi' )
if QTY == 'spi':
datadir = basedir + 'COMB_Final_Spi/'
elif QTY == 'entropy':
datadir = basedir + 'COMB_Final_Entr/'
elif QTY == 'density':
datadir = basedir + 'COMB_Final_Spi/'
elif QTY == 'kappa':
datadir = basedir + 'COMB_Final_Spi/'
else:
raise ValueError('Quantity not defined:' + str(QTY) )
fname = datadir + 'U*'
us = [ float(u.split('/U')[-1]) for u in glob.glob(fname) ]
du = [ np.abs(U-u) for u in us ]
index = np.argsort(du)
if ALLPTS:
Ulist0 = range(len(index))
else:
Ulist0 = range( nUs )
us = [ us[index[i]] for i in Ulist0]
#print us
#print du
#print index
#print "Closest Us = ", us
datfiles = []
for u in us:
# For the Spi and Stheta data
if QTY == 'spi' or QTY == 'density' or QTY == 'kappa':
fname = datadir + 'U{U:02d}/T*dat'.format(U=int(u))
fs = sorted(glob.glob(fname))
Ts = [ float(f.split('T')[1].split('.dat')[0]) for f in fs ]
elif QTY=='entropy':
fname = datadir + 'U{U:02d}/S*dat'.format(U=int(u))
fs = sorted(glob.glob(fname))
Ts = [ float(f.split('S')[1].split('.dat')[0]) for f in fs ]
Ts_g = [] ; Ts_l = [];
for t in Ts:
if t > T:
Ts_g.append(t)
else:
Ts_l.append(t)
order_g = np.argsort( [ np.abs( T -t ) for t in Ts_g ] )
order_l = np.argsort( [ np.abs( T -t ) for t in Ts_l ] )
try:
Tpts = [ Ts_g[ order_g[0]] , Ts_l[ order_l[0]] ]
except:
#print
#print "problem adding U=",u, "T=",Ts
#print "available T data does not stride the point"
#print "T =", T
#print "Ts =", Ts
#print "will add nearest Ts nevertheless"
Tpts = [ ]
#raise ValueError("QMC data not available.")
dT = [ np.abs( T - t) for t in Ts ]
index = np.argsort(dT)
if ALLPTS:
Tlist0 = range(len(Ts))
else:
Tlist0 = range( min(nTs , len(Ts)))
for i in Tlist0:
Tnew = Ts[index[i]]
if Tnew not in Tpts:
Tpts.append(Tnew)
for Tpt in Tpts:
index = Ts.index( Tpt )
try:
datfiles.append( [ fs[ index ], u, Ts[index] ] )
except:
print "problem adding U=",u, "T=",Ts
raise
        # Need to make sure that selected T values straddle both
        # sides of the point
#print
#print u
#print Ts
#print dT
#print index
#print fs
# for i in range(min(3, len(Ts))):
# try:
# datfiles.append( [ fs[index[i]], u, Ts[index[i]] ] )
# except:
# print "problem adding U=",u, "T=",Ts
# raise
#
#datfiles.append( [ fs[index[1]], u, Ts[index[1]] ] )
#print datfiles
MUCOL = 0
DENSCOL = 1
ENTRCOL = 2
SPICOL = 3
CMPRCOL = 4
if QTY == 'spi':
COL = SPICOL
elif QTY == 'entropy':
COL = ENTRCOL
elif QTY == 'density':
COL = DENSCOL
elif QTY == 'kappa':
COL = CMPRCOL
msg0 = 'U={:0.2f}, T={:0.2f}'.format(U,T)
logger.debug("number of nearby points = " + str(len(datfiles)))
basedat = []
basedaterr = []
datserr = []
for mm, f in enumerate(datfiles):
# f[0] is the datafile name
# f[1] is U
# f[2] is T
radius = kwargs.get('radius', np.nan )
msg = 'U={:0.2f}, T={:0.2f}'.format(U,T) + \
' mu={:0.2f}, r={:0.2f}, Upt={:0.3f}, Tpt={:0.3f}'.\
format(mu, radius, f[1], f[2])
try:
dat = np.loadtxt(f[0])
spival = get_qty_mu( dat, mu, MUCOL, COL, msg=msg )
# Toggle the false here to plot all of the out of bounds
if spival == 'out-of-bounds':
#spival_symmetry =
logger.info('qty is out of bounds')
basedaterr.append( [f[1], f[2], np.nan] )
datserr.append( dat )
if False:
fig = plt.figure( figsize=(3.5,3.5))
gs = matplotlib.gridspec.GridSpec( 1,1 ,\
left=0.15, right=0.96, bottom=0.12, top=0.88)
ax = fig.add_subplot( gs[0] )
ax.grid(alpha=0.5)
ax.plot( dat[:,MUCOL], dat[:,COL], '.-')
ax.axvline( mu )
ax.text( 0.5, 1.05, msg, ha='center', va='bottom', \
transform=ax.transAxes, fontsize=6.)
if matplotlib.get_backend() == 'agg':
fig.savefig('err_mu_%02d.png'%mm, dpi=200)
plt.close(fig)
else:
plt.show()
plt.close(fig)
continue
else:
basedat.append( [f[1], f[2], spival] )
except Exception as e :
print "Failed to get data from file = ", f
# toggle plotting, not implemented yet:
if True:
fig = plt.figure( figsize=(3.5,3.5))
gs = matplotlib.gridspec.GridSpec( 1,1 ,\
left=0.15, right=0.96, bottom=0.12, top=0.88)
ax = fig.add_subplot( gs[0] )
ax.grid(alpha=0.5)
ax.plot( dat[:,MUCOL], dat[:,COL], '.-')
ax.axvline( mu )
ax.text( 0.5, 1.05, msg, ha='center', va='bottom', \
transform=ax.transAxes)
if matplotlib.get_backend() == 'agg':
fig.savefig('err_mu_%02d.png'%mm, dpi=200)
else:
plt.show()
raise e
logger.debug("number of nearby valid points = " + str(len(basedat)))
error = False
points = None
# MAKE THE TRIANGULATION
basedat = np.array(basedat)
Us = np.unique(basedat[:,0] )
Ts = np.unique(basedat[:,1] )
validTriang = not ( len(Us) ==1 or len(Ts) == 1 )
#print "#Us={:d}, #Ts={:d}".format( len(Us), len(Ts) )
#print msg
if validTriang:
points = _ndim_coords_from_arrays(( basedat[:,0] , basedat[:,1]))
#print "Closest dat = ", basedat
#finterp = CloughTocher2DInterpolator(points, basedat[:,2])
finterp = LinearNDInterpolator( points, basedat[:,2] )
else:
logerr = 'not enough finterp points, QTY=%s'%QTY + '\n' + msg + '\n' \
+ "number of basedat pts = " + str(len(basedat))
print basedat
print "len Us = ", len(Us)
print "len Ts = ", len(Ts)
print "len 'out-of-bounds' = ", len( basedaterr )
if len( basedaterr ) > 0:
for bb, bdaterr in enumerate(basedaterr):
msgbb = 'U={:0.2f}, T={:0.2f}'.format(U,T) +\
' mu={:0.2f}, r={:0.2f}, Upt={:0.3f}, Tpt={:0.3f}'.\
format(mu, radius, basedaterr[bb][0], basedaterr[bb][1] )
daterr = datserr[bb]
fig = plt.figure( figsize=(3.5,3.5))
gs = matplotlib.gridspec.GridSpec( 1,1 ,\
left=0.15, right=0.96, bottom=0.12, top=0.88)
ax = fig.add_subplot( gs[0] )
ax.grid(alpha=0.5)
ax.plot( daterr[:,MUCOL], daterr[:,COL], '.-')
ax.axvline( mu )
ax.text( 0.5, 1.05, msgbb, ha='center', va='bottom', \
transform=ax.transAxes, fontsize=6.)
if matplotlib.get_backend() == 'agg':
fig.savefig('err_mu_%02d.png'%bb, dpi=200)
plt.close(fig)
else:
plt.show()
plt.close(fig)
logger.exception(logerr)
raise ValueError('finterp')
    if points is None:
logger.warning( "points object is None" )
if error == False:
try:
result = finterp( U,T )
if np.isnan(result):
if U >= 30.0 and U <=32.5:
result = finterp( 29.99, T )
logger.warning(" qmc: U={:0.1f} replaced to U=29.99 ".\
format(U) )
if np.isnan(result):
raise Exception("\n!!!! qmc: Invalid result, QTY:%s!!!!\n"%QTY \
+ msg0)
except Exception as e:
if kwargs.get('error_nan', False):
return np.nan
else:
error = True
logger.exception("Invalid QTY result!")
if error == False:
if result >= 8. and QTY == 'spi' :
print " Obtained Spi > 8. : U={:0.2f}, T={:0.2f}, mu={:0.2f}".\
format( U, T, mu ),
print " ==> Spi={:0.2f}".format(float(result))
error = True
elif result >=4. and QTY == 'entropy':
print " Obtained Ent > 4. : U={:0.2f}, T={:0.2f}, mu={:0.2f}".\
format( U, T, mu ),
print " ==> Result={:0.2f}".format(float(result))
error = True
logger.debug("error status = " + str(error))
if error or kwargs.get('showinterp',False):
logger.debug("Inside error if statement...")
if kwargs.get('error_nan', False):
pass
#return np.nan
#print "Interp points:"
#print basedat
if len(basedat) == 0 and len(basedaterr) > 0 :
basedaterr = np.array(basedaterr)
Userr = np.unique(basedaterr[:,0] )
Tserr = np.unique(basedaterr[:,1] )
validTriangerr = not ( len(Userr) ==1 or len(Tserr) == 1 )
points = _ndim_coords_from_arrays(( basedaterr[:,0] , basedaterr[:,1]))
tri = Delaunay(points)
else:
tri = Delaunay(points)
fig = plt.figure( figsize=(3.5,3.5))
gs = matplotlib.gridspec.GridSpec( 1,1 ,\
left=0.15, right=0.96, bottom=0.12, top=0.88)
ax = fig.add_subplot( gs[0] )
ax.grid(alpha=0.5)
ax.triplot(points[:,0], points[:,1], tri.simplices.copy())
ax.plot(points[:,0], points[:,1], 'o')
ax.plot( U, T, 'o', ms=6., color='red')
xlim = ax.get_xlim()
dx = (xlim[1]-xlim[0])/10.
ax.set_xlim( xlim[0]-dx, xlim[1]+dx )
ylim = ax.get_ylim()
dy = (ylim[1]-ylim[0])/10.
ax.set_ylim( ylim[0]-dy, ylim[1]+dy )
ax.set_xlabel('$U/t$')
ax.set_ylabel('$T/t$',rotation=0,labelpad=8)
tt = kwargs.get('title_text','')
ax.set_title( tt + '$U/t={:.2f}$'.format(U) + \
',\ \ ' + '$T/t={:.2f}$'.format(T), \
ha='center', va='bottom', fontsize=10)
save_err = kwargs.get('save_err',None)
if save_err is not None:
print "Saving png."
fig.savefig( save_err, dpi=300)
if matplotlib.get_backend() == 'agg':
fig.savefig('err.png', dpi=200)
print "Saved error to err.png"
else:
plt.show()
if not kwargs.get('single', False):
raise ValueError("Could not interpolate using QMC data.")
if ALLPTS:
if 'savepath' in kwargs.keys():
fig.savefig( kwargs.get('savepath',None) , dpi=300)
if error:
raise
return result
| mit |
phobson/statsmodels | docs/sphinxext/numpy_ext/docscrape_sphinx.py | 62 | 7703 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
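# This module subclasses NumpyDocString so that numpydoc-style sections render
# as Sphinx ReST: "Parameters" becomes a field list, "See Also" a seealso::
# block and "Examples" may become a plot:: directive (see the _str_* methods
# below).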
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
buntyke/GPy | doc/sphinxext/ipython_directive.py | 12 | 27263 | # -*- coding: utf-8 -*-
"""Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython session.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives).
By default this directive assumes that your prompts are unchanged IPython ones,
but this can be customized. The configurable options that can be placed in
conf.py are
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import cStringIO
import os
import re
import sys
import tempfile
import ast
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
try:
import matplotlib
matplotlib.use('Agg')
except ImportError:
print "Couldn't find matplotlib"
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
inputline += '\n' + nextline[Nc:]
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self):
self.cout = cStringIO.StringIO()
# Create config object for IPython
config = Config()
config.Global.display_banner = False
config.Global.exec_lines = ['import numpy as np',
'from pylab import *'
]
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize ipython, but don't start its mainloop
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done *after* instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
#print "input='%s'"%self.input
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""Process data block for INPUT token."""
decorator, input, rest = data
image_file = None
image_directive = None
#print 'INPUT:', data # dbg
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = decorator=='@doctest' or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i==0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=True)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, image_file,
image_directive)
#print 'OUTPUT', output # dbg
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, image_file):
"""Process data block for OUTPUT token."""
if is_doctest:
submitted = data.strip()
found = output
if found is not None:
found = found.strip()
# XXX - fperez: in 0.11, 'output' never comes with the prompt
# in it, just the actual output text. So I think all this code
# can be nuked...
# the above comment does not appear to be accurate... (minrk)
ind = found.find(output_prompt)
if ind<0:
e='output prompt="%s" does not match out line=%s' % \
(output_prompt, found)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
if found!=submitted:
e = ('doctest failure for input_lines="%s" with '
'found_output="%s" and submitted output="%s"' %
(input_lines, found, submitted) )
raise RuntimeError(e)
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin%lineno
output_prompt = self.promptout%lineno
image_file = None
image_directive = None
for token, data in block:
if token==COMMENT:
out_data = self.process_comment(data)
elif token==INPUT:
(out_data, input_lines, output, is_doctest, image_file,
image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token==OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
if self._pyplot_imported:
return
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
def process_pure_python(self, content):
"""
        content is a list of strings. It is the unedited directive content.
        This runs it line by line in the InteractiveShell, prepending
        prompts as needed and capturing stderr and stdout, then returns
        the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
class IpythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
}
shell = EmbeddedSphinxShell()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
def setup(self):
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
seen_docs = [i for i in os.listdir(tempfile.tempdir)
if i.startswith('seen_doc')]
if seen_docs:
fname = os.path.join(tempfile.tempdir, seen_docs[0])
docs = open(fname).read().split('\n')
if not self.state.document.current_source in docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
else: # haven't processed any docs yet
docs = []
# get config values
(savefig_dir, source_dir, rgxin,
rgxout, promptin, promptout) = self.get_config_options()
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
# write the filename to a tempfile because it's been "seen" now
if not self.state.document.current_source in docs:
fd, fname = tempfile.mkstemp(prefix="seen_doc", text=True)
fout = open(fname, 'a')
fout.write(self.state.document.current_source+'\n')
fout.close()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython','']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
#text = '\n'.join(lines)
#figs = '\n'.join(figures)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
#print lines
if len(lines)>2:
if debug:
print '\n'.join(lines)
else: #NOTE: this raises some errors, what's it for?
#print 'INSERTING %d lines'%len(lines)
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
text = '\n'.join(lines)
txtnode = nodes.literal_block(text, text)
txtnode['language'] = 'ipython'
#imgnode = nodes.image(figs)
# cleanup
self.teardown()
return []#, imgnode]
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IpythonDirective)
app.add_config_value('ipython_savefig_dir', None, True)
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), True)
app.add_config_value('ipython_promptin', 'In [%d]:', True)
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
ipython_directive('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print 'All OK? Check figures in _static/'
| mit |
idlead/scikit-learn | examples/text/document_classification_20newsgroups.py | 27 | 10521 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
cokelaer/colormap | src/colormap/colors.py | 1 | 32584 | # -*- python -*-
# -*- coding: utf-8 -*-
#
# This file is part of the colormap software
#
# Copyright (c) 2011-2014
#
# File author(s): Thomas Cokelaer <cokelaer@gmail.com>
#
# Distributed under the GPLv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# Website: https://github.com/cokelaer/colormap
# Documentation: http://packages.python.org/colormap
#
##############################################################################
"""Utilities provided in this module can be found either in the
standard Python module called :mod:`colorsys` or in matplotlib.colors
(e.g., rgb2hex) or are original to this module (e.g., rgb2yuv)
"""
# matplotlib dependence is only inside Colormap class
import colorsys
from easydev.tools import check_param_in_list, swapdict, check_range
from colormap.xfree86 import XFree86_colors
__all__ = ["HEX", "Color", "hex2web", "web2hex", "hex2rgb", "hex2dec",
"rgb2hex", "rgb2hsv", "hsv2rgb", "rgb2hls", "hls2rgb","yuv2rgb", "rgb2yuv",
"to_intensity", "yuv2rgb_int", "rgb2yuv_int", "Colormap"
]
def hex2web(hexa):
"""Convert hexadecimal string (6 digits) into *web* version (3 digits)
.. doctest::
>>> from colormap.colors import hex2web
>>> hex2web("#FFAA11")
'#FA1'
.. seealso:: :func:`web2hex`, :func:`hex2rgb`
:func:`rgb2hex`, :func:`rgb2hsv`, :func:`hsv2rgb`, :func:`rgb2hls`,
:func:`hls2rgb`
"""
hexa = HEX().get_standard_hex_color(hexa)
return "#" + hexa[1::2]
def web2hex(web):
"""Convert *web* hexadecimal string (3 digits) into 6 digits version
.. doctest::
>>> from colormap.colors import web2hex
>>> web2hex("#FA1")
'#FFAA11'
.. seealso:: :func:`hex2web`, :func:`hex2rgb`
:func:`rgb2hex`, :func:`rgb2hsv`, :func:`hsv2rgb`, :func:`rgb2hls`,
:func:`hls2rgb`
"""
return HEX().get_standard_hex_color(web)
def hex2rgb(hexcolor, normalise=False):
"""This function converts a hex color triplet into RGB
Valid hex code are:
* #FFF
* #0000FF
* 0x0000FF
* 0xFA1
.. doctest::
>>> from colormap.colors import hex2rgb
>>> hex2rgb("#FFF", normalise=False)
(255, 255, 255)
>>> hex2rgb("#FFFFFF", normalise=True)
(1.0, 1.0, 1.0)
.. seealso:: :func:`hex2web`, :func:`web2hex`,
:func:`rgb2hex`, :func:`rgb2hsv`, :func:`hsv2rgb`, :func:`rgb2hls`,
:func:`hls2rgb`
"""
hexcolor = HEX().get_standard_hex_color(hexcolor)[1:]
r, g, b = int(hexcolor[0:2], 16), int(hexcolor[2:4], 16), int(hexcolor[4:6], 16)
if normalise:
r, g, b = _normalise(r, g, b)
return r, g, b
def rgb2hex(r, g, b, normalised=False):
"""Convert RGB to hexadecimal color
:param: can be a tuple/list/set of 3 values (R,G,B)
    :return: a hex version of the RGB 3-tuple
.. doctest::
>>> from colormap.colors import rgb2hex
>>> rgb2hex(0,0,255, normalised=False)
'#0000FF'
>>> rgb2hex(0,0,1, normalised=True)
'#0000FF'
.. seealso:: :func:`hex2web`, :func:`web2hex`, :func:`hex2rgb`
, :func:`rgb2hsv`, :func:`hsv2rgb`, :func:`rgb2hls`,
:func:`hls2rgb`
"""
if normalised:
r, g, b = _denormalise(r, g, b, mode="rgb")
r = int(r)
g = int(g)
b = int(b)
check_range(r, 0, 255)
check_range(g, 0, 255)
check_range(b, 0, 255)
return '#%02X%02X%02X' % (r, g, b)
def rgb2hls(r, g, b, normalised=True):
"""Convert an RGB value to an HLS value.
:param bool normalised: if *normalised* is True, the input RGB triplet
should be in the range 0-1 (0-255 otherwise)
:return: the HLS triplet. If *normalised* parameter is True, the output
triplet is in the range 0-1; otherwise, H in the range 0-360 and LS
in the range 0-100.
.. doctest::
>>> from colormap.colors import rgb2hls
>>> rgb2hls(255,255,255, normalised=False)
(0.0, 1.0, 0.0)
.. seealso:: :func:`hex2web`, :func:`web2hex`, :func:`hex2rgb`
:func:`rgb2hex`, :func:`hsv2rgb`,
:func:`hls2rgb`
"""
# rgb_to_hsv expects normalised values !
if normalised:
upper = 1
else:
upper = 255
check_range(r, 0, upper)
check_range(g, 0, upper)
check_range(b, 0, upper)
if normalised==False:
r, g, b = _normalise(r, g, b)
h, l, s = colorsys.rgb_to_hls(r, g, b)
return h, l, s
def rgb2hsv(r, g, b, normalised=True):
"""Convert an RGB value to an HSV value.
:param bool normalised: if *normalised* is True, the input RGB triplet
should be in the range 0-1 (0-255 otherwise)
:return: the HSV triplet. If *normalised* parameter is True, the output
triplet is in the range 0-1; otherwise, H in the range 0-360 and LS
in the range 0-100.
.. doctest::
>>> from colormap.colors import rgb2hsv
>>> rgb2hsv(0.5,0,1)
(0.75, 1, 1)
.. seealso:: :func:`hex2web`, :func:`web2hex`, :func:`hex2rgb`
:func:`rgb2hex`, :func:`hsv2rgb`, :func:`rgb2hls`,
:func:`hls2rgb`
"""
# rgb_to_hsv expects normalised values !
if normalised:
upper = 1
else:
upper = 255
check_range(r, 0, upper)
check_range(g, 0, upper)
check_range(b, 0, upper)
if normalised==False:
r, g, b = _normalise(r, g, b)
h, s, v = colorsys.rgb_to_hsv(r, g, b)
return h,s,v
def hsv2rgb(h, s, v, normalised=True):
"""Convert a hue-saturation-value (HSV) value to a red-green-blue (RGB).
:param bool normalised: If *normalised* is True, the input HSV triplet
should be in the range 0-1; otherwise, H in the range 0-360 and LS
in the range 0-100.
:return: the RGB triplet. The output
triplet is in the range 0-1 whether the input is normalised or not.
.. doctest::
>>> from colormap.colors import hsv2rgb
>>> hsv2rgb(0.5,1,1, normalised=True) # doctest: +SKIP
(0, 1, 1)
.. seealso:: :func:`hex2web`, :func:`web2hex`, :func:`hex2rgb`
:func:`rgb2hex`, :func:`rgb2hsv`, :func:`rgb2hls`,
:func:`hls2rgb`
.. seealso:: :func:`rgb2hex`
"""
if normalised:
upper = 1
else:
upper = 100
if normalised:
uppera = 1
else:
uppera = 360
check_range(h, 0, uppera)
check_range(s, 0, upper)
check_range(v, 0, upper)
if normalised == False:
h, s, v = _normalise(h, s, v, mode="hsv")
return colorsys.hsv_to_rgb(h, s, v)
def hls2rgb(h, l, s, normalised=True):
"""Convert an HLS value to a RGB value.
:param bool normalised: If *normalised* is True, the input HLS triplet
should be in the range 0-1; otherwise, H in the range 0-360 and LS
in the range 0-100.
:return: the RGB triplet. The output
triplet is in the range 0-1 whether the input is normalised or not.
.. doctest::
>>> from colormap.colors import hls2rgb
>>> hls2rgb(360, 50, 60, normalised=False) # doctest: +SKIP
(0.8, 0.2, 0.2)
.. seealso:: :func:`hex2web`, :func:`web2hex`, :func:`hex2rgb`
:func:`rgb2hex`, :func:`rgb2hsv`, :func:`hsv2rgb`, :func:`rgb2hls`,
"""
if normalised:
upper = 1
else:
upper = 100
if normalised:
uppera = 1
else:
uppera = 360
check_range(h, 0, uppera)
check_range(s, 0, upper)
check_range(l, 0, upper)
if normalised == False:
h, l, s = _normalise(h, l, s, mode="hls")
return colorsys.hls_to_rgb(h, l, s)
def hex2dec(data):
"""convert hexadecimal string (data) into a float in the [0-65536] inclusive range"""
if data[0] == '#':
data.replace('#', '')
return int(data, 16)/255.
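# For example, with the '#' stripped as above: hex2dec("FF") -> 1.0 and
# hex2dec("#80") -> ~0.502.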
def rgb2yuv(r, g, b):
"""Convert RGB triplet into YUV
:return: YUV triplet with values between 0 and 1
`YUV wikipedia <http://en.wikipedia.org/wiki/YUV>`_
.. warning:: expected input must be between 0 and 1
    .. note:: the constants used follow the Rec. 601 reference
"""
check_range(r, 0, 1)
check_range(g, 0, 1)
check_range(b, 0, 1)
#y = int(0.299 * r + 0.587 * g + 0.114 * b)
#u = int(-0.14713 * r + -0.28886 * g + 0.436 * b)
#v = int(0.615 * r + -0.51499 * g + -0.10001 * b)
y = 0.299 * r + 0.587 * g + 0.114 * b
u = -32591.0/221500.0 * r + -63983.0/221500.0 * g + 0.436 * b
v = 0.615 * r + -72201./140200 * g + -7011/70100. * b
return (y, u, v)
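# Sanity check (approximate): rgb2yuv(1.0, 1.0, 1.0) -> (1.0, ~0.0, ~0.0),
# i.e. white carries no chroma.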
def yuv2rgb(y, u, v):
"""Convert YUV triplet into RGB
`YUV <http://en.wikipedia.org/wiki/YUV>`_
.. warning:: expected input must be between 0 and 255 (not normalised)
"""
check_range(y, 0,1)
check_range(u, 0, 1)
check_range(v, 0, 1)
A, B, C, D = 701.0/615.0, 25251.0/63983.0, 209599.0/361005.0, 443.0/218.0
r = y + A * v
g = y - B * u - C * v
b = y + D * u
return (r, g, b)
def rgb2yuv_int(r, g, b):
"""Convert RGB triplet into YUV
`YUV wikipedia <http://en.wikipedia.org/wiki/YUV>`_
.. warning:: expected input must be between 0 and 255 (not normalised)
"""
check_range(r, 0, 255)
check_range(g, 0, 255)
check_range(b, 0, 255)
y = int(0.299 * r + 0.587 * g + 0.114 * b)
u = int(-32591.0/221500.0 * r + -63983.0/221500.0 * g + 0.436 * b)
v = int(0.615 * r + -72201./140200 * g + -7011/70100. * b)
return (y, u, v)
def yuv2rgb_int(y, u, v):
"""Convert YUV triplet into RGB
`YUV <http://en.wikipedia.org/wiki/YUV>`_
.. warning:: expected input must be between 0 and 255 (not normalised)
"""
check_range(y, 0, 255)
check_range(u, 0, 255)
check_range(v, 0, 255)
r = int(y + 1.13983 * v)
g = int(y - 0.39465 * u - 0.58060 * v)
b = int(y + 2.03211 * u)
return (r, g, b)
def _denormalise(r, g, b, mode="rgb"):
check_param_in_list(mode, ["rgb", "hls", "hsv"])
if mode == "rgb":
return r*255., g*255., b*255.
elif mode in ["hls", "hsv"]:
return r*360., g*100., b*100.
def _normalise(r, g, b, mode="rgb"):
check_param_in_list(mode, ["rgb", "hls", "hsv"])
if mode == "rgb":
return r/255., g/255., b/255.
elif mode in ["hls", "hsv"]:
return r/360., g/100., b/100.
def to_intensity(n):
"""Return intensity
:param n: value between 0 and 1
:return: value between 0 and 255; round(n*127.5+127.5)
"""
check_range(n, 0, 1)
return int(round(n * 127.5 + 127.5))
class HEX(object):
"""Class to check the validity of an hexadecimal string and get standard string
By standard, we mean #FFFFFF (6 digits)
::
>>> h = HEX()
>>> h.is_valid_hex_color("#FFFF00")
True
"""
def __init__(self):
pass
def is_valid_hex_color(self, value, verbose=True):
"""Return True is the string can be interpreted as hexadecimal color
Valid formats are
* #FFF
* #0000FF
* 0x0000FF
* 0xFA1
"""
try:
self.get_standard_hex_color(value)
return True
except Exception as err:
if verbose:
print(err)
return False
def get_standard_hex_color(self, value):
"""Return standard hexadecimal color
By standard, we mean a string that starts with # sign followed by 6
character, e.g. #AABBFF
"""
        if not isinstance(value, str):
raise TypeError("value must be a string")
if len(value) <= 3:
raise ValueError("input string must be of type 0xFFF, 0xFFFFFF or #FFF or #FFFFFF")
if value.startswith("0x") or value.startswith("0X"):
value = value[2:]
elif value.startswith("#"):
value = value[1:]
else:
raise ValueError("hexa string must start with a '#' sign or '0x' string")
value = value.upper()
# Now, we have either FFFFFF or FFF
# now check the length
for x in value:
if x not in "0123456789ABCDEF":
raise ValueError("Found invalid hexa character {0}".format(x))
if len(value) == 6 or len(value) == 8:
value = "#" + value[0:6]
elif len(value) == 3:
value = "#" + value[0]*2 + value[1]*2 + value[2]*2
else:
raise ValueError("hexa string should be 3, 6 or 8 digits. if 8 digits, last 2 are ignored")
return value
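# Illustrative usage sketch (not part of the original module), showing how the
# normalisation rules documented above behave:
# >>> h = HEX()
# >>> h.get_standard_hex_color("0xfa1")      # 3-digit form is expanded
# '#FFAA11'
# >>> h.get_standard_hex_color("#ff000080")  # 8 digits: the last 2 are dropped
# '#FF0000'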
class Color(HEX):
"""Class to ease manipulation and conversion between color codes
    You can create an instance in many different ways. You can either use a
human-readable name as long as it is part of the
`XFree86 list <http://en.wikipedia.org/wiki/X11_color_names>`_
You can also provide a hexadecimal string (either 3 or 6 digits). You can
use triplets of values corresponding to the RGB, HSV or HLS conventions.
Here are some examples:
.. doctest::
from colormap import Color
Color("red") # human XFree86 compatible representation
Color("#f00") # standard 3 hex digits
Color("#ff0000") # standard 6 hex digits
Color(hsv=(0,1,0.5))
Color(hls=(0, 1, 0.5)) # HLS triplet
Color(rgb=(1, 0, 0)) # RGB triplet
Color(Color("red")) # using an instance of :class:`Color`
Note that the RGB, HLS and HSV triplets use normalised values. If you need
to normalise the triplet, you can use :mod:`colormap.colors._normalise` that
provides a function to normalise RGB, HLS and HSV triplets::
colors._normalise(*(255, 255, 0), mode="rgb")
colors._normalise(*(360, 50, 100), mode="hls")
If you provide a string, it has to be a valid string from XFree86.
In addition to the official names, the lower case names are valid. Besides,
there are names with spaces. The equivalent names without space are also
valid. Therefore the name "Spring Green", which is an official name can be
provided as "Spring Green", "spring green", "springgreen" or "SpringGreen".
"""
# Get official color names
colors = XFree86_colors.copy()
# add color names without spaces
aliases = dict([(x.replace(" ", ""),x) for x in colors.keys() if " " in x])
# add color names without spaces in lower cases
aliases.update([(x.replace(" ", "").lower(),x) for x in colors.keys() if " " in x])
# add color names in lower case
aliases.update(dict([(x.lower(),x) for x in colors.keys()]))
aliases.update(dict([(x,x) for x in colors.keys()]))
# keep track of all possible names
color_names = sorted(list(set(list(colors.keys()) +list( aliases.keys()))))
def __init__(self, name=None, rgb=None, hls=None, hsv=None):
super(Color, self).__init__()
self._name = None
self._mode = None
self._rgb = None
        # Did the user provide the name argument (first one) as a string?
if isinstance(name, str):
# if so, it can be a valid human name (e.g., red) or an hex
# assuming that valid hexadecimal starts with # or 0x,
# if we can interpret the string as an hexadecimal, we are done
if self.is_valid_hex_color(name, verbose=False):
self.hex = name
else:
# if not, then, the user probably provided a valid color name
# the property will check the validity.
self.name = name[:]
#all other input parameters are ignored
        elif name is None:
if rgb:
self.rgb = rgb
elif hls:
self.hls = hls
elif hsv:
self.hsv = hsv
else:
raise ValueError("You must set one of the parameter")
elif isinstance(name, Color):
self.rgb = name.rgb
else:
raise ValueError("name parameter must be a string")
def _get_name(self):
return self._name
def _set_name(self, name):
check_param_in_list(name, self.color_names)
name = self.aliases[name]
self._name = name
# set hex and rgb at the same time based on the name
self.hex = self.colors[name]
name = property(_get_name, _set_name)
color = property(_get_name, _set_name)
def _get_hex(self):
return self._hex
def _set_hex(self, value):
        # hex is an approximation (256 levels per channel), so do not define rgb here
if self.is_valid_hex_color(value):
value = self.get_standard_hex_color(value)
self._hex = value
if self._hex in self.colors.values():
self._name = swapdict(self.colors, check_ambiguity=False)[self._hex]
else:
self._name = "undefined"
self._rgb = hex2rgb(self._hex, normalise=True)
else:
# just to warn the user
self.get_standard_hex_color(value)
hex = property(_get_hex, _set_hex,
doc="getter/setter the hexadecimal value.")
def _get_rgb(self):
return self._rgb
def _set_rgb(self, value):
# set name, hex and rgb
self.hex = rgb2hex(*value , normalised=True)
# must reset rgb with its real value (set_hex may round the rgb)
# in _set_hex
self._rgb = value
rgb = property(_get_rgb, _set_rgb,
doc="getter/setter the RGB values (3-length tuple)")
def _get_hsv(self):
hsv = rgb2hsv(*self.rgb)
return hsv
def _set_hsv(self, value):
# TODO: value must be normalised
self.rgb = hsv2rgb(*value)
hsv = property(_get_hsv, _set_hsv,
doc="getter/setter the HSV values (3-length tuple)")
def _get_hls(self):
hls = rgb2hls(*self.rgb)
return hls
def _set_hls(self, value):
#hls = _normalise(*value, mode="hls")
#else:
hls = value
self.rgb = hls2rgb(*hls)
hls = property(_get_hls, _set_hls,
doc="getter/setter the HLS values (3-length tuple)")
def _get_lightness(self):
return self.hls[1]
def _set_lightness(self, lightness):
h, l, s = self.hls
self.hls = (h, lightness, s)
lightness = property(_get_lightness, _set_lightness,
doc="getter/setter the lightness in the HLS triplet")
def _get_saturation_hls(self):
return self.hls[2]
def _set_saturation_hls(self, saturation):
h, l, s = self.hls
self.hls = (h, l, saturation)
saturation_hls = property(_get_saturation_hls, _set_saturation_hls,
doc="getter/setter the saturation in the HLS triplet")
def _get_hue(self):
return self.hls[0]
def _set_hue(self, hue):
h, l, s = self.hls
self.hls = (hue, l, s)
hue = property(_get_hue, _set_hue,
doc="getter/setter the saturation in the HLS triplet")
def _get_red(self):
return self.rgb[0]
def _set_red(self, red):
r, g, b = self.rgb
self.rgb = (red,g,b)
red = property(_get_red, _set_red,
doc="getter/setter for the red color in RGB triplet")
def _get_green(self):
return self.rgb[1]
def _set_green(self, green):
r, g, b = self.rgb
self.rgb = (r, green, b)
green = property(_get_green, _set_green,
doc="getter/setter for the green color in RGB triplet")
def _get_blue(self):
return self.rgb[2]
def _set_blue(self, blue):
r, g, b = self.rgb
self.rgb = (r, g, blue)
blue = property(_get_blue, _set_blue,
doc="getter/setter for the blue color in RGB triplet")
def _get_value(self):
        return self.hsv[2]
def _set_value(self, value):
h, s, v = self.hsv
self.hsv = (h, s, value)
value = property(_get_value, _set_value,
doc="getter/setter the value in the HSV triplet")
def _get_yiq(self):
return colorsys.rgb_to_yiq(*self.rgb)
yiq = property(_get_yiq, doc="Getter for the YIQ triplet")
def __str__(self):
txt = 'Color {0}\n'.format(self.name)
txt+= ' hexa code: {0}\n'.format(self.hex)
txt+= ' RGB code: {0}\n'.format(self.rgb)
txt+= ' RGB code (un-normalised): {0}\n\n'.format([x*255 for x in self.rgb])
txt+= ' HSV code: {0}\n'.format(self.hsv)
txt+= ' HSV code: (un-normalised) {0} {1} {2}\n\n'.format(self.hsv[0]*360, self.hsv[1]*100, self.hsv[2]*100)
txt+= ' HLS code: {0}\n'.format(self.hls)
txt+= ' HLS code: (un-normalised) {0} {1} {2}\n\n'.format(self.hls[0]*360, self.hls[1]*100, self.hls[2]*100)
return txt
class Colormap(object):
"""Class to create matplotlib colormap
This example show how to get the pre-defined colormap called *heat*
.. plot::
:include-source:
from pylab import *
from colormap.colors import Colormap
c = Colormap()
cmap = c.get_cmap_heat()
c.test_colormap(cmap)
You may be more interested in building your own colormap::
# design your own colormap
d = {'blue': [0,0,0,1,1,1,0],
'green':[0,1,1,1,0,0,0],
'red': [1,1,0,0,0,1,1]}
cmap = c.cmap(d, reverse=False)
# see the results
c.test_colormap(cmap)
If you want a simple linear colormap, you can use the example above,
or use the :meth:`cmap_linear`. For instance for a diverging colormap
    from red to green (with white in between)::
cmap = c.cmap_linear("red", "white", "green")
c.test_colormap(cmap)
Even simpler, you can use a bicolor colormap :meth:`cmap_bicolor`. For instance
for a red to green colormap::
cmap = c.cmap_bicolor("red", "green")
c.test_colormap(cmap)
    From matplotlib documentation, colormaps fall into 4 categories:
    #. Sequential schemes for unipolar data that progresses from low to high
    #. Diverging schemes for bipolar data that emphasize positive or
       negative deviations from a central value
#. Cyclic schemes meant for plotting values that wrap around at the
endpoints, such as phase angle, wind direction, or time of day
#. Qualitative schemes for nominal data that has no inherent ordering,
where color is used only to distinguish categories
:references: matplotlib documentation and examples
http://matplotlib.org/examples/color/colormaps_reference.html
"""
def _get_colormap_mpl(self):
try:
from matplotlib.pyplot import colormaps as _cmaps
return _cmaps()
except:
return []
colormaps = property(_get_colormap_mpl)
def _get_sequentials(self):
return ['Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'OrRd',
'Oranges', 'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu',
'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']
sequentials = property(_get_sequentials)
def _get_sequentials2(self):
return ['afmhot', 'autumn', 'bone', 'cool', 'copper',
'gist_heat', 'gray', 'hot', 'pink',
'spring', 'summer', 'winter']
sequentials2 = property(_get_sequentials2)
def _get_diverging(self):
return ['BrBG', 'PRGn', 'PiYG', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu',
'RdYlGn', 'Spectral', 'bwr', 'coolwarm', 'seismic']
diverging = property(_get_diverging)
def _get_diverging_black(self):
return ['red_black_sky', 'red_black_blue', 'red_black_green', 'yellow_black_blue',
'yellow_black_sky', 'red_black_orange', 'pink_black_green(w3c)'
]
diverging_black = property(_get_diverging_black)
def _get_qualitative(self):
return ['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2',
'Set1', 'Set2', 'Set3']
qualitative = property(_get_qualitative)
def _get_misc(self):
return ['gist_earth', 'terrain', 'ocean', 'gist_stern',
'brg', 'CMRmap', 'cubehelix', 'gnuplot', 'gnuplot2', 'gist_ncar',
'nipy_spectral', 'jet', 'rainbow', 'gist_rainbow', 'hsv', 'flag', 'prism']
misc = property(_get_misc)
def plot_rgb_from_hex_list(self, cols):
"""This functions takes a list of hexadecimal values and plots
the RGB curves. This can be handy to figure out the RGB functions
to be used in the :meth:`get_cmap`.
.. plot::
:include-source:
:width: 60%
from colormap.colors import Colormap
c = Colormap()
t = ['#FF0000FF', '#FF4D00FF', '#FF9900FF', '#FFE500FF',
'#CCFF00FF', '#80FF00FF', '#33FF00FF', '#00FF19FF',
'#00FF66FF', '#00FFB2FF', '#00FFFFFF', '#00B3FFFF',
'#0066FFFF', '#001AFFFF', '#3300FFFF', '#7F00FFFF',
'#CC00FFFF','#FF00E6FF','#FF0099FF', '#FF004DFF']
c.plot_rgb_from_hex_list(t)
"""
import pylab
red = [hex2rgb(x)[0]/255. for x in cols]
blue = [hex2rgb(x)[2]/255. for x in cols]
green = [hex2rgb(x)[1]/255. for x in cols]
x = pylab.linspace(0, 1, len(cols))
pylab.clf()
pylab.plot(x, red, 'ro-', alpha=0.5)
pylab.plot(x, green, 'gs-', alpha=0.5, markersize=15)
pylab.plot(x, blue, 'bx-', alpha=0.5, markersize=15)
pylab.ylim([-0.1, 1.1])
def cmap_bicolor(self, color1, color2, reverse=False, N=256):
"""Provide 3 colors in format accepted by :class:`Color`
::
>>> red = Color('red')
>>> white = Color('white')
>>> cmap = cmap_bicolor(red, white)
"""
c1 = Color(color1)
c2 = Color(color2)
dico = {'red': [c1.red, c2.red],
'green':[c1.green, c2.green],
'blue':[c1.blue, c2.blue]}
return self.cmap(dico, reverse=reverse, N=N)
def cmap_linear(self, color1, color2, color3, reverse=False, N=256):
"""Provide 3 colors in format accepted by :class:`Color`
::
red = Color('red')
cmap = cmap_linear(red, 'white', '#0000FF')
"""
c1 = Color(color1)
c2 = Color(color2)
c3 = Color(color3)
dico = {'red': [c1.red, c2.red, c3.red],
'green':[c1.green, c2.green, c3.green],
'blue':[c1.blue, c2.blue, c3.blue]}
return self.cmap(dico, reverse=reverse, N=N)
def cmap(self, colors=None, reverse=False, N=256):
"""Return a colormap object to be used within matplotlib
:param dict colors: a dictionary that defines the RGB colors to be
used in the colormap. See :meth:`get_cmap_heat` for an example.
:param bool reverse: reverse the colormap is set to True (defaults to False)
        :param int N: Defaults to 256
"""
# matplotlib colormaps
if colors in self.colormaps:
if reverse and colors.endswith("_r") is False:
colors += "_r"
from matplotlib.cm import get_cmap
return get_cmap(colors)
# custom ones
elif colors in self.diverging_black:
c1, c2, c3 = colors.split("_")
            # special case of sky, which does not exist
c3 = c3.replace("sky", "deep sky blue")
return self.cmap_linear(c1, c2, c3)
elif colors == 'heat':
return self.get_cmap_heat()
elif colors == 'heat_r':
return self.get_cmap_heat_r()
# Keep these dependencies inside the function to allow
# installation of colormap without those dependencies
# FIXME remove numpy dependencies
import numpy as np
# extracted from R, heat.colors(20)
if reverse:
for k in colors.keys():
colors[k].reverse()
# If index not given, RGB colors are evenly-spaced in colormap.
index = np.linspace(0, 1, len(colors['red']))
# Adapt color_data to the form expected by LinearSegmentedColormap.
color_data = dict((key, [(x, y, y) for x, y in zip(index, value)])
for key, value in list(colors.items()))
import matplotlib
f = matplotlib.colors.LinearSegmentedColormap
m = f('my_color_map', color_data, N)
return m
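    # Illustrative usage sketch (not part of the original class): a custom
    # three-segment colormap built from the dictionary form accepted by cmap().
    # >>> c = Colormap()
    # >>> blue_white_red = c.cmap({'red':   [0, 1, 1],
    # ...                          'green': [0, 1, 0],
    # ...                          'blue':  [1, 1, 0]})
    # >>> c.test_colormap(blue_white_red)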
def get_cmap_heat(self):
"""Return a heat colormap matplotlib-compatible colormap
This heat colormap should be equivalent to heat.colors() in R.
::
>>> from colormap.colors import Colormap
            >>> cmap = Colormap().get_cmap_heat()
You can generate the colormap based solely on this information for the RGB
functions along::
d= { 'blue':[0,0,0,0,1],
'green':[0,.35,.7,1,1],
'red':[1,1,1,1,1]}
            cmap = Colormap().cmap(d)
"""
return self.cmap(
{ 'blue':[0, 0, 0, 0, 1],
'green':[0, .35, .7, 1, 1],
'red':[1, 1, 1, 1, 1]}, reverse=False)
def get_cmap_heat_r(self):
"""Return a heat colormap matplotlib-compatible colormap
Same as :meth:`get_cmap_heat` but reversed
"""
return self.cmap(
{ 'blue':[0, 0, 0, 0, 1],
'green':[0, .35, .7, 1, 1],
'red':[1, 1, 1, 1, 1]}, reverse=True)
def get_cmap_rainbow(self):
"""colormap similar to rainbow colormap from R
.. note:: The red is actually appearing on both sides... Yet
this looks like what is coded in R 3.0.1
"""
return self.cmap(
{ 'blue': [0, 0, 0, 1, 1, 1, 0],
'green':[0, 1, 1, 1, 0, 0, 0],
'red': [1, 1, 0, 0, 0, 1, 1]}, reverse=False)
def get_cmap_red_green(self):
return self.cmap(
{ 'green': [0, 0.4, 0.6, .75, .8, .9, 1, .9, .8, .6],
'blue' : [0, .4, .6, .75, .8, .7, .6, .35, .17, .1],
'red': [1, 1, 1, 1, 1, .9, .8, .6, .3, .1]}, reverse=True)
def test_colormap(self, cmap=None):
"""plot one colormap for testing
By default, test the :meth:`get_cmap_heat`
"""
if cmap is None:
cmap = self.get_cmap_heat()
import numpy as np
from pylab import clf, pcolor, colorbar, show, linspace, axis
A, B = np.meshgrid(linspace(0, 10, 100), linspace(0, 10, 100))
clf()
pcolor((A-5)**2+(B-5)**2, cmap=cmap)
colorbar()
show()
axis('off')
def plot_colormap(self, cmap_list=None):
"""cmap_list list of valid cmap or name of a set (sequential,
diverging,)
if none, plot all known colors
.. .. plot::
.. :width:80%
.. :include-source:
.. from colormap import Colormap
.. c = Colormap()
.. c.plot_colormap('sequential')
"""
from pylab import subplots
if isinstance(cmap_list, str):
if cmap_list in ['sequentials','sequentials2','qualitative',
'misc','diverging', 'diverging_black']:
cmap_list = getattr(self, cmap_list)
else:
cmap_list = [cmap_list]
if isinstance(cmap_list, list) is not True:
raise TypeError("""input must be a list of srtings or a single string. Each string should be found. For a user-defined cmap, use test_colormap""")
for this in cmap_list:
if this not in self.colormaps and this not in self.diverging_black:
raise ValueError("unknown colormap name. Please check valid names in colormaps attribute")
nrows = len(cmap_list)
gradient = [x/255. for x in range(0,256)]
gradient = [gradient, gradient]
#np.vstack((gradient, gradient))
fig, axes = subplots(nrows=nrows)
fig.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.8)
for ax, name in zip(axes, cmap_list):
ax.imshow(gradient, aspect='auto', cmap=self.cmap(name))
pos = list(ax.get_position().bounds)
x_text = pos[2] + 0.08
y_text = pos[1] + pos[3]/2.
fig.text(x_text, y_text, name, va='center', ha='left', fontsize=10)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.set_axis_off()
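# Minimal demonstration sketch appended for illustration; it is not part of the
# original module and only exercises the public helpers defined above.
if __name__ == "__main__":
    c = Color("red")
    print(c.hex, c.rgb)            # expected: '#FF0000' and (1.0, 0.0, 0.0)
    cm = Colormap()
    heat = cm.get_cmap_heat()
    cm.test_colormap(heat)         # opens a matplotlib window with the heat map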
| bsd-3-clause |
sonusz/PhasorToolBox | examples/freq_meter.py | 1 | 1820 | #!/usr/bin/env python3
"""
This is a real-time frequency meter for two PMUs.
This code connects to two PMUs, plots the frequency of the past 300 time-stamps and updates the plot in real-time.
"""
from phasortoolbox import PDC,Client
import matplotlib.pyplot as plt
import numpy as np
import gc
import logging
logging.basicConfig(level=logging.DEBUG)
class FreqMeter(object):
def __init__(self):
x = np.linspace(-10.0, 0.0, num=300, endpoint=False)
y = [60.0]*300
plt.ion()
self.fig = plt.figure()
self.ax1 = self.fig.add_subplot(211)
self.line1, = self.ax1.plot(x, y)
plt.title('PMU1 Frequency Plot')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
self.ax2 = self.fig.add_subplot(212)
self.line2, = self.ax2.plot(x, y)
plt.title('PMU2 Frequency Plot')
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
plt.tight_layout()
def update_plot(self, synchrophasors):
y_data = [[],[]]
for synchrophasor in synchrophasors:
for i, msg in enumerate(synchrophasor):
y_data[i].append(msg.data.pmu_data[0].freq)
self.line1.set_ydata(y_data[0])
self.line2.set_ydata(y_data[1])
self.ax1.set_ylim(min(y_data[0]),max(y_data[0]))
self.ax2.set_ylim(min(y_data[1]),max(y_data[1]))
self.fig.canvas.draw()
self.fig.canvas.flush_events()
del(synchrophasors)
gc.collect()
if __name__ == '__main__':
pmu_client1 = Client(remote_ip='10.0.0.1', remote_port=4722, idcode=1, mode='TCP')
pmu_client2 = Client(remote_ip='10.0.0.2', remote_port=4722, idcode=2, mode='TCP')
fm = FreqMeter()
pdc = PDC(clients=[pmu_client1,pmu_client2],history=300)
pdc.callback = fm.update_plot
pdc.run()
| mit |
subutai/htmresearch | projects/sequence_prediction/continuous_sequence/data/processTaxiData.py | 12 | 2451 | import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import os
from pygeocoder import Geocoder
plt.ion()
year = 2015
record_num = []
aggregation_rule = {'Sum': sum}
ts_all = None
aggregation_window = "1min"
print " aggregate data at" + aggregation_window + "resolution"
for year in [2014, 2015]:
for month in xrange(1, 13):
datafileName = 'yellow_tripdata_' + str(year) + '-' + "{:0>2d}".format(month) + '.csv'
if os.path.isfile(datafileName):
print " Load Datafile: ", datafileName
# df = pd.read_csv(datafileName, header=0, nrows=100, usecols=[1, 3, 5, 6],
# names=['pickup_datetime', 'passenger_count', 'pickup_longitude', 'pickup_latitude'])
#
# postcode = np.zeros(len(df))
# for i in xrange(len(df)):
# try:
# results = Geocoder.reverse_geocode(df['pickup_latitude'][i], df['pickup_longitude'][i])
# postcode[i] = results.postal_code
# except:
# pass
df = pd.read_csv(datafileName, header=0, usecols=[1, 3], names=['pickup_datetime', 'passenger_count'])
record_num.append(len(df))
ts = pd.Series(np.array(df.passenger_count), index=pd.to_datetime(df.pickup_datetime))
del df
ts_aggregate = ts.resample(aggregation_window, how=aggregation_rule)
if ts_all is not None:
print " concat ts_all"
ts_all = pd.concat([ts_all, ts_aggregate])
else:
print " initialize ts_all"
ts_all = ts_aggregate
else:
      print datafileName, " does not exist"
print "include time of day and day of week as input field"
date = ts_all.index
dayofweek = (date.dayofweek)
timeofday = (date.hour*60 + date.minute)
passenger_count = np.array(ts_all['Sum'])
seq = pd.DataFrame(np.transpose(np.array([passenger_count, timeofday, dayofweek])), columns=['passenger_count', 'timeofday', 'dayofweek'], index=ts_all.index)
plt.close('all')
plt.figure(1)
plt.plot(seq.index, seq.passenger_count)
import csv
outputFileName = "nyc_taxi_" + aggregation_window + ".csv"
outputFile = open(outputFileName,"w")
csvWriter = csv.writer(outputFile)
csvWriter.writerow(['timestamp', 'passenger_count', 'timeofday', 'dayofweek'])
csvWriter.writerow(['datetime', 'int', 'int', 'string'])
csvWriter.writerow(['T', '', '', ''])
for i in range(len(ts_all)):
csvWriter.writerow([seq.index[i], seq.passenger_count[i], seq.timeofday[i], seq.dayofweek[i]])
outputFile.close() | agpl-3.0 |
fishroot/nemoa | nemoa/file/nplot.py | 1 | 16627 | # -*- coding: utf-8 -*-
"""Common function for creating plots with matplotlib."""
__author__ = 'Patrick Michl'
__email__ = 'frootlab@gmail.com'
__license__ = 'GPLv3'
__docformat__ = 'google'
import numpy as np
from nemoa.types import OptDict
class Plot:
"""Base class for matplotlib plots.
Export classes like Histogram, Heatmap or Graph share a common
interface to matplotlib, as well as certain plotting attributes.
This base class is intended to provide a unified interface to access
matplotlib and those attributes.
Attributes:
"""
_default: dict = {
'fileformat': 'pdf',
'figure_size': (10.0, 6.0),
'dpi': None,
'bg_color': 'none',
'usetex': False,
'font_family': 'sans-serif',
'style': 'seaborn-white',
'title': None,
'show_title': True,
'title_fontsize': 14.0
}
_config: dict = {}
_kwds: dict = {}
_plt = None
_fig = None
_axes = None
def __init__(self, **kwds):
""" """
try:
import matplotlib
except ImportError as err:
raise ImportError(
"requires package matplotlib: "
"https://matplotlib.org") from err
# merge config from defaults, current config and keyword arguments
self._kwds = kwds
self._config = {**self._default, **self._config, **kwds}
# update global matplotlib settings
matplotlib.rc('text', usetex=self._config['usetex'])
matplotlib.rc('font', family=self._config['font_family'])
# link matplotlib.pyplot
import matplotlib.pyplot as plt
self._plt = plt
# close previous figures
plt.close('all')
# update plot settings
plt.style.use(self._config['style'])
# create figure
self._fig = plt.figure(
figsize=self._config['figure_size'],
dpi=self._config['dpi'],
facecolor=self._config['bg_color'])
# create subplot (matplotlib.axes.Axes)
self._axes = self._fig.add_subplot(111)
def set_default(self, config: OptDict = None) -> bool:
"""Set default values."""
self._config = {**self._config, **(config or {}), **self._kwds}
return True
def plot_title(self) -> bool:
"""Plot title."""
if not self._config['show_title']:
return False
title = self._config['title'] or 'Unknown'
fontsize = self._config['title_fontsize']
getattr(self._plt, 'title')(title, fontsize=fontsize)
return True
def show(self) -> None:
"""Show plot."""
getattr(self._plt, 'show')()
def save(self, path, **kwds):
"""Save plot to file."""
return self._fig.savefig(path, dpi=self._config['dpi'], **kwds)
def release(self):
"""Clear current plot."""
return self._fig.clear()
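# Illustrative usage sketch (not part of the original module): every subclass
# below follows the same pattern -- construct with keyword options, call
# plot() with the data, then show(), save() or release(). The filename used
# here is arbitrary.
# >>> import numpy as np
# >>> hist = Histogram(bins=50, title='random data')
# >>> hist.plot(np.random.randn(1000))
# >>> hist.save('hist.pdf')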
class Heatmap(Plot):
""" """
_config = {
'interpolation': 'nearest',
'grid': True
}
def plot(self, array):
""" """
try:
from matplotlib.cm import hot_r
except ImportError as err:
raise ImportError(
"requires package matplotlib: "
"https://matplotlib.org") from err
# plot grid
self._axes.grid(self._config['grid'])
# plot heatmap
cax = self._axes.imshow(
array,
cmap=hot_r,
interpolation=self._config['interpolation'],
extent=(0, array.shape[1], 0, array.shape[0]))
# create labels for axis
max_font_size = 12.
x_labels = []
for label in self._config['x_labels']:
if ':' in label:
label = label.split(':', 1)[1]
x_labels.append(get_texlabel(label))
y_labels = []
for label in self._config['y_labels']:
if ':' in label:
label = label.split(':', 1)[1]
y_labels.append(get_texlabel(label))
fontsize = min(max_font_size, \
400. / float(max(len(x_labels), len(y_labels))))
self._plt.xticks(
np.arange(len(x_labels)) + 0.5,
tuple(x_labels), fontsize=fontsize, rotation=65)
self._plt.yticks(
len(y_labels) - np.arange(len(y_labels)) - 0.5,
tuple(y_labels), fontsize=fontsize)
# create colorbar
cbar = self._fig.colorbar(cax)
for tick in cbar.ax.get_yticklabels():
tick.set_fontsize(9)
# (optional) plot title
self.plot_title()
return True
class Histogram(Plot):
""" """
_config = {
'bins': 100,
'facecolor': 'lightgrey',
'edgecolor': 'black',
'histtype': 'bar',
'linewidth': 0.5,
'grid': True
}
def plot(self, array):
""" """
# plot grid
self._axes.grid(self._config['grid'])
# plot histogram
self._axes.hist(
array,
bins=self._config['bins'],
facecolor=self._config['facecolor'],
histtype=self._config['histtype'],
linewidth=self._config['linewidth'],
edgecolor=self._config['edgecolor'])
# (optional) plot title
self.plot_title()
return True
class Scatter2D(Plot):
""" """
_config = {
'grid': True,
'pca': True
}
@staticmethod
def _pca2d(array):
"""Calculate projection to largest two principal components."""
# get dimension of array
dim = array.shape[1]
# calculate covariance matrix
cov = np.cov(array.T)
        # calculate eigenvectors and eigenvalues
vals, vecs = np.linalg.eig(cov)
        # sort eigenvectors by absolute eigenvalues
pairs = [(np.abs(vals[i]), vecs[:, i]) for i in range(len(vals))]
pairs.sort(key=lambda x: x[0], reverse=True)
# calculate projection matrix
proj = np.hstack(
[pairs[0][1].reshape(dim, 1), pairs[1][1].reshape(dim, 1)])
# calculate projection
parray = np.dot(array, proj)
return parray
def plot(self, array):
""" """
# test arguments
if array.shape[1] != 2:
if self._config['pca']:
array = self._pca2d(array)
else: raise TypeError(
"first argument is required to be an array of shape (n, 2)")
x, y = array[:, 0], array[:, 1]
# plot grid
self._axes.grid(self._config['grid'])
# plot scattered data
self._axes.scatter(x, y)
# (optional) plot title
self.plot_title()
return True
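# Illustrative sketch (not part of the original module): arrays with more than
# two columns are first projected onto their two largest principal components
# by _pca2d() above, so higher-dimensional data can be passed directly.
# >>> import numpy as np
# >>> sc = Scatter2D(title='PCA projection')
# >>> sc.plot(np.random.randn(200, 5))   # internally reduced to shape (200, 2)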
class Graph(Plot):
_config = {
'padding': (0.1, 0.1, 0.1, 0.1),
'show_legend': False,
'legend_fontsize': 9.0,
'graph_layout': 'layer',
'graph_direction': 'right',
'node_style': 'o',
'edge_width_enabled': True,
'edge_curvature': 1.0
}
def plot(self, G):
"""Plot graph.
Args:
G: networkx graph instance
figure_size (tuple): figure size in inches
(11.69,8.27) for A4, (16.53,11.69) for A3
edge_attribute (string): name of edge attribute, that
determines the edge colors by its sign and the edge width
by its absolute value.
default: 'weight'
edge_color (bool): flag for colored edges
True: edge colors are determined by the sign of the
attribute 'weight'
False: edges are black
edge_poscolor (string): name of color for edges with
positive signed attribute. For a full list of specified
color names see nemoa.base.nplot.get_color()
edge_negcolor (string): name of color for edges with
negative signed attribute. For a full list of specified
color names see nemoa.base.nplot.get_color()
            edge_curvature (float): value within the interval [-1, 1],
that determines the curvature of the edges.
Thereby 1 equals max convexity and -1 max concavity.
direction (string): string within the list ['up', 'down',
                'left', 'right'], that determines the plot direction of the
graph. 'up' means, the first layer is at the bottom.
edge_style (string): '-', '<-', '<->', '->',
'<|-', '<|-|>', '-|>', '|-', '|-|', '-|',
']-', ']-[', '-[', 'fancy', 'simple', 'wedge'
Returns:
            Boolean value which is True if no error occurred.
"""
try:
import matplotlib.patches
except ImportError as err:
raise ImportError(
"requires package matplotlib: "
"https://matplotlib.org") from err
try:
import networkx as nx
except ImportError as err:
raise ImportError(
"requires package networkx: "
"https://networkx.github.io") from err
from nemoa.base import ndict
from nemoa.math import graph
# adjust size of subplot
fig = self._fig
ax = self._axes
ax.set_autoscale_on(False)
figsize = fig.get_size_inches() * fig.dpi
ax.set_xlim(0., figsize[0])
ax.set_ylim(0., figsize[1])
ax.set_aspect('equal', 'box')
ax.axis('off')
# get node positions and sizes
layout_params = ndict.crop(self._config, 'graph_')
del layout_params['layout']
pos = graph.get_layout(
G, layout=self._config['graph_layout'], size=figsize,
padding=self._config['padding'], **layout_params)
sizes = graph.get_layout_normsize(pos)
node_size = sizes.get('node_size', None)
node_radius = sizes.get('node_radius', None)
line_width = sizes.get('line_width', None)
edge_width = sizes.get('edge_width', None)
font_size = sizes.get('font_size', None)
# get nodes and groups sorted by node attribute group_id
groups = graph.get_groups(G, attribute='group')
sorted_groups = sorted(
list(groups.keys()),
key=lambda g: 0 if not isinstance(g, list) or not g \
else G.node.get(g[0], {}).get('group_id', 0))
# draw nodes, labeled by groups
for group in sorted_groups:
gnodes = groups.get(group, [])
if not gnodes:
continue
refnode = G.node.get(gnodes[0])
label = refnode['description'] or refnode['group'] or str(group)
# draw nodes in group
node_obj = nx.draw_networkx_nodes(
G, pos, nodelist=gnodes, linewidths=line_width,
node_size=node_size, node_shape=self._config['node_style'],
node_color=get_color(refnode['color'], 'white'), label=label)
node_obj.set_edgecolor(
get_color(refnode['border_color'], 'black'))
# draw node labels
for node, data in G.nodes(data=True):
# determine label, fontsize and color
node_label = data.get('label', str(node).title())
node_label_format = get_texlabel(node_label)
node_label_size = np.sqrt(get_texlabel_width(node_label))
font_color = get_color(data['font_color'], 'black')
# draw node label
nx.draw_networkx_labels(
G, pos, labels={node: node_label_format},
font_size=font_size / node_label_size, font_color=font_color,
font_family='sans-serif', font_weight='normal')
# patch node for edges
circle = matplotlib.patches.Circle(
pos.get(node), alpha=0., radius=node_radius)
ax.add_patch(circle)
G.node[node]['patch'] = circle
# draw edges
seen = {}
if graph.is_directed(G):
default_edge_style = '-|>'
else: default_edge_style = '-'
for (u, v, data) in G.edges(data=True):
weight = data['weight']
if weight == 0.:
continue
# calculate edge curvature from node positions
# parameter rad describes the height in the normalized triangle
if (u, v) in seen:
rad = seen.get((u, v))
rad = -(rad + float(np.sign(rad)) * .2)
else:
scale = 1. / np.amax(np.array(figsize))
vec = scale * (np.array(pos[v]) - np.array(pos[u]))
rad = vec[0] * vec[1] / np.sqrt(2 * np.sum(vec ** 2))
if self._config['graph_layout'] == 'layer':
gdir = self._config['graph_direction']
if gdir in ['left', 'right']:
rad *= -1
seen[(u, v)] = rad
# determine style of edge from edge weight
if weight is None:
linestyle = '-'
linewidth = 0.5 * edge_width
alpha = 0.5
elif not self._config['edge_width_enabled']:
linestyle = '-'
linewidth = edge_width
alpha = np.amin([np.absolute(weight), 1.0])
else:
linestyle = '-'
linewidth = np.absolute(weight) * edge_width
alpha = np.amin([np.absolute(weight), 1.0])
# draw edge
node_a = G.node[u]['patch']
node_b = G.node[v]['patch']
arrow = matplotlib.patches.FancyArrowPatch(
posA=node_a.center, posB=node_b.center,
patchA=node_a, patchB=node_b,
arrowstyle=default_edge_style,
connectionstyle='arc3,rad=%s' % rad,
mutation_scale=linewidth * 12.,
linewidth=linewidth, linestyle=linestyle,
color=get_color(data.get('color', 'black')), alpha=alpha)
ax.add_patch(arrow)
# (optional) draw legend
if self._config['show_legend']:
num_groups = np.sum([1 for g in list(groups.values()) \
if isinstance(g, list) and g])
markerscale = 0.6 * self._config['legend_fontsize'] / font_size
ax.legend(
numpoints=1,
loc='lower center',
ncol=num_groups,
borderaxespad=0.,
framealpha=0.,
bbox_to_anchor=(0.5, -0.1),
fontsize=self._config['legend_fontsize'],
markerscale=markerscale)
# (optional) plot title
self.plot_title()
return True
def get_color(*args):
"""Convert color name of XKCD color name survey to RGBA tuple.
Args:
List of color names. If the list is empty, a full list of
available color names is returned. Otherwise the first valid
color in the list is returned as RGBA tuple. If no color is
valid None is returned.
"""
try:
from matplotlib import colors
except ImportError as err:
raise ImportError(
"requires package matplotlib: "
"https://matplotlib.org") from err
if not args:
clist = list(colors.get_named_colors_mapping().keys())
return sorted([cname[5:].title() \
for cname in clist if cname[:5] == 'xkcd:'])
rgb = None
for cname in args:
try:
rgb = colors.to_rgb('xkcd:%s' % cname)
break
except ValueError:
continue
return rgb
def get_texlabel(string):
"""Return formated node label as used for plots."""
lstr = string.rstrip('1234567890')
if len(lstr) == len(string):
return '${%s}$' % (string)
rnum = int(string[len(lstr):])
lstr = lstr.strip('_')
return '${%s}_{%i}$' % (lstr, rnum)
def get_texlabel_width(string):
"""Return estimated width for formated node labels."""
lstr = string.rstrip('1234567890')
if len(lstr) == len(string):
return len(string)
lstr = lstr.strip('_')
rstr = str(int(string[len(lstr):]))
return len(lstr) + 0.7 * len(rstr)
def filetypes():
"""Return supported image filetypes."""
try:
import matplotlib.pyplot as plt
except ImportError as err:
raise ImportError(
"requires package matplotlib: "
"https://matplotlib.org") from err
return plt.gcf().canvas.get_supported_filetypes()
| gpl-3.0 |
e-koch/VLA_Lband | 14B-088/HI/imaging/sd_regridding/sd_comparison.py | 1 | 3520 |
'''
Compare the regridded versions of the SD datasets.
'''
from spectral_cube import SpectralCube
import matplotlib.pyplot as plt
import os
from corner import hist2d
from radio_beam import Beam
import astropy.units as u
import numpy as np
from paths import fourteenB_HI_data_path, data_path
from galaxy_params import gal
# Load in the 4 cubes and run.
vla_cube = SpectralCube.read(fourteenB_HI_data_path("M33_14B-088_HI.clean.image.fits"))
arecibo_path = os.path.join(data_path, "Arecibo")
# Spectral interpolation, followed by reprojection.
arecibo_name = \
os.path.join(arecibo_path,
"14B-088_items_new/m33_arecibo_14B088.fits")
arecibo_cube = SpectralCube.read(arecibo_name)
ebhis_path = os.path.join(data_path, "EBHIS")
# Spectral interpolation, followed by reprojection.
ebhis_name = os.path.join(ebhis_path, "14B-088_items/m33_ebhis_14B088.fits")
ebhis_cube = SpectralCube.read(ebhis_name)
gbt_path = os.path.join(data_path, "GBT")
gbt_name = os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_highres_Tmb_14B088.fits")
gbt_cube = SpectralCube.read(gbt_name)
gbt_lowres_name = os.path.join(gbt_path, "14B-088_items/m33_gbt_vlsr_Tmb_14B088.fits")
gbt_lowres_cube = SpectralCube.read(gbt_lowres_name)
# Compare total emission in the cubes.
vla_mask = np.isfinite(vla_cube[0])
arecibo_sum = arecibo_cube.with_mask(vla_mask).sum()
ebhis_sum = ebhis_cube.with_mask(vla_mask).sum()
gbt_sum = gbt_cube.with_mask(vla_mask).sum()
gbt_lowres_sum = gbt_lowres_cube.with_mask(vla_mask).sum()
plt.plot(arecibo_sum, ebhis_sum, gbt_sum, gbt_lowres_sum)
# Compare intensities in one plane
# arecibo_plane = arecibo_cube[500]
# ebhis_plane = ebhis_cube[500]
# gbt_plane = gbt_cube[500]
# gbt_plane[np.isnan(gbt_plane)] = 0.0 * u.K
# gbt_lowres_plane = gbt_lowres_cube[500]
# # Convolve GBT to match EBHIS
# beam_fwhm = lambda diam: ((1.2 * 21 * u.cm) / diam.to(u.cm)) * u.rad
# gbt_90m_beam = Beam(beam_fwhm(90 * u.m))
# gbt_plane._beam = gbt_90m_beam
# gbt_plane_convolved = gbt_plane.convolve_to(ebhis_plane.beam)
# gbt_100m_beam = Beam(beam_fwhm(100 * u.m))
# gbt_plane._beam = gbt_100m_beam
# gbt_plane_convolved_100 = gbt_plane.convolve_to(ebhis_plane.beam)
# ax = plt.subplot(131)
# hist2d(gbt_plane.value.ravel(), ebhis_plane.value.ravel(), ax=ax)
# plt.plot([0, 15], [0, 15])
# ax2 = plt.subplot(132)
# hist2d(gbt_plane_convolved.value.ravel(), ebhis_plane.value.ravel(), ax=ax2)
# plt.plot([0, 15], [0, 15])
# ax3 = plt.subplot(133)
# hist2d(gbt_plane_convolved_100.value.ravel(), ebhis_plane.value.ravel(), ax=ax3)
# plt.plot([0, 15], [0, 15])
# Best match for GBT is with a 106 m beam, convolved to the 80 m of EBHIS.
# Well, something is wrong here. It has to be that the difference between the
# data is a 80 m deconvolved w/ a 106 m beam. The EBHIS beam size should then
# be slightly smaller?
# Now convolve the Arecibo down to the GBT.
# gbt_90m_beam = Beam(beam_fwhm(90 * u.m))
# arecibo_plane_convolved = arecibo_plane.convolve_to(gbt_90m_beam)
# gbt_100m_beam = Beam(beam_fwhm(100 * u.m))
# arecibo_plane_convolved_100 = arecibo_plane.convolve_to(gbt_100m_beam)
# ax = plt.subplot(131)
# hist2d(arecibo_plane.value.ravel(), gbt_plane.value.ravel(), ax=ax)
# plt.plot([0, 15], [0, 15])
# ax2 = plt.subplot(132)
# hist2d(arecibo_plane_convolved.value.ravel(), gbt_plane.value.ravel(), ax=ax2)
# plt.plot([0, 15], [0, 15])
# ax3 = plt.subplot(133)
# hist2d(arecibo_plane_convolved_100.value.ravel(), gbt_plane.value.ravel(), ax=ax3)
# plt.plot([0, 15], [0, 15]) | mit |
transientlunatic/minke | minke/mdctools.py | 1 | 34706 | """
88b d88 88 88
888b d888 "" 88
88`8b d8'88 88
88 `8b d8' 88 88 8b,dPPYba, 88 ,d8 ,adPPYba,
88 `8b d8' 88 88 88P' `"8a 88 ,a8" a8P_____88
88 `8b d8' 88 88 88 88 8888[ 8PP"""""""
88 `888' 88 88 88 88 88`"Yba, "8b, ,aa
88 `8' 88 88 88 88 88 `Y8a `"Ybbd8"'
--------------------------------------------------------
This file is a part of Minke, a tool for generating simulated
gravitational wave signals, used for characterising and training
search algorithms.
Minke was created by Daniel Williams, based on work started by Chris
Pankow and others, and is built around the LALSimulation library.
"""
from glue.ligolw import ligolw, utils, lsctables
lsctables.use_in(ligolw.LIGOLWContentHandler);
import numpy
import lalburst, lalsimulation, lalmetaio
from minke.antenna import response
from lal import TimeDelayFromEarthCenter as XLALTimeDelayFromEarthCenter
#from pylal.xlal.datatypes.ligotimegps import LIGOTimeGPS
from lal import LIGOTimeGPS
from glue.ligolw.utils import process
import glue
import glue.ligolw
import gzip
import lal, lalframe
import numpy as np
import pandas as pd
import os
import os.path
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import re
import random
import minke
from minke import sources
sourcemap = {}
for classin in dir(sources):
classin = sources.__dict__[classin]
if hasattr(classin, "waveform"):
sourcemap[classin.waveform] = classin
def source_from_row(row):
waveform = row.waveform
sourceobj = sourcemap[row.waveform].__new__(sourcemap[row.waveform])
sourceobj.numrel_data = str("")
params = {}
for attr in dir(row):
if not attr[0] == "_" and not attr[:3] =="get":
#print attr
try:
params[attr] = getattr(row, attr)
setattr(sourceobj, attr, getattr(row, attr))
except AttributeError:
print("Error processing the {} column".format(attr))
sourceobj.params = params
try:
sourceobj.time = row.time_geocent_gps
except:
sourceobj.time = row.geocent_start_time
pass
return sourceobj
def source_from_dict(params):
sourceobj = sourcemap[params['morphology']].__new__(sourcemap[params['morphology']])
sourceobj.numrel_data = str("")
    for attr, value in params.items():
        setattr(sourceobj, attr, value)
    sourceobj.params = params
    try:
        sourceobj.time = params['time_geocent_gps']
    except KeyError:
        sourceobj.time = params.get('geocent_start_time')
return sourceobj
table_types = {
# Ad-Hoc
"ga" : lsctables.SimBurstTable,
"sg" : lsctables.SimBurstTable,
"wnb" : lsctables.SimBurstTable,
"sc" : lsctables.SimBurstTable,
# Supernova Families
"d08" : lsctables.SimBurstTable,
"s10" : lsctables.SimBurstTable,
"m12" : lsctables.SimBurstTable,
"o13" : lsctables.SimBurstTable,
"y10" : lsctables.SimBurstTable,
# Long Duration
"adi" : lsctables.SimBurstTable,
# Ringdown
"rng" : lsctables.SimRingdownTable,
"gng" : lsctables.SimRingdownTable,
}
tables = {
"burst" : lsctables.SimBurstTable,
"ringdown" : lsctables.SimRingdownTable
}
def mkdir(path):
"""
Make all of the tree of directories in a given path if they don't
already exist.
Parameters
----------
path : str
The path to the desired directory.
"""
sub_path = os.path.dirname(path)
if not os.path.exists(sub_path):
mkdir(sub_path)
if not os.path.exists(path):
os.mkdir(path)
class TableTypeError(Exception):
pass
class MDCSet():
inj_families_names = {'ga' : 'Gaussian',
'sg' : 'SineGaussian',
'wnb': 'BTLWNB',
"sc" : "StringCusp",
# Supernova families
'd08' : 'Dimmelmeier+08',
's10' : 'Scheidegger+10',
'm12' : 'Mueller+12',
'o13' : 'Ott+13',
'y10' : "Yakunin+10",
# Long-duration
'adi' : 'ADI',
# Ringdown
'rng' : "BBHRingdown",
'gng' : "GenericRingdown",
}
inj_families_abb = dict((v,k) for k,v in list(inj_families_names.items()))
hist_parameters = {
"StringCusp": ["amplitude", "ra", "dec"],
"SineGaussian": ["hrss", "psi", "ra", "dec"],
"Gaussian": ["hrss", "psi", "ra", "dec"],
"BTLWNB": ["hrss", "ra", "dec"],
"Dimmelmeier+08": ['hrss', 'ra', 'dec']
}
waveforms = []
def __init__(self, detectors, name='MDC Set', table_type = "burst"):
"""
Represents an MDC set, stored in an XML SimBurstTable file.
Parameters
----------
detectors : list
A list of detector names where the injections should be made.
name : str
A name for the MDC Set. Defaults to 'MDC Set'.
table_type : str
The type of table which should be generated. Default is `burst`,
which generates a SimBurstTable.
"""
self.detectors = detectors
self.waveforms = []
self.strains = []
self.egw = []
self.times = []
self.name = name
self.times = np.array(self.times)
self.table_type = tables[table_type]
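    # Illustrative usage sketch (not part of the original class): build a set for
    # two detectors, add waveform objects from minke.sources, then write the
    # table out. The SineGaussian arguments shown here are assumptions.
    # >>> mdcs = MDCSet(["H1", "L1"], name="sg_test")
    # >>> mdcs + sources.SineGaussian(q=9, frequency=235, hrss=1e-22, time=1126259462)
    # >>> mdcs.save_xml("sg_test.xml.gz")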
def __add__(self, waveform):
"""
Handle a waveform being added to the MDC set.
Parameters
----------
waveform : Waveform object
The waveform which should be added to the MDC set.
"""
# Check that this type of waveform can go into this type of
# XML file.
if not table_types[self.inj_families_abb[waveform.waveform]] == self.table_type:
raise TableTypeError()
self.waveforms.append(waveform)
self.times = np.append(self.times, waveform.time)
def save_xml(self, filename):
"""
Save the MDC set as an XML SimBurstTable.
Parameters
----------
filename : str
The location to save the xml file. The output is gzipped, so ending it with
a ".gz" would stick with convention.
"""
xmldoc = ligolw.Document()
lw = xmldoc.appendChild(ligolw.LIGO_LW())
sim = lsctables.New(self.table_type)
lw.appendChild(sim)
# This needs to be given the proper metadata once the package has the maturity to
# write something sensible.
for waveform in self.waveforms:
procrow = process.register_to_xmldoc(xmldoc, "minke_burst_mdc+{}".format(minke.__version__), {}) # waveform.params)
try:
waveform_row = waveform._row(sim)
waveform_row.process_id = procrow.process_id
except:
row = sim.RowType()
for a in list(self.table_type.validcolumns.keys()):
if a in list(waveform.params.keys()):
setattr(row, a, waveform.params[a])
else:
if not hasattr(waveform, a):
setattr(row, a, 0)
else:
setattr(row, a, getattr(waveform, a))
row.waveform = waveform.waveform
if self.table_type == lsctables.SimBurstTable:
# Fill in the time
                    row.set_time_geocent(LIGOTimeGPS(float(waveform.time)))
# Get the sky locations
row.ra, row.dec, row.psi = waveform.ra, waveform.dec, waveform.psi
row.simulation_id = waveform.simulation_id
row.waveform_number = random.randint(0,int(2**32)-1)
### !! This needs to be updated.
row.process_id = "process:process_id:0" #procrow.process_id
waveform_row = row
sim.append(waveform_row)
#del waveform_row
# Write out the xml and gzip it.
utils.write_filename(xmldoc, filename, gz=True)
def load_xml(self, filename, full=True, start=None, stop=None):
"""Load the MDC Set from an XML file containing the SimBurstTable.
Parameters
----------
filename : str
The filename of the XML file.
full : bool
If this is true (which is the default) then all of
the calculated parameters are computed from the waveform
            definition.
start : float
The time at which the xml read-in should
start. The default is "None", in which case the xml file
will be read-in from the start.
        stop : float
The last time to be read from the xml file. The default is None,
which causes the xml to be read right-up to the last time in the
file.
To Do
-----
        At the moment this loads the information into the object, but it
doesn't produce waveform objects for each of the injections in the
file. This should be fixed so that the object works symmetrically.
"""
i = 0
#sim_burst_table = lalburst.SimBurstTableFromLIGOLw(filename, start, stop)
xml = glue.ligolw.utils.load_filename(filename,
contenthandler = glue.ligolw.ligolw.LIGOLWContentHandler,
verbose = True)
sim_burst_table = glue.ligolw.table.get_table(xml, self.table_type.tableName)
for i,simrow in enumerate(sim_burst_table):
            # This is an ugly kludge to get around the poor choice of waveform name
            # in the xmls: rename numerical-relativity injections so that they map
            # onto the correct source class.
            if simrow.waveform[:3]=="s15":
                self.numrel_file = str(simrow.waveform)
                simrow.waveform = "Dimmelmeier+08"
self.waveforms.append(source_from_row(simrow))
if full:
self._measure_hrss(i)
self._measure_egw_rsq(i)
if self.table_type == tables["burst"]:
self.times = np.append(self.times, float(simrow.time_geocent_gps))
def _generate_burst(self,row,rate=16384.0):
"""
Generate the burst described in a given row, so that it can be
measured.
Parameters
----------
row : SimBurst Row
The row of the waveform to be measured
rate : float
The sampling rate of the signal, in Hz. Defaults to 16384.0Hz
Returns
-------
hp :
The strain in the + polarisation
hx :
The strain in the x polarisation
hp0 :
A copy of the strain in the + polarisation
hx0 :
A copy of the strain in the x polarisation
"""
row = self.waveforms[row]
hp, hx, hp0, hx0 = row._generate()
return hp, hx, hp0, hx0
def _getDetector(self, det):
"""
A method to return a LALDetector object corresponding to a detector's
X#-style name, e.g. 'H1' as the Hanford 4km detector.
Parameters
----------
det : str
A string describing the detector in the format letter-number, e.g
"H1" would be the Hanford 4km detector, "L1" would be the
Livingston 4km, and so-forth.
Returns
-------
detector : LALDetector
The LAL object describing the detector
"""
# get detector
return lalsimulation.DetectorPrefixToLALDetector(det)
#if det not in lal.cached_detector_by_prefix.keys():
# raise ValueError, "%s is not a cached detector. "\
# "Cached detectors are: %s" % (det, inject.cached_detector.keys())
#return lal.cached_detector_by_prefix[det]
def _timeDelayFromGeocenter(self, detector, ra, dec, gpstime):
"""
Calculate the time delay between the geocentre and a given detector
for a signal from some sky location.
Parameters
----------
detector : str
A string describing the detector, e.g. H1 is the Hanford 4km
detector.
ra : float
The right-ascension of the observation in radians
        dec : float
            The declination of the observation in radians
        gpstime : float
            The GPS time at the geocentre for the observation.
"""
if isinstance(detector, str): detector = self._getDetector(detector)
gpstime = LIGOTimeGPS(float(gpstime))
return XLALTimeDelayFromEarthCenter(detector.location, ra, dec, gpstime)
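    # Illustrative sketch (not part of the original class): for any sky position
    # the returned delay is bounded by the light travel time over an Earth
    # radius, roughly +/- 21 ms, e.g. (mdcs is an assumed MDCSet instance)
    # >>> mdcs._timeDelayFromGeocenter("H1", ra=1.0, dec=0.5, gpstime=1126259462)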
def directory_path(self):
"""
Generate the directory where the frames from this MDC should be stored,
so, e.g. Gaussians 0d100 would go in "ga/ga0d100/"
Returns
-------
str
the folder structure
"""
name = self._simID(0)
abb = self.inj_families_abb[self.waveforms[0].waveform].lower()
return "{}/{}".format(abb, name)
def _simID(self, row):
"""
Generate a name for an injection set in the format expected by cWB
Parameters
----------
row : SimBurst
The simburst table row describing the injection
Returns
-------
str
The name of the injection in the cWB format
"""
row = self.waveforms[row]
name = ''
numberspart = ''
if row.waveform in ("Dimmelmeier+08", "Scheidegger+10", "Mueller+12", "Ott+13", "Yakunin+10"):
#print row
numberspart = os.path.basename(row.params['numrel_data']).split('.')[0]
if row.waveform == "Gaussian":
numberspart = "{:.3f}".format(row.duration * 1e3)
elif row.waveform == "SineGaussian":
if row.pol_ellipse_e==1.0:
pol="linear"
elif row.pol_ellipse_e==0.0:
pol="circular"
elif 0.0<row.pol_ellipse_e<1.0:
pol = "elliptical"
else:
pol = "inclined"
numberspart = "f{:.0f}_q{:.0f}_{}".format(row.frequency, row.q, pol)
elif row.waveform == "BTLWNB":
numberspart = "{}b{}tau{}".format(row.frequency, row.bandwidth, row.duration)
name += '{}_{}'.format(self.inj_families_abb[row.waveform].lower(), numberspart).replace('.','d')
return name
def _measure_hrss(self, row, rate=16384.0):
"""
Measure the various components of hrss (h+^2, hx^2, hphx) for a given
input row. This is accomplished by generating the burst and calling
the SWIG wrapped XLALMeasureHrss in lalsimulation.
Parameters
----------
row : int
The row number of the waveforms to be measured
rate : float
The sampling rate of the signal, in Hz. Defaults to 16384.0Hz
Returns
-------
hrss : float
The measured hrss of the waveform amplitude: sqrt(|Hp|^2 + |Hx|^2)
hphp : float
The hrss of the + polarisation only.
hxhx : float
The hrss of the x polarisation only.
hphx : float
The hrss of |HpHx|
"""
row = self.waveforms[row]
hp, hx, hp0, hx0 = row._generate() #self._generate_burst(row)# self.hp, self.hx, self.hp0, self.hx0
hp0.data.data *= 0
hx0.data.data *= 0
# H+ hrss only
hphp = lalsimulation.MeasureHrss(hp, hx0)**2
# Hx hrss only
hxhx = lalsimulation.MeasureHrss(hp0, hx)**2
# sqrt(|Hp|^2 + |Hx|^2)
hrss = lalsimulation.MeasureHrss(hp, hx)
hp.data.data = numpy.abs(hx.data.data) + numpy.abs(hp.data.data)
# |H+Hx|
hphx = (lalsimulation.MeasureHrss(hp, hx0)**2 - hrss**2)/2
#print hrss
self.strains.append([hrss, hphp, hxhx, hphx])
def _measure_egw_rsq(self, row, rate=16384.0):
"""
Measure the energy emitted in gravitational waves divided
by the distance squared in M_solar / pc^2. This is accomplished
by generating the burst and calling the SWIG wrapped
XLALMeasureHrss in lalsimulation.
Parameters
----------
row : int
The row number of the waveforms to be measured
rate : float
The sampling rate of the signal, in Hz. Defaults to 16384.0Hz
Returns
-------
egw : float
The energy emitted in gravitational waves divided
by the distance squared in M_solar / pc^2.
"""
hp, hx, _, _ = self._generate_burst(row)
self.egw.append(lalsimulation.MeasureEoverRsquared(hp, hx))
def _responses(self, row):
"""
Calculate the antenna repsonses for each detector to the waveform.
Parameters
----------
row : int
The row number of the waveforms to be measured
Returns
-------
responses : list of lists
A list containing the lists of antenna responses, with the first
element of each list containing the detector acronym.
"""
output = []
row = self.waveforms[row]
for detector in self.detectors:
time = row.time_geocent_gps + self._timeDelayFromGeocenter(detector, row.ra, row.dec, row.time_geocent_gps)
time = np.float64(time)
rs = response(time, row.ra, row.dec, 0, row.psi, 'radians', detector)
output.append([detector, time, rs[0], rs[1]] )
return output
def plot_skymap(self):
"""
Plot a skymap of the injections distribution in RA and DEC on a Hammer projection.
Returns
-------
matplotlib figure
"""
fig = plt.figure()
# Load the ra and dec numbers out of the waveforms
dec = [getattr(s, 'dec') for s in self.waveforms]
ra = [getattr(s, 'ra') for s in self.waveforms]
# Make the plot on a hammer projection
plt.subplot(111, projection='hammer')
H, x, y = np.histogram2d(ra, dec, [50, 25], range=[[0, 2*np.pi], [-np.pi/2, np.pi/2]])
dist = plt.pcolormesh(x-np.pi,y, H.T, cmap="viridis")
plt.title("Sky distribution")
plt.colorbar(dist, orientation='horizontal')
return fig
def plot_hist(self, parameter):
"""
Plot a histogram of a waveform parameter.
Parameters
----------
parameter : str
The name of the simburst table parameter which is desired for the plot.
Returns
-------
matplotlib figure
"""
fig = plt.figure()
prms = [getattr(s, parameter) for s in self.waveforms]
ax2 = plt.subplot(111)
ax2.set_title("{} distribution".format(parameter))
ax2.set_xlabel(parameter)
ax2.hist(prms, bins=100, log=True, histtype="stepfilled", alpha=0.6);
return fig
def gravEn_row(self, row, frame):
"""
Produces a gravEn-style log row for a row of the simBurstTable.
Parameters
----------
row : int
The row number of the waveforms to be measured
Returns
-------
str
A string in the gravEn format which describes the injection.
"""
strains = self.strains[row]
rowname = self._simID(row)
responses = self._responses(row)
energy = self.egw[row]
row = self.waveforms[row]
output = []
if not row.incl:
cosincl = ""
else:
cosincl = np.cos(row.incl)
output.append(self.name) # GravEn_SimID
output.append(strains[0]) # SimHrss
output.append(energy) # SimEgwR2
output.append(strains[0]) # GravEn_Ampl
output.append(cosincl) # Internal_x the cosine of the angle the LOS makes with axis of angular momentum
        output.append(row.phi) # Internal_phi angle between source x-axis and the LOS
output.append(np.cos(np.pi/2.0 - row.dec)) # cos(External_x) # this needs to be the co-declination
output.append(row.ra if row.ra < np.pi else row.ra - 2*np.pi)
# ^ External_phi # This is the RA projected onto an Earth-based coordinate system
output.append(row.psi) # External_psi # source's polarisation angle
output.append(frame.start) # FrameGPS
output.append(row.time_geocent_gps) # EarthCtrGPS
output.append(rowname) # SimName
output.append(strains[1]) # SimHpHp
output.append(strains[2]) # SimHcHc
        output.append(strains[3]) # SimHpHc
output.append(" ".join(" ".join(map(str,l)) for l in responses))
return ' '.join(str(e) for e in output)
class Frame():
"""
Represents a frame, in order to prepare the injection frames
"""
def __init__(self, start, duration, ifo, number = -1):
"""
Parameters
----------
number : int
The frame's number within the project. Defaults to -1.
"""
self.start = start
self.duration = duration
self.end = self.start + duration
self.ifos = ifo
        self.number = number
def __repr__(self):
out = ''
out += "MDC Frame \n"
for ifo in self.ifos:
out += "{} {} {} \n".format(ifo, self.start, self.duration)
return out
def get_rowlist(self,mdcs):
"""
Return the rows from an MDC set which correspond to this frame.
Parameters
----------
mdcs : MDCSet object
The set of MDCs from which the rows are to be found.
"""
return np.where((mdcs.times<self.end)&(mdcs.times>self.start))[0]
def calculate_n_injections(self, mdcs):
return len(mdcs.times[(mdcs.times<self.end)&(mdcs.times>self.start)])
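    # Illustrative sketch (not part of the original class): both helpers above
    # rely on mdc.times being a numpy array, so a frame can select its own
    # injections with a boolean mask (mdcs is an assumed MDCSet instance).
    # >>> frame = Frame(start=1126250000, duration=4096, ifo=["H1", "L1"])
    # >>> rows = frame.get_rowlist(mdcs)          # indices inside this frame
    # >>> frame.calculate_n_injections(mdcs) == len(rows)
    # True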
def generate_log(self,mdc):
log = '# GravEn_SimID SimHrss SimEgwR2 GravEn_Ampl Internal_x Internal_phi External_x External_phi External_psi FrameGPS EarthCtrGPS SimName SimHpHp SimHcHc SimHpHc H1 H1ctrGPS H1fPlus H1fCross L1 L1ctrGPS L1fPlus L1fCross\n'
rowlist = self.get_rowlist(mdc)
for row in rowlist:
log += mdc.gravEn_row(row, self)
log += "\n"
return log
def generate_gwf(self, mdc, directory, project = "Minke", channel="SCIENCE", force=False, rate=16384.0):
"""
Produce the gwf file which corresponds to the MDC set over the period of this frame.
Parameters
----------
mdc : MDCSet object
The MDC set which should be used to produce this frame.
directory : str
The root directory where all of the frames are to be stored, for example
"/home/albert.einstein/data/mdc/frames/"
would cause the SineGaussian injections to be made in the directories under
"/home/albert.einstein/data/mdc/frames/sg"
project : str
The name of the project which this frame is a part of. Defaults to 'Minke'.
channel : str
The name of the channel which the injections should be made into. This is prepended by the initials
for each interferometer, so there will be a channel for each interferometer in the gwf.
force : bool
If true this forces the recreation of a GWF file even if it already exists.
Outputs
-------
gwf
The GWF file for this frame.
"""
ifosstr = "".join(set(ifo[0] for ifo in self.ifos))
family = mdc.waveforms[0].waveform
epoch = lal.LIGOTimeGPS(self.start)
filename = "{}-{}-{}-{}.gwf".format(ifosstr, family, self.start, self.duration)
self.frame = lalframe.FrameNew(epoch = epoch,
duration = self.duration, project='', run=1, frnum=1,
detectorFlags=lal.LALDETECTORTYPE_ABSENT)
ifobits = np.array([getattr(lal,"{}_DETECTOR_BIT".format(lal.cached_detector_by_prefix[ifo].frDetector.name.upper()))
for ifo in self.ifos])
ifoflag = numpy.bitwise_or.reduce(ifobits)
RUN_NUM = -1 # Simulated data should have a negative run number
head_date = str(self.start)[:5]
frameloc = directory+"/"+mdc.directory_path()+"/"+head_date+"/"
mkdir(frameloc)
if not os.path.isfile(frameloc + filename) or force:
epoch = lal.LIGOTimeGPS(self.start)
frame = lalframe.FrameNew(epoch, self.duration, project, RUN_NUM, self.number, ifoflag)
data = []
# Loop through each interferometer
for ifo in self.ifos:
# Calculate the number of samples in the timeseries
nsamp = int((self.end-self.start)*rate)
# Make the timeseries
h_resp = lal.CreateREAL8TimeSeries("{}:{}".format(ifo, channel), epoch, 0, 1.0/rate, lal.StrainUnit, nsamp)
# Loop over all of the injections corresponding to this frame
rowlist = self.get_rowlist(mdc)
if len(rowlist)==0: return
for row in rowlist:
sim_burst = mdc.waveforms[row]._row()
if sim_burst.hrss > 1:
distance = sim_burst.amplitude
else:
distance = None
#hp, hx = lalburst.GenerateSimBurst(sim_burst, 1.0/rate);
hp, hx, _, _ = mdc.waveforms[row]._generate(rate=rate, half=True, distance=distance)
# Apply detector response
det = lalsimulation.DetectorPrefixToLALDetector(ifo)
# Produce the total strains
h_tot = lalsimulation.SimDetectorStrainREAL8TimeSeries(hp, hx,
sim_burst.ra, sim_burst.dec, sim_burst.psi, det)
# Inject the waveform into the overall timeseries
lalsimulation.SimAddInjectionREAL8TimeSeries(h_resp, h_tot, None)
lalframe.FrameAddREAL8TimeSeriesSimData(frame, h_resp)
# Make the directory in which to store the files
# if it doesn't exist already
mkdir(frameloc)
# Write out the frame file
lalframe.FrameWrite(frame, frameloc+filename)
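# --- Example (sketch, not used in production) --------------------------------
# Shows how Frame.get_rowlist() picks out the injections whose geocentric times
# fall inside the frame. _StubMDC is a hypothetical stand-in for an MDCSet
# object: only the `times` attribute that get_rowlist() relies on is provided.
def _example_frame_row_selection():
    class _StubMDC(object):
        times = np.array([999999000.0, 1000000100.0, 1000005000.0])
    frame = Frame(start=1000000000, duration=4096, ifo=["H1", "L1"])
    rows = frame.get_rowlist(_StubMDC()) # -> array([1]); only the middle time lies inside
    n_inj = frame.calculate_n_injections(_StubMDC()) # -> 1
    return rows, n_inj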
class HWInj(Frame):
"""
Represents a hardware injection frame.
    Injection frames must be ASCII files of the hoft sampled at
    the antenna sampling rate, appropriately convolved with an
    antenna response function.
As a result of the simplicity of this specific output format
we do not need information such as start-time in the file itself,
however we should have a sensible naming scheme for the ASCII files
since they will need to be produced as sidecars for an xml file.
"""
def __init__(self, ifos):
"""We'll need to know the start-time, the duration, and the ifo
for each which is to be used for hardware injections in order
to keep consistency with the data in the xml file, and so that the
appropriate waveform is injected into the appropriate detector.
Parameters
----------
ifos : list
The name of the interferometers, e.g. "L1" for the Livingston, LA LIGO detector.
"""
self.ifos = ifos
def __repr__(self):
"""
The printable representation of this object.
"""
out = ""
out += "Hardware MDC Frame \n"
for ifo in self.ifos:
out += "{} \n".format(ifo)
return out
def generate_pcal(self, mdc, directory, force = False, rate=16384):
"""
Produce the PCAL-ready hardware injection files as an ASCII list
sampled at the detector's sample rate.
Parameters
----------
mdc : MDCSet object
The signal set which should be used to generate the frame.
directory : str
The root directory where all of the frames are to be stored, for example
"/home/albert.einstein/data/mdc/frames/"
would cause the SineGaussian injections to be made in the directories under
"/home/albert.einstein/data/mdc/frames/sg"
force : bool
If true this forces the regeneration of the file, even if it
already exists.
Outputs
-------
ascii file
The ASCII file containing the correctly sampled waveform convolved with
the antenna pattern.
"""
family = mdc.waveforms[0].waveform
frameloc = os.path.join(directory, (mdc.directory_path()))
#rowlist = self.get_rowlist(mdc)
# Unlike with a conventional frame, we need to produce a separate file
# for each IFO.
for ifo in self.ifos:
for sim_burst in mdc.waveforms:
#sim_burst = mdc.waveforms[row]
# Check if the file exists, or if we're forcing the creation
filename = "{}_{}_{}.txt".format(family,
sim_burst.time,
ifo)
if not os.path.isfile(frameloc + filename) or force:
data = []
epoch = lal.LIGOTimeGPS(sim_burst.time)
duration = 10
nsamp = duration*rate
h_tot = sim_burst._generate_for_detector([ifo], sample_rate=rate)
data = np.array(h_tot.data.data)
                    # write alongside the frame directory, matching the existence check above
                    np.savetxt(frameloc + filename, data)
class HWFrameSet():
def __init__(self, ifos=["H1", "L1"]):
"""
A collection of hardware injection frames.
Parameters
----------
        ifos : list
            The interferometers for which hardware injection frames
            should be prepared. Defaults to ["H1", "L1"].
"""
self.frames = []
self.frames = [HWInj(ifos)]
#self.frames.append(frame)
def full_frameset(self, mdc, directory, force=False):
"""
        Produce the hardware-injection ASCII files which correspond to the MDC set over the period of the frames in this collection.
Parameters
----------
mdc : MDCSet object
The MDC set which should be used to produce this frame.
directory : str
The root directory where all of the frames are to be stored, for example
"/home/albert.einstein/data/mdc/frames/"
would cause the SineGaussian injections to be made in the directories under
"/home/albert.einstein/data/mdc/frames/sg"
force : bool
If true this forces the recreation of a GWF file even if it already exists.
Outputs
-------
ascii files
The ASCII files for these hardware injections.
"""
for frame in self.frames:
frame.generate_pcal(mdc, directory, force)
class FrameSet():
def __init__(self, frame_list):
"""
A collection of frames.
Parameters
----------
frame_list : str
            The filepath of a CSV file containing the list of frames,
and the parameters required to produce them: the start and
duration times, and the interferometers they describe.
"""
self.frames = []
self.frame_list = frame_list = pd.read_csv(frame_list)
for frame in frame_list.iterrows():
frame = frame[1]
ifos = frame['ifo'].replace("['",'').replace("']",'').replace("'",'').split(' ')
frame = Frame(frame['start time'],frame['duration'],ifos)
self.frames.append(frame)
def full_frameset(self, mdc, directory, channel="SCIENCE", force=False):
"""
        Produce the gwf files which correspond to the MDC set over the period of the frames in this collection.
Parameters
----------
mdc : MDCSet object
The MDC set which should be used to produce this frame.
directory : str
The root directory where all of the frames are to be stored, for example
"/home/albert.einstein/data/mdc/frames/"
would cause the SineGaussian injections to be made in the directories under
"/home/albert.einstein/data/mdc/frames/sg"
channel : str
The name of the channel which the injections should be made into. This is prepended by the initials
for each interferometer, so there will be a channel for each interferometer in the gwf.
force : bool
If true this forces the recreation of a GWF file even if it already exists.
Outputs
-------
gwf files
The GWF files for these frames.
"""
for frame in self.frames:
            frame.generate_gwf(mdc, directory, channel=channel, force=force)
def full_logfile(self, mdc, location):
"""
Produce a log file for the entire frame set
"""
full_log = ''
for frame in self.frames:
full_log += frame.generate_log(mdc)
with open(location, "w") as text_file:
text_file.write(full_log)
| isc |
njwilson23/rasterio | rasterio/tool.py | 1 | 5429 | """
Implementations of various common operations, like `show()` for displaying an
array with matplotlib, and `stats()` for computing min/max/avg. Most can
handle a numpy array or `rasterio.Band()`. Primarily supports `$ rio insp`.
"""
from __future__ import absolute_import
import code
import collections
import logging
import warnings
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
except RuntimeError as e:
# Certain environment configurations can trigger a RuntimeError like:
# Trying to import matplotlibRuntimeError: Python is not installed as a
# framework. The Mac OS X backend will not be able to function correctly
# if Python is not installed as a framework. See the Python ...
warnings.warn(str(e), RuntimeWarning, stacklevel=2)
plt = None
import numpy
import rasterio
from rasterio.five import zip_longest
logger = logging.getLogger('rasterio')
Stats = collections.namedtuple('Stats', ['min', 'max', 'mean'])
# Collect dictionary of functions for use in the interpreter in main()
funcs = locals()
def show(source, cmap='gray', with_bounds=True):
"""
Display a raster or raster band using matplotlib.
Parameters
----------
source : array-like or (raster dataset, bidx)
If array-like, should be of format compatible with
matplotlib.pyplot.imshow. If the tuple (raster dataset, bidx),
selects band `bidx` from raster.
cmap : str (opt)
Specifies the colormap to use in plotting. See
matplotlib.Colors.Colormap. Default is 'gray'.
with_bounds : bool (opt)
Whether to change the image extent to the spatial bounds of the image,
rather than pixel coordinates. Only works when source is
(raster dataset, bidx).
"""
if isinstance(source, tuple):
arr = source[0].read(source[1])
xs = source[0].res[0] / 2.
ys = source[0].res[1] / 2.
if with_bounds:
extent = (source[0].bounds.left - xs, source[0].bounds.right - xs,
source[0].bounds.bottom - ys, source[0].bounds.top - ys)
else:
extent = None
else:
arr = source
extent = None
if plt is not None:
imax = plt.imshow(arr, cmap=cmap, extent=extent)
fig = plt.gcf()
fig.show()
else:
raise ImportError("matplotlib could not be imported")
def stats(source):
"""Return a tuple with raster min, max, and mean.
"""
if isinstance(source, tuple):
arr = source[0].read(source[1])
else:
arr = source
return Stats(numpy.min(arr), numpy.max(arr), numpy.mean(arr))
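# Usage sketch: `stats()` also accepts a plain numpy array, not only a
# (dataset, bidx) tuple; this tiny example is illustrative only.
def _stats_example():
    arr = numpy.array([[0, 1], [2, 3]], dtype='float64')
    return stats(arr)  # Stats(min=0.0, max=3.0, mean=1.5)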
def show_hist(source, bins=10, masked=True, title='Histogram'):
"""
Easily display a histogram with matplotlib.
Parameters
----------
    source : np.array or rasterio.Band or tuple(dataset, bidx)
        Input data to display. The first three arrays in multi-dimensional
        arrays are plotted as red, green, and blue.
    bins : int, optional
        Compute histogram across N bins.
masked : bool, optional
When working with a `rasterio.Band()` object, specifies if the data
should be masked on read.
title : str, optional
Title for the figure.
"""
if plt is None:
raise ImportError("Could not import matplotlib")
if isinstance(source, (tuple, rasterio.Band)):
arr = source[0].read(source[1], masked=masked)
else:
arr = source
# The histogram is computed individually for each 'band' in the array
# so we need the overall min/max to constrain the plot
rng = arr.min(), arr.max()
    if len(arr.shape) == 2:
arr = [arr]
colors = ['gold']
else:
colors = ('red', 'green', 'blue', 'violet', 'gold', 'saddlebrown')
# If a rasterio.Band() is given make sure the proper index is displayed
# in the legend.
if isinstance(source, (tuple, rasterio.Band)):
labels = [str(source[1])]
else:
labels = (str(i + 1) for i in range(len(arr)))
# This loop should add a single plot each band in the input array,
# regardless of if the number of bands exceeds the number of colors.
# The colors slicing ensures that the number of iterations always
# matches the number of bands.
# The goal is to provide a curated set of colors for working with
# smaller datasets and let matplotlib define additional colors when
# working with larger datasets.
for bnd, color, label in zip_longest(arr, colors[:len(arr)], labels):
plt.hist(
bnd.flatten(),
bins=bins,
alpha=0.5,
color=color,
label=label,
range=rng
)
plt.legend(loc="upper right")
plt.title(title, fontweight='bold')
plt.grid(True)
plt.xlabel('DN')
plt.ylabel('Frequency')
fig = plt.gcf()
fig.show()
def main(banner, dataset, alt_interpreter=None):
""" Main entry point for use with python interpreter """
local = dict(funcs, src=dataset, np=numpy, rio=rasterio, plt=plt)
if not alt_interpreter:
code.interact(banner, local=local)
elif alt_interpreter == 'ipython':
import IPython
IPython.InteractiveShell.banner1 = banner
IPython.start_ipython(argv=[], user_ns=local)
else:
raise ValueError("Unsupported interpreter '%s'" % alt_interpreter)
return 0
| bsd-3-clause |
Harhoy/transport | transport.py | 1 | 9259 | from __future__ import division
import numpy as np
import math as m
from easygui import multenterbox
import pandas as pd
import matplotlib.pyplot as plt
import math as m
def import_xl(file_path):
df = pd.read_excel(file_path,header = None)
df = df.values
return df
def export_xl(file_path,sheets):
writer = pd.ExcelWriter(file_path)
for sheet,name in sheets.items():
df = pd.DataFrame(name)
df.to_excel(writer,sheet)
writer.save()
#Extracts a column
def column(matrix, i):
    return [row[i] for row in matrix]
#Extracts a row (iterating a matrix yields rows, so index it directly)
def row(matrix, i):
    return list(matrix[i])
#Selection sort O(n2)
def selection_sort(array):
n = len(array)
for i in range(0,n):
smallest = i
for j in range(i,n):
if array[j]<array[smallest]:
smallest = j
copy = array[i]
array[i] = array[smallest]
array[smallest] = copy
return array
#Checks whether two lists share at least one common value
def common_node(array_1,array_2):
    x = selection_sort(array_1)
    y = selection_sort(array_2)
    i = 0
    j = 0
    share = 0
    while i < len(x) and j < len(y):
        if x[i]>y[j]:
            j+=1
        elif x[i]<y[j]:
            i+=1
        else:
            share = 1
            break
    return share
def common_node_count(array_1,array_2):
x = selection_sort(array_1)
y = selection_sort(array_2)
i = 0
j = 0
share = 0
while i < len(x) and j < len(y):
if x[i]>y[j]:
j+=1
elif x[i]<y[j]:
i+=1
else:
share += 1
j +=1
i +=1
return share
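#Example (sketch): both helpers sort their inputs first, so unsorted lists are fine
def _common_node_example():
    a = [3, 1, 7, 5]
    b = [2, 5, 3, 9]
    return common_node(a, b), common_node_count(a, b) #-> (1, 2)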
#SHORTEST PATH FUNCTIONS
#Builds a graph (cost matrix) from a list of links
def make_graph(array):
    #nodes = common_node_count(column(array,0),column(array,1))
    nodes = 35
    matrix = np.full((nodes,nodes),10**6) #Initialise with large costs that get replaced by link costs
    for i in range(0,len(array)): #Main loop
        #Subtract one so node numbers match python's zero-based indexing
matrix[array[i][1]-1][array[i][0]-1] = array[i][2]
matrix[array[i][0]-1][array[i][1]-1] = array[i][2]
np.fill_diagonal(matrix, 0)
return matrix
#Builds an n x n shortest-path length matrix (Floyd-Warshall)
def floyd_warshall(array):
matrix = make_graph(array)
#nodes = common_node_count(column(array,0),column(array,1))
nodes = 35
pred = np.full((nodes,nodes),-1)
for i in range(0,nodes):
for j in range(0,nodes):
if i != j:
pred[i][j] = i
for k in range(0,nodes):
for i in range(0,nodes):
for j in range(0,nodes):
if matrix[i][j] > matrix[i][k] + matrix[k][j]:
matrix[i][j] = matrix[i][k] + matrix[k][j]
pred[i][j] = pred[k][j]
return matrix,pred
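#Example (sketch): shortest paths for a tiny link list [from_node, to_node, cost]
#with 1-based node numbers; make_graph() still allocates the hard-coded 35x35
#matrix, so unconnected nodes simply keep the 10**6 cost
def _floyd_warshall_example():
    links = np.array([[1, 2, 4],
                      [2, 3, 3],
                      [1, 3, 10]])
    lengths, pred = floyd_warshall(links)
    return lengths[0][2] #7: the two-leg route 1 -> 2 -> 3 beats the direct link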
#Loads data from a csv file into a network array
def get_network(net_csv):
graf = open(net_csv,'r')
lenker=0
for line in graf:
lenker+=1
graf_edit = np.full((lenker, 3),0)
graf = open(net_csv,'r')
k = 0
for line in graf:
stuff = line.split(";")
graf_edit[k][0] = float(stuff[0])
graf_edit[k][1] = float(stuff[1])
temp = stuff[2].split('\n')[0]
graf_edit[k][2] = float(temp)
k+=1
return graf_edit
#Builds a path vector (recursive backtrack through the predecessor matrix)
def path(p,i,j,path_vec):
if i == j:
path_vec.append(i)
else:
path(p, i, p[i][j],path_vec)
path_vec.append(j)
#Gets a specific path
def get_path(p,i,j):
#j = j + 1
path_vec=[]
path(p,i,j,path_vec)
#for i in range(1,len(path_vec)):
# path_vec[i] = path_vec[i] - 1
return path_vec
#Builds an adjacency matrix (unfinished)
def build_adj(pred):
adj_mat = np.zeros((len(pred),len(pred)))
array_a = []
array_b = []
for i in range(1,len(pred)):
for j in range(1,len(pred)):
array_a = get_path(pred,i,j)
print array_a
array_b = get_path(pred,2,10)
print array_b
try:
adj_mat[1][j] = common_node(array_a,array_b)
except:
adj_mat[1][j] = 0
print adj_mat[1][j]
return adj_mat
#Network loader
#Arguments: (1) graph (2) network array (3) od matrix (4) predecessor matrix
def network_loader(graf,net,od,pred):
    #Number of nodes
n = len(od)-1
    #Reset and update the network columns
for k in range(0,len(net)):
        net[k][3]=0 #Resets the number of trips
        net[k][2]=graf[k][2] #Inserts updated distances from the graph
    #Assigns trips onto the network
for i in range(0,n):
for j in range(0,n):
path = get_path(pred,i,j)
len_path=get_len_path(path)
for h in range(0,len_path):
for k in range(0,len(net)):
if net[k][0] == path[h]+1 and net[k][1] == path[1+h]+1:
net[k][3] += int(od[i][j])
elif net[k][1] == path[h]+1 and net[k][0] == path[1+h]+1:
net[k][3] += int(od[i][j])
return net
#a=get_path(pred,5,12)
#GRAVITY MODEL FUNCTIONS
def deter_mat_make(length_mat):
deter_mat = np.zeros((len(length_mat),len(length_mat)))
for i in range(0,len(length_mat)):
for j in range(0,len(length_mat)):
deter_mat[i][j] = deter(length_mat[i][j])
return deter_mat
def deter(length):
    #exponential deterrence; beta is a module-level decay parameter assumed to be set elsewhere
    return m.exp(beta*length)
def sumproduct(list1,list2):
sums = 0
for i in range(0,len(list1)):
sums += list1[i]*list2[i]
return sums
def gravity(origin, destination, length_mat):
#Initialization
    deter_mat = deter_mat_make(length_mat) #Builds the deterrence matrix
    dimension = len(origin) #Gets the matrix dimension
    alpha = [1]*(dimension) #Initialises the alpha vector
    beta = [1]*(dimension) #Initialises the beta vector
    largest = 10**6 #Initialises the largest deviation
    alpha_last = alpha #Initialises the previous alpha
    beta_last = beta #Initialises the previous beta
    k = 0 #Initialises the iteration counter
    iterasjoner = []
    #Main loop
while largest > .00001:
        #Update the balancing factors
for p in range(0,dimension):
alpha[p] = origin[p]/(sumproduct(beta_last,column(deter_mat,p)))
beta[p] = destination[p]/(sumproduct(alpha,row(deter_mat,p)))
largest = 0
        #Loop to find the largest deviation
for j in range(0,dimension):
current = alpha[j]*sumproduct(beta,column(deter_mat,j))-origin[j]
if current>largest:
largest = current
        #Store the previous beta
beta_last = beta
iterasjoner.append(largest)
        #Count one more iteration
k+=1
print "Konvergens, Gravitasjonsmodell", largest
if k == maxiter:
largest = 0
return alpha,beta,k,iterasjoner
def create_od(origin,destination, length_mat):
alpha,beta,k,iterasjoner = gravity(origin, destination, length_mat)
deter_mat = deter_mat_make(length_mat)
od = np.zeros((len(origin),len(origin)))
for i in range(0,len(origin)):
for j in range(0,len(origin)):
od[i][j] = alpha[i]*beta[j]*deter_mat[i][j]
return od,alpha,beta,k,iterasjoner
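#Sketch: gravity()/create_od() implement a doubly-constrained (Furness/IPF)
#balancing. The self-contained numpy version below illustrates the same idea
#without relying on the module-level beta/maxiter globals assumed above;
#decay and iters are illustrative parameters only
def _furness_sketch(origin, destination, length_mat, decay=-0.1, iters=50):
    f = np.exp(decay*np.asarray(length_mat, dtype=float)) #deterrence matrix
    a = np.ones(len(origin))
    b = np.ones(len(destination))
    for _ in range(iters):
        a = np.asarray(origin, dtype=float)/f.dot(b) #row (origin) balancing
        b = np.asarray(destination, dtype=float)/f.T.dot(a) #column (destination) balancing
    return np.outer(a, b)*f #od[i][j] = a[i]*b[j]*f[i][j]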
def calc_pt_matrix(od,length_mat):
    out_od = np.zeros((len(od),len(od)))
    for i in range(0,len(od)):
        for j in range(0,len(od)):
            out_od[i][j] = int(od[i][j])*length_mat[i][j]
    return out_od
def get_min(net):
smallest = 10**6
smallest_id = 10**6
for i in range(0,len(net)):
if net[i][3]/net[i][2]<smallest and net[i][5]==0:
smallest = net[i][3]/net[i][2]
smallest_id = i
return smallest_id,smallest
def change_graph(graph,net):
graph_out = graph
for i in range(0,len(net)):
if net[i][5]==1:
graph_out[i][2]=k_just*graph_out[i][2]
return graph_out
def production(net):
sumcost = 0
for i in range(0,len(net)):
if net[i][5]!=1:
sumcost += (net[i][3]/capacity)*net[i][2]
return sumcost
def sum_pass(net):
sumpass = 0
for i in range(0,len(net)):
sumpass+=net[i][3]
return sumpass
def get_len_path(path):
len_path = 0
if len(path) < 3:
len_path = 0
elif len(path) == 3:
len_path = 2
else:
len_path=int(len(path)/2)+int(len(path)%2)+1
return len_path
def obj(od,length_mat,net,prodgoal):
return (production(net)*kmk*dogn-prodgoal)**2*(k_just-1)*capacity/.9+time_cost(od,length_mat)
def time_cost(od,length_mat):
cost = 0
for i in range(0,len(od)-1):
for j in range(0,len(od)-1):
cost += od[i][j]*length_mat[i][j]
return cost
def get_zero_net(net):
zero_net = np.zeros((len(net),6))
for i in range(0,len(net)):
zero_net[i][2] = net[i][2]
zero_net[i][3] = net[i][3]
zero_net[i][5] = net[i][5]
return zero_net
def update_zero_net(net,zero_net):
for i in range(0,len(net)):
zero_net[i][5] = net[i][5]
return zero_net
| mit |
lalitkumarj/NEXT-psych | next/apps/TupleBanditsPureExploration/Dashboard.py | 1 | 3313 | """
TupleBanditsPureExplorationDashboard
author: Nick Glattard, n.glattard@gmail.com
last updated: 4/24/2015
######################################
TupleBanditsPureExplorationDashboard
"""
import json
import numpy
import numpy.random
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from next.utils import utils
from next.apps.AppDashboard import AppDashboard
class TupleBanditsPureExplorationDashboard(AppDashboard):
def __init__(self,db,ell):
AppDashboard.__init__(self,db,ell)
def get_app_supported_stats(self):
"""
Returns a list of dictionaries describing the identifier (stat_id) and
necessary params inputs to be used when calling getStats
Expected output (list of dicts, each with fields):
        (string) stat_id : the identifier of the statistic
(string) description : docstring of describing outputs
(list of string) necessary_params : list where each string describes the type of param input like 'alg_label' or 'task'
"""
stat_list = self.get_supported_stats()
stat = {}
stat['stat_id'] = 'most_current_ranking'
stat['description'] = self.most_current_ranking.__doc__
stat['necessary_params'] = ['alg_label']
stat_list.append(stat)
return stat_list
def most_current_ranking(self,app_id,exp_uid,alg_label):
"""
        Description: Returns a ranking of arms in the form of a list of dictionaries, which is convenient for downstream applications
Expected input:
(string) alg_label : must be a valid alg_label contained in alg_list list of dicts
        The 'headers' contains a list of dictionaries corresponding to each column of the table with fields 'label' and 'field' where 'label' is the label of the column to be put on top of the table, and 'field' is the name of the field in 'data' that the column corresponds to
Expected output (in dict):
plot_type : 'columnar_table'
headers : [ {'label':'Rank','field':'rank'}, {'label':'Target','field':'index'} ]
(list of dicts with fields) data (each dict is a row, each field is the column for that row):
(int) index : index of target
(int) ranking : rank (0 to number of targets - 1) representing belief of being best arm
"""
alg_list,didSucceed,message = self.db.get(app_id+':experiments',exp_uid,'alg_list')
for algorithm in alg_list:
if algorithm['alg_label'] == alg_label:
alg_id = algorithm['alg_id']
alg_uid = algorithm['alg_uid']
list_of_log_dict,didSucceed,message = self.ell.get_logs_with_filter(app_id+':ALG-EVALUATION',{'alg_uid':alg_uid})
list_of_log_dict = sorted(list_of_log_dict, key=lambda k: k['num_reported_answers'] )
print didSucceed, message
item = list_of_log_dict[-1]
return_dict = {}
return_dict['headers'] = [{'label':'Rank','field':'rank'},{'label':'Target','field':'index'},{'label':'Score','field':'score'},{'label':'Precision','field':'precision'}]
return_dict['data'] = item['targets']
return_dict['plot_type'] = 'columnar_table'
return return_dict
| apache-2.0 |
jgowans/correlation_plotter | rfi_looker.py | 1 | 1835 | #!/usr/bin/env python
import os, time
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal
results_directory = os.getenv('HOME') + "/rfi_capture_results/"
SAMPLE_FREQUENCY = 3600.0 # MHz and us
ADC_SCALE_VALUE = 707.94
# algorithm:
# open a .npy file (or do the disk buffer thing)
filename = raw_input("what file should be open? [most recent] ")
if filename == "": # default to the most recent file
filename = "/tmp/rfi_signal.npy"
else:
filename = results_directory + filename
signal = np.load(filename)
decimation_factor = int(len(signal)/2**20) + 1
print "decimation factor: " + str(decimation_factor)
if decimation_factor >= 2 :
signal_decimated = scipy.signal.decimate(signal, decimation_factor, n=1, ftype="fir")
else:
signal_decimated = signal
print "len : " + str(len(signal_decimated))
axis = np.linspace(0, decimation_factor * len(signal_decimated)/SAMPLE_FREQUENCY, len(signal_decimated), endpoint=False)
plt.plot(axis, signal_decimated, "b.")
plt.show()
# plot the signal decimated by a parameter (default: 1)
# ask the user to input a subplot time
start_time = float(raw_input("At what time (microseconds) does the signal start? "))
end_time = float(raw_input("At what time (microseconds) does the signal end? "))
start_sample = int( start_time * SAMPLE_FREQUENCY )
end_sample = int( end_time * SAMPLE_FREQUENCY )
subsignal = signal[start_sample:end_sample]
subsignal_axis = np.linspace(start_time, end_time, len(subsignal), endpoint=False)
spectrum = np.fft.rfft(subsignal)
spectrum_axis = np.linspace(0, SAMPLE_FREQUENCY/2, len(spectrum), endpoint=False)
plt.subplot(211)
plt.plot(subsignal_axis, subsignal)
plt.subplot(212)
plt.plot(spectrum_axis, 10*np.log10( np.abs(spectrum) / (ADC_SCALE_VALUE*len(spectrum) )))
plt.show()
# plot the subplot and the fft of the subplot
| mit |
dblN/misc | utils.py | 1 | 3046 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from keras.layers import Dense
from keras.preprocessing.image import apply_transform
import matplotlib.pyplot as plt
def take_glimpses(image, location, sizes):
glimpses = []
resize = sizes[0]
for size in sizes:
glimpse = tf.image.extract_glimpse(image, size=size, offsets=location,
normalized=True, centered=True, uniform_noise=False)
glimpses += [tf.image.resize_images(glimpse, resize)]
return glimpses
def glimpse_network(image, location, sizes, activation="relu",
glimpse_num_features=128, location_num_features=128, output_dim=256):
assert len(sizes) == 3
with tf.variable_scope("glimpse_network"):
glimpses = []
resize = sizes[0]
for size in sizes:
glimpse = tf.image.extract_glimpse(image, size=size, offsets=location, uniform_noise=False,
normalized=True, centered=True)
glimpses += [tf.image.resize_images(glimpse, resize[0], resize[1])]
glimpse = tf.concat(-1, glimpses)
glimpse = tf.reshape(glimpse, (-1, np.prod(resize) * len(sizes)))
glimpse_feature = Dense(glimpse_num_features, activation=activation)(glimpse)
location_feature = Dense(location_num_features, activation=activation)(location)
feature = Dense(output_dim, activation=activation)(glimpse_feature + location_feature)
return feature, glimpses
def accuracy_score(y_preds, y_true):
return np.sum((y_preds == y_true).astype(np.float32)) / y_preds.shape[0]
def translate(batch_x, size=(128, 128)):
"""Make translated mnist"""
height = batch_x.shape[1]
width = batch_x.shape[2]
X = np.zeros((batch_x.shape[0],) + size + (1,), dtype=batch_x.dtype)
X[:, :height, :width, :] = batch_x
for i, x in enumerate(X[:]):
tx = np.random.uniform(-(size[1] - width), 0)
ty = np.random.uniform(-(size[0] - height), 0)
translation_matrix = np.asarray([
[1, 0, tx],
[0, 1, ty],
[0, 0, 1]
], dtype=batch_x.dtype)
X[i, :, :, :] = apply_transform(x, translation_matrix, channel_index=2, fill_mode="nearest", cval=0.)
return X
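# Usage sketch (hypothetical shapes): batches of 28x28 single-channel digits are
# placed at random offsets on a 128x128 canvas. Illustrative only.
def _translate_example():
    batch_x = np.random.rand(4, 28, 28, 1).astype("float32")
    translated = translate(batch_x, size=(128, 128))
    return translated.shape  # (4, 128, 128, 1)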
def plot_glimpse(images, locations, name="glimpse.png"):
image = images[0]
location = locations[:, 0, :]
fig = plt.figure()
plt.imshow(image, cmap=plt.get_cmap("gray"))
plt.plot(location[:, 0], location[:, 1])
for i, (x, y) in enumerate(location):
plt.annotate("t=%d" % i, xy=(x, y), xytext=(-10, 10),
textcoords="offset points", ha="right", va="bottom",
bbox=dict(boxstyle="round, pad=0.5", fc="white", alpha=0.5),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3, rad=0"))
plt.savefig(name)
plt.gcf().clear()
plt.close("all")
| mit |
pyxll/pyxll-examples | matplotlib/interactiveplot.py | 1 | 3441 | """
Example code showing how to draw an interactive matplotlib figure
in Excel.
While the figure is displayed Excel is still useable in the background
and the chart may be updated with new data by calling the same
function again.
"""
from pyxll import xl_func
from pandas.stats.moments import ewma
# matplotlib imports
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
# Qt imports
from PySide import QtCore, QtGui
import timer # for polling the Qt application
# dict to keep track of any chart windows
_plot_windows = {}
@xl_func("string figname, numpy_column<float> xs, numpy_column<float> ys, int span: string")
def mpl_plot_ewma(figname, xs, ys, span):
"""
Show a matplotlib line plot of xs vs ys and ewma(ys, span) in an interactive window.
:param figname: name to use for this plot's window
:param xs: list of x values as a column
:param ys: list of y values as a column
:param span: ewma span
"""
# Get the Qt app.
# Note: no need to 'exec' this as it will be polled in the main windows loop.
app = get_qt_app()
# create the figure and axes for the plot
fig = Figure(figsize=(600, 600), dpi=72, facecolor=(1, 1, 1), edgecolor=(0, 0, 0))
ax = fig.add_subplot(111)
# calculate the moving average
ewma_ys = ewma(ys, span=span)
# plot the data
ax.plot(xs, ys, alpha=0.4, label="Raw")
ax.plot(xs, ewma_ys, label="EWMA")
ax.legend()
# generate the canvas to display the plot
canvas = FigureCanvas(fig)
# Get or create the Qt windows to show the chart in.
if figname in _plot_windows:
# get from the global dict and clear any previous widgets
window = _plot_windows[figname]
layout = window.layout()
if layout:
for i in reversed(range(layout.count())):
layout.itemAt(i).widget().setParent(None)
else:
# create a new window for this plot and store it for next time
window = QtGui.QWidget()
window.resize(800, 600)
window.setWindowTitle(figname)
_plot_windows[figname] = window
# create the navigation toolbar
toolbar = NavigationToolbar(canvas, window)
# add the canvas and toolbar to the window
layout = window.layout() or QtGui.QVBoxLayout()
layout.addWidget(canvas)
layout.addWidget(toolbar)
window.setLayout(layout)
window.show()
return "[Plotted '%s']" % figname
#
# Taken from the ui/qt.py example
#
def get_qt_app():
"""
returns the global QtGui.QApplication instance and starts
the event loop if necessary.
"""
app = QtCore.QCoreApplication.instance()
if app is None:
# create a new application
app = QtGui.QApplication([])
# use timer to process events periodically
processing_events = {}
def qt_timer_callback(timer_id, time):
if timer_id in processing_events:
return
processing_events[timer_id] = True
try:
app = QtCore.QCoreApplication.instance()
if app is not None:
app.processEvents(QtCore.QEventLoop.AllEvents, 300)
finally:
del processing_events[timer_id]
timer.set_timer(100, qt_timer_callback)
return app
| unlicense |
nateGeorge/IDmyDog | process_ims/other/2d_haralick_map.py | 1 | 3493 | from __future__ import print_function
import pandas as pd
import pickle as pk
import cv2
import os
import re
import progressbar
import imutils
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from mahotas.features import haralick
import json
from sklearn.decomposition import PCA
plt.style.use('seaborn-dark')
def get_fg_bg_rects(fg):
b, g, r, a = cv2.split(fg)
h, w = fg.shape[:2]
h -= 1
w -= 1 # to avoid indexing problems
rectDims = [10, 10] # h, w of rectangles
hRects = h / rectDims[0]
wRects = w / rectDims[1]
fgRects = []
bgRects = []
for i in range(wRects):
for j in range(hRects):
pt1 = (i * rectDims[0], j * rectDims[1])
pt2 = ((i + 1) * rectDims[0], (j + 1) * rectDims[1])
# alpha is 255 over the part of the dog
if a[pt1[1], pt1[0]] == 255 and a[pt2[1], pt2[0]] == 255:
fgRects.append([pt1, pt2])
#cv2.rectangle(fgcp, pt1, pt2, [0, 0, 255], 2) # for debugging
elif a[pt1[1], pt1[0]] == 0 and a[pt2[1], pt2[0]] == 0:
bgRects.append([pt1, pt2])
#cv2.rectangle(bgcp, pt1, pt2, [0, 0, 255], 2)
return fgRects, bgRects
def get_avg_hara(im, rects):
# returns the haralick texture averaged over all rectangles in an image
if len(rects)==0:
return None
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
hara = 0
for r in rects:
# slice images as: img[y0:y1, x0:x1]
hara += haralick(im[r[0][1]:r[1][1], r[0][0]:r[1][0]]).mean(0)
hara /= (len(rects))
return hara
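# Usage sketch: average Haralick texture over one 10x10 patch of a random BGR
# image (hypothetical data, only meant to show the expected shapes)
def _hara_example():
    im = np.random.randint(0, 255, (50, 50, 3)).astype(np.uint8)
    rects = [[(0, 0), (10, 10)]]
    return get_avg_hara(im, rects).shape # (13,) - one value per Haralick feature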
def make_hara_map(im, rects):
# draws heatmap of haralick texture PCA dim1 variance
if len(rects)==0:
return None
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
hara = []
for r in rects:
# slice images as: img[y0:y1, x0:x1]
        hara.append(pcaFG.transform(haralick(gray[r[0][1]:r[1][1], r[0][0]:r[1][0]]).mean(0).reshape(1, -1)))
hara = np.array(hara)
haraMean = np.mean(hara, axis=0)
haraStd = np.std(hara, axis=0)
haraMins = np.min(hara, axis=0)
haraMaxs = np.max(hara, axis=0)
norm = (haraMaxs-haraMins)
copy = im.copy()
copy = cv2.cvtColor(copy, cv2.COLOR_BGRA2RGBA)
im = cv2.cvtColor(im, cv2.COLOR_BGRA2RGBA)
for i in range(hara.shape[0]):
brightScale = 255*(hara[i] - haraMins)/norm
bright = brightScale[0][0]
r = rects[i]
cv2.rectangle(copy, r[0], r[1], [0, bright, 0, 255], -1)
f, axarr = plt.subplots(2, 1)
axarr[0].imshow(copy)
axarr[1].imshow(im)
plt.show()
# load configuration
with open('../../config.json', 'rb') as f:
config = json.load(f)
mainImPath = config['image_dir']
pDir = config['pickle_dir']
pcaFG = pk.load(open(pDir + 'pcaFG.pk', 'rb'))
bb = pk.load(open(pDir + 'pDogs-bounding-boxes-clean.pd.pk', 'rb'))
bb.dropna(inplace=True)
# do something like sorted(bb.breed.unique().tolist())[50:] to check another breed
for breed in sorted(bb.breed.unique().tolist())[50:]:
print('breed:', breed)
cropDir = mainImPath + breed + '/grabcut/'
fgDir = cropDir + 'fg/'
fgFiles = os.listdir(fgDir)
for fi in fgFiles:
try:
fg = cv2.imread(fgDir + fi, -1) # -1 tells it to load alpha channel
except Exception as err:
print('exception:', err)
continue
fgRects, bgRects = get_fg_bg_rects(fg)
make_hara_map(fg, fgRects)
| mit |
eduardoftoliveira/oniomMacGyver | scripts/draw_PES.py | 2 | 3836 | #!/usr/bin/env python
import matplotlib as mpl
from matplotlib import pyplot as plt
import argparse
def add_adiabatic_map_to_axis(axis, style, energies, color):
""" add single set of energies to plot """
# Energy horizontal decks
x = style['START']
for energy in energies:
axis.plot([x, x+style['WIDTH']], [energy, energy],
'-%s' % color, linewidth=2)
x += style['SPACING']
# Connect steps
x = style['START']
for i in range(1, len(energies)):
x1 = x + style['WIDTH']
x2 = x + style['SPACING']
y1 = energies[i-1]
y2 = energies[i]
axis.plot([x1, x2], [y1, y2], '-%s' % color)
x += style['SPACING']
def getargs():
parser = argparse.ArgumentParser(description="""
Make plot from user provided energies.
Can read multiple sets of energies.""",
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('-o', '--output',
default='PES.svg',
help='File name of output figure')
parser.add_argument('--dpi',
default=300, type=int,
help='Resolution for bitmaps')
parser.add_argument('-e', '--energies',
nargs='+', type=float, action='append',
help='Energies for any number of stationary points')
parser.add_argument('-l', '--labels', nargs='+',
help='Name of stationary points')
parser.add_argument('-c', '--colors', nargs='+',
help='Color codes')
args = parser.parse_args()
# less colors than PES ? add 'k'
if args.colors:
missing_colors = len(args.energies) - len(args.colors)
missing_colors = (missing_colors > 0) * missing_colors
args.colors += 'k' * missing_colors
return args
def makelabels(N):
""" Make automatic labels: TS1, INT1, TS2, etc.."""
labels = ['R']
n_ts = N / 2
n_i = (N - 2) / 2
n_i = n_i * (n_i > 0) # becomes zero if negative
for i in range(n_ts + n_i):
if i % 2:
labels.append('INT%d' % (i/2+1))
else:
labels.append('TS%d' % (i/2+1))
if N % 2 and N >= 3:
labels.append('P')
return labels
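# Worked example (sketch): reactant, alternating TS/INT labels, then a product
# label when the number of stationary points is odd.
def _makelabels_example():
    return makelabels(5) # ['R', 'TS1', 'INT1', 'TS2', 'P']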
def configure_axis_limits(axis, style, energies):
# Appearance
ymin, ymax = float('+inf'), float('-inf')
maxlen = 0
for energy_set in energies:
ymin = min(ymin, min(energy_set))
ymax = max(ymax, max(energy_set))
maxlen = max(len(energy_set), maxlen)
yrange = ymax-ymin
axis.set_ylim(ymin-0.1*yrange, ymax+0.1*yrange)
xmax = style['START']*2 + style['WIDTH'] + (maxlen-1)*style['SPACING']
axis.set_xlim(0, xmax)
axis.set_xticks([
style['START']+i*style['SPACING']+style['WIDTH']/2.0 for i in range(maxlen)])
return maxlen
def main():
# get user input
args = getargs()
# important style features
style = {
'WIDTH' : 4, # width of horizontal bars
'SPACING' : 10, # spacing between center of horizontal bars
'START' : 3 # x-offset from y-axis
}
# Configure Figure
fig = plt.gcf()
fig.set_size_inches(3.3, 2.5)
mpl.rcParams.update({'font.size': 7, 'axes.linewidth':0.5})
plt.subplots_adjust(bottom=.15)
plt.subplots_adjust(left=.15)
plt.ylabel('Energy (kcal/mol)')
plt.xlabel('Reaction coordinate')
ax = fig.gca()
ax.grid(True)
maxlen = configure_axis_limits(ax, style, args.energies)
if not args.labels:
args.labels = makelabels(maxlen)
ax.set_xticklabels(args.labels)
# plot stuff
color = 'k'
for j,energies in enumerate(args.energies):
if args.colors:
color = args.colors[j]
add_adiabatic_map_to_axis(ax, style, energies, color)
plt.savefig(args.output, dpi=args.dpi)
if __name__ == '__main__':
main()
| gpl-3.0 |
sambitgaan/nupic | examples/opf/clients/hotgym/prediction/one_gym/nupic_output.py | 32 | 6059 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num
except ImportError:
pass
WINDOW = 100
class NuPICOutput(object):
__metaclass__ = ABCMeta
def __init__(self, names, showAnomalyScore=False):
self.names = names
self.showAnomalyScore = showAnomalyScore
@abstractmethod
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
pass
@abstractmethod
def close(self):
pass
class NuPICFileOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICFileOutput, self).__init__(*args, **kwargs)
self.outputFiles = []
self.outputWriters = []
self.lineCounts = []
headerRow = ['timestamp', 'kw_energy_consumption', 'prediction']
for name in self.names:
self.lineCounts.append(0)
outputFileName = "%s_out.csv" % name
print "Preparing to output %s data to %s" % (name, outputFileName)
outputFile = open(outputFileName, "w")
self.outputFiles.append(outputFile)
outputWriter = csv.writer(outputFile)
self.outputWriters.append(outputWriter)
outputWriter.writerow(headerRow)
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
for index in range(len(self.names)):
timestamp = timestamps[index]
actual = actualValues[index]
prediction = predictedValues[index]
writer = self.outputWriters[index]
if timestamp is not None:
outputRow = [timestamp, actual, prediction]
writer.writerow(outputRow)
self.lineCounts[index] += 1
def close(self):
for index, name in enumerate(self.names):
self.outputFiles[index].close()
print "Done. Wrote %i data lines to %s." % (self.lineCounts[index], name)
class NuPICPlotOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICPlotOutput, self).__init__(*args, **kwargs)
# Turn matplotlib interactive mode on.
plt.ion()
self.dates = []
self.convertedDates = []
self.actualValues = []
self.predictedValues = []
self.actualLines = []
self.predictedLines = []
self.linesInitialized = False
self.graphs = []
plotCount = len(self.names)
plotHeight = max(plotCount * 3, 6)
fig = plt.figure(figsize=(14, plotHeight))
gs = gridspec.GridSpec(plotCount, 1)
for index in range(len(self.names)):
self.graphs.append(fig.add_subplot(gs[index, 0]))
plt.title(self.names[index])
plt.ylabel('KW Energy Consumption')
plt.xlabel('Date')
plt.tight_layout()
def initializeLines(self, timestamps):
for index in range(len(self.names)):
print "initializing %s" % self.names[index]
# graph = self.graphs[index]
self.dates.append(deque([timestamps[index]] * WINDOW, maxlen=WINDOW))
self.convertedDates.append(deque(
[date2num(date) for date in self.dates[index]], maxlen=WINDOW
))
self.actualValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
self.predictedValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
actualPlot, = self.graphs[index].plot(
self.dates[index], self.actualValues[index]
)
self.actualLines.append(actualPlot)
predictedPlot, = self.graphs[index].plot(
self.dates[index], self.predictedValues[index]
)
self.predictedLines.append(predictedPlot)
self.linesInitialized = True
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
# We need the first timestamp to initialize the lines at the right X value,
# so do that check first.
if not self.linesInitialized:
self.initializeLines(timestamps)
for index in range(len(self.names)):
self.dates[index].append(timestamps[index])
self.convertedDates[index].append(date2num(timestamps[index]))
self.actualValues[index].append(actualValues[index])
self.predictedValues[index].append(predictedValues[index])
# Update data
self.actualLines[index].set_xdata(self.convertedDates[index])
self.actualLines[index].set_ydata(self.actualValues[index])
self.predictedLines[index].set_xdata(self.convertedDates[index])
self.predictedLines[index].set_ydata(self.predictedValues[index])
self.graphs[index].relim()
self.graphs[index].autoscale_view(True, True, True)
plt.draw()
plt.legend(('actual','predicted'), loc=3)
def close(self):
plt.ioff()
plt.show()
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
| agpl-3.0 |
tsherwen/AC_tools | Scripts/2D_GEOSChem_slice_subregion_plotter_example.py | 1 | 2934 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Plotter for 2D slices of GEOS-Chem output NetCDFs files.
NOTES
---
 - This is set up for Cly, but many other options (plot/species) are available
 by just updating the passed variables/plotting function called.
"""
import AC_tools as AC
import numpy as np
import matplotlib.pyplot as plt
def main():
"""
Basic plotter of NetCDF files using AC_tools
"""
# --- Local settings hardwired here...
fam = 'Cly' # Family to plot
# print species in family for reference...
print((AC.GC_var(fam)))
# --- Get working directory etc from command line (as a dictionary object)
    # (1st argument is the file directory/folder, 2nd is the filename)
Var_rc = AC.get_default_variable_dict()
# Get details on extracted data (inc. resolution)
Data_rc = AC.get_shared_data_as_dict(Var_rc=Var_rc)
# --- extract data and units of data for family/species...
arr, units = AC.fam_data_extractor(wd=Var_rc['wd'], fam=fam,
res=Data_rc['res'], rtn_units=True, annual_mean=False)
# --- Process data (add and extra processing of data here... )
# take average over time
print((arr.shape))
arr = arr.mean(axis=-1)
# Select surface values
print((arr.shape))
arr = arr[..., 0]
# convert to pptv
arr = arr*1E12
units = 'pptv'
# --- Plot up data...
print((arr.shape))
# - Plot a (very) simple plot ...
# AC.map_plot( arr.T, res=Data_rc['res'] )
# - plot a slightly better plot...
# (loads of options here - just type help(AC.plot_spatial_figure) in ipython)
# set range for data...
fixcb = np.array([0., 100.])
# number of ticks on colorbar (make sure the fixcb range divides by this)
nticks = 6
interval = (1/3.) # number of lat/lon labels... (x*15 degrees... )
# set limits of plot
lat_min = 5.
lat_max = 75.
lon_min = -30.
lon_max = 60.
left_cb_pos = 0.85 # set X (fractional) position
axis_titles = True # add labels for lat and lon
# title for plot
title = "Plot of annual average {}".format(fam)
# save as pdf (just set to True) or show?
# figsize = (7,5) # figsize to use? (e.g. square or rectangular plot)
# call plotter...
AC.plot_spatial_figure(arr, res=Data_rc['res'], units=units, fixcb=fixcb,
lat_min=lat_min, lat_max=lat_max, lon_min=lon_min, lon_max=lon_max,
axis_titles=axis_titles, left_cb_pos=left_cb_pos,
nticks=nticks, interval=interval, title=title, show=False)
    # are the spacings right? - if not, just update the values below
bottom = 0.1
top = 0.9
left = 0.1
right = 0.9
fig = plt.gcf()
fig.subplots_adjust(bottom=bottom, top=top, left=left, right=right)
# show and save as PDF?
plt.savefig('pete_plot.png')
AC.show_plot()
if __name__ == "__main__":
main()
| mit |