repo_name (stringlengths 7-60) | path (stringlengths 6-134) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 1.04k-149k) | license (stringclasses 12 values)
---|---|---|---|---|---|
RegulatoryGenomicsUPF/pyicoteo | pyicoteolib/enrichment.py | 1 | 40209 | """
Pyicoteo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, os
import math
import random
from core import Cluster, Region, InvalidLine, InsufficientData, ConversionNotSupported
from defaults import *
import utils
import bam
from regions import AnnotationGene, AnnotationTranscript, AnnotationExon, RegionWriter, read_gff_file, get_exons, get_introns, gene_slide
import warnings
try:
from shutil import move
except:
from os import rename as move
"""
Differential expression and MA plot visualization module.
"""
def _region_from_dual(self, line):
try:
self.cluster_aux.clear()
self.cluster_aux.read_line(line)
strand = None
if self.stranded_analysis:
strand = self.cluster_aux.strand
ret = Region(self.cluster_aux.name, self.cluster_aux.start, self.cluster_aux.end, name2=self.cluster_aux.name2, strand=strand)
self.cluster_aux.clear()
return ret
except ValueError:
pass #discarding header
def __calc_reg_write(self, region_file, count, calculated_region):
if count > self.region_mintags:
region_file.write(calculated_region.write())
def calculate_region(self):
"""
Calculate a region file using the reads present in both main files to be analyzed.
"""
self.logger.info('Generating regions...')
self.sorted_region_path = '%s/calcregion_%s.bed'%(self._output_dir(), os.path.basename(self.current_output_path))
region_file = open(self.sorted_region_path, 'wb')
if self.region_magic:
regwriter = RegionWriter(self.gff_file, region_file, self.region_magic, no_sort=self.no_sort, logger=self.logger, write_as=BED, galaxy_workarounds=self.galaxy_workarounds)
regwriter.write_regions()
dual_reader = utils.DualSortedReader(self.current_experiment_path, self.current_control_path, self.experiment_format, self.logger)
if self.stranded_analysis:
calculate_region_stranded(self, dual_reader, region_file)
else:
calculate_region_notstranded(self, dual_reader, region_file)
region_file.flush()
def __cr_append(self, regions, region):
regions.append(region)
def calculate_region_notstranded(self, dual_reader, region_file):
calculated_region = Region()
readcount = 1
for line in dual_reader:
if not calculated_region: #first region only
calculated_region = _region_from_dual(self, line)
calculated_region.end += self.proximity
else:
new_region = _region_from_dual(self, line)
new_region.end += self.proximity
if calculated_region.overlap(new_region):
calculated_region.join(new_region)
readcount += 1
else:
calculated_region.end -= self.proximity
__calc_reg_write(self, region_file, readcount, calculated_region)
calculated_region = new_region.copy()
readcount = 1
if calculated_region:
calculated_region.end -= self.proximity
__calc_reg_write(self, region_file, readcount, calculated_region)
def calculate_region_stranded(self, dual_reader, region_file):
temp_region_file = open(self.sorted_region_path, 'wb')
region_plus = Region()
region_minus = Region()
regions = []
numreads_plus = 1
numreads_minus = 1
dual_reader = utils.DualSortedReader(self.current_experiment_path, self.current_control_path, self.experiment_format, self.logger)
for line in dual_reader:
new_region = _region_from_dual(self, line)
new_region.end += self.proximity
if not (region_plus and new_region.strand == PLUS_STRAND):
region_plus = _region_from_dual(self, line)
elif not (region_minus and new_region.strand == MINUS_STRAND): #minus-strand counterpart of the check above (assumes the MINUS_STRAND constant from defaults)
region_minus = _region_from_dual(self, line)
else:
if region_plus.overlap(new_region) and region_plus.strand == new_region.strand:
region_plus.join(new_region)
numreads_plus += 1
elif region_minus.overlap(new_region) and region_minus.strand == new_region.strand:
region_minus.join(new_region)
numreads_minus += 1
else:
if new_region.strand == region_plus.strand:
region_plus.end -= self.proximity
self.__calc_reg_write(region_file, numreads_plus, region_plus)
region_plus = new_region.copy()
numreads_plus = 1
else:
region_minus.end -= self.proximity
self.__calc_reg_write(region_file, numreads_minus, region_minus)
region_minus = new_region.copy()
numreads_minus = 1
if region_plus:
region_plus.end -= self.proximity
regions.append(region_plus)
if region_minus:
region_minus.end -= self.proximity
regions.append(region_minus)
regions.sort(key=lambda x:(x.name, x.start, x.end, x.strand))
for region in regions:
region_file.write(region.write())
def get_zscore(x, mean, sd):
if sd > 0:
return float(x-mean)/sd
else:
return 0 #These points are weird anyway
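# Illustrative use of get_zscore (hypothetical numbers): a score of 5.0 in a window
# with mean 2.0 and standard deviation 1.5 gives (5.0 - 2.0) / 1.5 = 2.0; when sd is 0
# the function falls back to 0 instead of dividing by zero.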
def read_interesting_regions(self, file_path):
regs = []
try:
regs_file = open(file_path, 'r')
for line in regs_file:
regs.append(line.strip())
except IOError as ioerror:
self.logger.warning("Interesting regions file not found")
return regs # memory inefficient if there's a large number of interesting regions
def plot_enrichment(self, file_path):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
if self.postscript:
import matplotlib
matplotlib.use("PS")
from matplotlib.pyplot import *
from matplotlib import rcParams
rcParams.update({'font.size': 22})
rcParams['legend.fontsize'] = 14
#decide labels
if self.label1:
label_main = self.label1
else:
if self.real_control_path and self.real_experiment_path:
label_main = '%s VS %s'%(os.path.basename(self.real_experiment_path), os.path.basename(self.real_control_path))
else:
label_main = "A VS B"
if self.label2:
label_control = self.label2
else:
if self.replica_path:
label_control = '%s(A) VS %s(A)'%(os.path.basename(self.real_experiment_path), os.path.basename(self.replica_path))
else:
label_control = 'Background distribution'
#self.logger.info("Interesting regions path: %s" % (self.interesting_regions))
interesting_regs = []
if self.interesting_regions:
self.logger.info("Reading interesting regions...")
interesting_regs = read_interesting_regions(self, self.interesting_regions)
#self.logger.info("Interesting regions: %s" % (interesting_regs))
#self.logger.info("Plot path: %s" % (file_path))
interesting_A = []
interesting_M = []
#self.logger.info("disable_significant: %s" % (self.disable_significant_color))
A = []
A_prime = []
M = []
M_significant = []
A_significant = []
M_prime = []
A_medians = []
points = []
minus_points = []
all_points = []
figure(figsize=(14,22))
biggest_A = -sys.maxint #for drawing
smallest_A = sys.maxint #for drawing
biggest_M = 0 #for drawing
self.logger.info("Loading table...")
for line in open(file_path):
sline = line.split()
try:
enrich = dict(zip(enrichment_keys, sline))
# WARNING: for slide inter and slide intra: name2 = 'start:end' (no gene_id, FIXME?)
name2 = enrich['name2'].split(':')
gene_id = name2[0]
if len(name2) >= 2:
transcript_id = name2[1] # consider transcript_id? (exons)
else:
transcript_id = None
if gene_id in interesting_regs or transcript_id in interesting_regs:
interesting_M.append(float(enrich["M"]))
interesting_A.append(float(enrich["A"]))
biggest_A = max(biggest_A, float(enrich["A"]))
smallest_A = min(smallest_A, float(enrich["A"]))
biggest_M = max(biggest_M, abs(float(enrich["M"])))
biggest_A = max(biggest_A, float(enrich["A_prime"]))
smallest_A = min(smallest_A, float(enrich["A_prime"]))
biggest_M = max(biggest_M, abs(float(enrich["M_prime"])))
positive_point = self.zscore*float(enrich["sd"])+float(enrich["mean"])
negative_point = -self.zscore*float(enrich["sd"])+float(enrich["mean"])
A_median = float(enrich["A_median"])
all_points.append((A_median, positive_point, negative_point))
if abs(float(enrich["zscore"])) < self.zscore:
M.append(float(enrich["M"]))
A.append(float(enrich["A"]))
else:
M_significant.append(float(enrich["M"]))
A_significant.append(float(enrich["A"]))
M_prime.append(float(enrich["M_prime"]))
A_prime.append(float(enrich["A_prime"]))
except ValueError:
pass #to skip the header
all_points.sort(key= lambda x:x[0])
for t in all_points:
A_medians.append(t[0])
points.append(t[1])
minus_points.append(t[2])
if points:
margin = 1.1
A_medians.append(biggest_A*margin)
points.append(points[-1])
minus_points.append(minus_points[-1])
A_medians.insert(0, smallest_A)
points.insert(0, points[0])
minus_points.insert(0, minus_points[0])
self.logger.info("Plotting points...")
#Background plot
subplot(211, axisbg="lightyellow")
xlabel('Average', fontsize=30)
ylabel('Log2 ratio', fontsize=30)
axis([smallest_A*margin, biggest_A*margin, -biggest_M*margin, biggest_M*margin])
plot(A_prime, M_prime, '.', label=label_control, color = '#666666')
plot(A_medians, points, 'r--', label="Z-score (%s)"%self.zscore)
plot(A_medians, minus_points, 'r--')
axhline(0, linestyle='--', color="grey", alpha=0.75)
leg = legend(fancybox=True, scatterpoints=1, numpoints=1, loc=2, ncol=4, mode="expand")
leg.get_frame().set_alpha(0.5)
#Experiment plot
subplot(212, axisbg="lightyellow")
axis([smallest_A*margin, biggest_A*margin, -biggest_M*margin, biggest_M*margin])
plot(A, M, 'k.', label=label_main)
if self.disable_significant_color:
significant_marker = 'ko'
else:
significant_marker = 'ro'
plot(A_significant, M_significant, significant_marker, label="%s (significant)"%label_main)
plot(A_medians, points, 'r--', label="Z-score (%s)"%self.zscore)
plot(A_medians, minus_points, 'r--')
if self.interesting_regions:
interesting_label = label_main + ' (interesting)'
plot(interesting_A, interesting_M, 'H', label=interesting_label, color='#00EE00') # plotting "interesting" regions
axhline(0, linestyle='--', color="grey", alpha=0.75)
xlabel('Average', fontsize=30)
ylabel('Log2 ratio', fontsize=30)
leg2 = legend(fancybox=True, scatterpoints=1, numpoints=1, loc=2, ncol=4)
leg2.get_frame().set_alpha(0.7)
self._save_figure("enrichment_MA", width=500, height=2800)
else:
self.logger.warning("Nothing to plot.")
except ImportError:
if self.debug:
raise
__matplotlibwarn(self)
def __matplotlibwarn(self):
#FIXME move to utils.py or plotting module
self.logger.warning('Pyicos cannot find an installation of matplotlib, so no plot will be drawn. If you want to get a plot with the correlation values, install the matplotlib library.')
def __calc_M(signal_a, signal_b):
return math.log(float(signal_a)/float(signal_b), 2)
def __calc_A(signal_a, signal_b):
return (math.log(float(signal_a), 2)+math.log(float(signal_b), 2))/2
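# M and A follow the usual MA-plot definitions: M = log2(signal_a / signal_b) is the
# log ratio and A = (log2(signal_a) + log2(signal_b)) / 2 is the average log intensity.
# Quick check with hypothetical signals 8 and 2:
#   >>> __calc_M(8, 2), __calc_A(8, 2)
#   (2.0, 2.0)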
def _calculate_MA(self, region_path, read_counts, factor = 1, replica_factor = 1, file_a_reader=None, file_b_reader=None, replica_reader=None):
tags_a = []
tags_b = []
numreads_background_1 = 0
numreads_background_2 = 0
total_reads_background_1 = 0
total_reads_background_2 = 0
self.logger.debug("Inside _calculate_MA")
self.regions_analyzed_count = 0
enrichment_result = [] #This will hold the name, start and end of the region, plus the A, M, A' and M'
if NOWRITE not in self.operations:
out_file = open(self.current_output_path, 'wb')
for region_line in open(region_path):
sline = region_line.split()
region_of_interest = self._region_from_sline(sline)
if region_of_interest:
region_a = None
replica = None
replica_tags = None
signal_a = -1
signal_b = -1
signal_background_1 = -1
signal_background_2 = -1
swap1 = Region()
swap2 = Region()
if read_counts:
signal_a = float(sline[6])
signal_b = float(sline[7])*factor
signal_background_1 = float(sline[8])
signal_background_2 = float(sline[9])*replica_factor
if CHECK_REPLICAS in self.operations:
self.experiment_values.append(signal_background_1)
self.replica_values.append(signal_background_2)
else:
self.logger.debug("Reading tags for %s ..."%region_of_interest)
if self.experiment_format == BAM:
tags_a = len(file_a_reader.get_overlaping_clusters(region_of_interest, overlap=self.overlap))
tags_b = len(file_b_reader.get_overlaping_clusters(region_of_interest, overlap=self.overlap))
else:
tags_a = file_a_reader.get_overlaping_counts(region_of_interest, overlap=self.overlap)
tags_b = file_b_reader.get_overlaping_counts(region_of_interest, overlap=self.overlap)
if self.use_replica:
if self.experiment_format == BAM:
replica_tags = len(replica_reader.get_overlaping_clusters(region_of_interest, overlap=self.overlap))
else:
replica_tags = replica_reader.get_overlaping_counts(region_of_interest, overlap=self.overlap)
self.logger.debug("... done. tags_a: %s tags_b: %s"%(tags_a, tags_b))
#if we are using pseudocounts, use the union, use the intersection otherwise
if (self.pseudocount and (tags_a or tags_b)) or (not self.pseudocount and tags_a and tags_b):
signal_a = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount, factor, self.total_reads_a, tags_a)
signal_b = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount, factor, self.total_reads_b, tags_b)
self.already_norm = True
if not self.counts_file:
if (self.pseudocount and (tags_a or tags_b)) or (not self.pseudocount and tags_a and tags_b):
if self.use_replica:
replica = region_of_interest.copy()
#replica.add_tags(replica_tags)
numreads_background_1 = tags_a
numreads_background_2 = replica_tags
total_reads_background_1 = self.total_reads_a
total_reads_background_2 = self.total_reads_replica
signal_background_1 = signal_a
signal_background_2 = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount,
replica_factor, self.total_reads_replica, replica_tags)
else:
numreads_background_1 = 0
numreads_background_2 = 0
for i in range(0, tags_a+tags_b):
if random.uniform(0,2) > 1:
numreads_background_1 += 1
else:
numreads_background_2 += 1
total_reads_background_1 = total_reads_background_2 = self.average_total_reads
signal_background_1 = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount,
replica_factor, self.average_total_reads, numreads_background_1)
signal_background_2 = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount,
replica_factor, self.average_total_reads, numreads_background_2)
#if there is no data in the replica or in the swap and we are not using pseudocounts, don't write the data
if signal_a > 0 and signal_b > 0 and signal_background_1 > 0 and signal_background_2 > 0 or self.use_MA:
if self.use_MA and not self.already_norm:
A = float(sline[10])
M = float(sline[11])
A_prime = float(sline[16])
M_prime = float(sline[17])
else:
if not self.already_norm: #TODO refactor
if self.len_norm: #read per kilobase in region
signal_a = 1e3*(float(signal_a)/len(region_of_interest))
signal_b = 1e3*(float(signal_b)/len(region_of_interest))
signal_background_1 = 1e3*(float(signal_background_1)/len(region_of_interest))
signal_background_2 = 1e3*(float(signal_background_2)/len(region_of_interest))
if self.n_norm: #per million reads in the sample
signal_a = 1e6*(float(signal_a)/self.total_reads_a)
signal_b = 1e6*(float(signal_b)/self.total_reads_b)
if self.use_replica:
signal_background_1 = signal_a
signal_background_2 = 1e6*(float(signal_background_2)/self.total_reads_replica)
else:
signal_background_1 = 1e6*(float(signal_background_1)/self.average_total_reads)
signal_background_2 = 1e6*(float(signal_background_2)/self.average_total_reads)
A = __calc_A(signal_a, signal_b)
M = __calc_M(signal_a, signal_b)
A_prime = __calc_A(signal_background_1, signal_background_2)
M_prime = __calc_M(signal_background_1, signal_background_2)
if CHECK_REPLICAS in self.operations:
self.experiment_values.append(signal_background_1)
self.replica_values.append(signal_background_2)
if NOWRITE not in self.operations:
out_file.write("%s\n"%("\t".join([region_of_interest.write().rstrip("\n"), str(signal_a), str(signal_b), str(signal_background_1), str(signal_background_2), str(A), str(M), str(self.total_reads_a), str(self.total_reads_b), str(tags_a), str(tags_b), str(A_prime), str(M_prime), str(total_reads_background_1), str(total_reads_background_2), str(numreads_background_1), str(numreads_background_2)])))
self.regions_analyzed_count += 1
self.logger.debug("LEAVING _calculate_MA")
if NOWRITE in self.operations:
return ""
else:
out_file.flush()
out_file.close()
# Outputting to HTML (if specified)
if self.html_output is not None:
self.logger.info("Generating HTML")
try:
from jinja2 import Environment, PackageLoader, Markup
except:
self.logger.error("Could not find the jinja2 library")
return out_file.name
loadr = PackageLoader('pyicoteolib', 'templates')
env = Environment(loader=loadr)
template = env.get_template('enrich_html.html')
def jinja_read_file(filename):
f = open(filename, 'r')
#for line in f:
# print line
txt = ''.join(f.readlines())
f.close()
return txt
env.globals['jinja_read_file'] = jinja_read_file
if self.galaxy_workarounds: # Galaxy changes the working directory when outputting multiple files
parent_dir = "./"
else:
parent_dir = os.sep.join(out_file.name.split(os.sep)[0:-1]) + "/"
plot_path = parent_dir + "enrichment_MA_" + out_file.name.split(os.sep)[-1] + ".png"
bed_path = parent_dir + out_file.name.split(os.sep)[-1]
html_file = open(self.html_output, 'w')
html_file.write(template.render({'page_title': 'Enrichment results', 'results_output': jinja_read_file(out_file.name), 'plot_path': plot_path, 'bed_path': bed_path}))
html_file.flush()
html_file.close()
return out_file.name
def _calculate_total_lengths(self):
msg = "Calculating enrichment in regions"
if self.counts_file:
self.sorted_region_path = self.counts_file
if (not self.total_reads_a or not self.total_reads_b or (not self.total_reads_replica and self.use_replica)) and not self.use_MA:
self.logger.info("... counting from counts file...")
self.total_reads_a = 0
self.total_reads_b = 0
if self.total_reads_replica:
self.total_reads_replica = 0
else:
self.total_reads_replica = 1
for line in open(self.counts_file):
try:
enrich = dict(zip(enrichment_keys, line.split()))
self.total_reads_a += float(enrich["signal_a"])
self.total_reads_b += float(enrich["signal_b"])
if self.use_replica:
self.total_reads_replica += float(enrich["signal_prime_2"])
except ValueError:
self.logger.debug("(Counting) skip header...")
else:
self.logger.info("... counting number of lines in files...")
if not self.total_reads_a:
if self.experiment_format == BAM:
self.total_reads_a = bam.size(self.current_experiment_path)
else:
self.total_reads_a = sum(1 for line in utils.open_file(self.current_experiment_path, self.experiment_format, logger=self.logger))
if not self.total_reads_b:
if self.experiment_format == BAM:
self.total_reads_b = bam.size(self.current_control_path)
else:
self.total_reads_b = sum(1 for line in utils.open_file(self.current_control_path, self.control_format, logger=self.logger))
if self.use_replica and not self.total_reads_replica:
if self.experiment_format == BAM:
self.total_reads_replica = bam.size(self.replica_path)
else:
self.total_reads_replica = sum(1 for line in utils.open_file(self.replica_path, self.experiment_format, logger=self.logger))
self.logger.debug("Number lines in experiment A: %s Experiment B: %s"%(self.total_reads_a, self.total_reads_b))
if self.use_replica:
msg = "%s using replicas..."%msg
else:
msg = "%s using swap..."%msg
self.logger.info(msg)
self.average_total_reads = (self.total_reads_a+self.total_reads_b)/2
def enrichment(self):
file_a_reader = file_b_reader = replica_reader = None
self.use_replica = (bool(self.replica_path) or (bool(self.counts_file) and self.use_replica_flag))
self.logger.debug("Use replica: %s"%self.use_replica)
if USE_MA not in self.operations:
_calculate_total_lengths(self)
if not self.counts_file:
file_a_reader = utils.read_fetcher(self.current_experiment_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential, only_counts=True)
file_b_reader = utils.read_fetcher(self.current_control_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential, only_counts=True)
if self.use_replica:
replica_reader = utils.read_fetcher(self.current_replica_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential, only_counts=True)
if self.sorted_region_path:
self.logger.info('Using region file %s (%s)'%(self.region_path, self.region_format))
else:
calculate_region(self) #create region file semi automatically
self.total_regions = sum(1 for line in open(self.sorted_region_path))
self.logger.info("... analyzing regions, calculating normalized counts, A / M and replica or swap...")
self.already_norm = False
if self.use_MA:
ma_path = self.counts_file
else:
ma_path = self.sorted_region_path
out_path = _calculate_MA(self, ma_path, bool(self.counts_file), 1, 1, file_a_reader, file_b_reader, replica_reader)
self.already_norm = True
self.logger.debug("Already normalized: %s"%self.already_norm)
if self.tmm_norm:
if CHECK_REPLICAS in self.operations:
self.experiment_values = []
self.replica_values = []
self.logger.info("TMM Normalizing...")
tmm_factor = calc_tmm_factor(self, out_path, self.regions_analyzed_count, False)
replica_tmm_factor = 1
if self.use_replica:
replica_tmm_factor = calc_tmm_factor(self, out_path, self.regions_analyzed_count, True)
#move output file to old output
#use as input
old_output = '%s/notnormalized_%s'%(self._current_directory(), os.path.basename(self.current_output_path))
move(os.path.abspath(self.current_output_path), old_output)
out_path = _calculate_MA(self, old_output, True, tmm_factor, replica_tmm_factor, True) #recalculate with the new factor, using the counts again
if self.quant_norm:
self.logger.info("Full quantile normalization...")
signal_a = []
signal_prime_1 = []
enrich = []
for line in open(out_path):
sline = line.split()
enrich_line = dict(zip(enrichment_keys, sline))
enrich.append(enrich_line)
signal_a.append(float(enrich_line['signal_a']))
signal_prime_1.append(float(enrich_line['signal_prime_1']))
#full quantile normalization
signal_a.sort()
enrich.sort(key=lambda x:float(x['signal_b']))
quant_counts = open('%s/quantcounts_%s'%(self._current_directory(), os.path.basename(self.current_output_path)), 'w')
for i in range(len(enrich)):
enrich[i]['signal_b'] = signal_a[i]
self.logger.info("Full quantile normalization replica...")
#full quantile normalization of the replica
signal_prime_1.sort()
enrich.sort(key=lambda x:float(x['signal_prime_2']))
for i in range(len(enrich)):
enrich[i]['signal_prime_2'] = signal_prime_1[i]
quant_counts.write("%s\n"%"\t".join(str(enrich[i][key]) for key in enrichment_keys[:20])) #write the lines
quant_counts.flush()
out_path = _calculate_MA(self, quant_counts.name, True, 1, 1, True) #recalculate with the new factor, using the counts again
self._manage_temp_file(quant_counts.name)
self.logger.info("%s regions analyzed."%self.regions_analyzed_count)
if NOWRITE not in self.operations:
self.logger.info("Enrichment result saved to %s"%self.current_output_path)
if CHECK_REPLICAS in self.operations:
check_replica(self)
return out_path
def _sub_tmm(counts_a, counts_b, reads_a, reads_b):
return (counts_a-reads_a)/(counts_a*reads_a) + (counts_b-reads_b)/(counts_b*reads_b)
def calc_tmm_factor(self, file_counts, total_regions, replica):
if replica:
signal_1 = "signal_prime_1"
signal_2 = "signal_prime_2"
M = "M_prime"
reads_2 = self.total_reads_replica
else:
signal_1 = "signal_a"
signal_2 = "signal_b"
M = "M"
reads_2 = self.total_reads_b
values_list = []
#read the file inside the values_list
for line in open(file_counts):
sline = line.split()
values_list.append(dict(zip(enrichment_keys, sline)))
a_trim_number = int(round(total_regions*self.a_trim))
#discard the bad A
self.logger.debug("Removing the worst A (%s regions, %s percent)"%(a_trim_number, self.a_trim*100))
values_list.sort(key=lambda x:float(x["A"])) #sort by A
for i in range (0, a_trim_number):
values_list.pop(0)
values_list.sort(key=lambda x:float(x[M])) #sort by M
m_trim_number = int(round(total_regions*(self.m_trim/2))) #this number is half the value of the flag, because we will trim half below, and half over
#remove on the left
for i in range(0, m_trim_number):
values_list.pop(0)
#remove on the right
for i in range(0, m_trim_number):
values_list.pop(-1)
#now calculate the normalization factor
arriba = 0
abajo = 0
for value in values_list:
w = _sub_tmm(float(value[signal_1]), float(value[signal_2]), self.total_reads_a, reads_2)
arriba += w*float(value[M])
abajo += w
try:
factor = 2**(arriba/abajo)
except ZeroDivisionError:
self.logger.warning("Division by zero, TMM factor could not be calculated.")
factor = 1
if replica:
self.logger.info("Replica TMM Normalization Factor: %s"%factor)
else:
self.logger.info("TMM Normalization Factor: %s"%factor)
return factor
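# calc_tmm_factor computes a trimmed mean of M values (TMM): the a_trim fraction of
# regions with the lowest A and the m_trim fraction with the most extreme M (half on
# each tail) are discarded, each remaining region contributes its M weighted by the
# approximate inverse-variance weight w from _sub_tmm, and the factor is
# 2 ** (sum(w * M) / sum(w)). In the accumulators above, "arriba" and "abajo" are
# Spanish for "above" (numerator) and "below" (denominator).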
def __load_enrichment_result(values_path):
ret = []
for line in open(values_path):
sline = line.split()
try:
float(sline[1])
ret.append(dict(zip(enrichment_keys, sline)))
except ValueError:
pass
return ret
def calculate_zscore(self, values_path):
num_regions = sum(1 for line in open(values_path))
bin_size = int(self.binsize*num_regions)
if bin_size < 50:
self.logger.warning("The bin size results in a sliding window smaller than 50, adjusting window to 50 in order to get statistically meaningful results.")
bin_size = 50
bin_step = max(1, int(round(self.bin_step*bin_size)))
self.logger.info("Enrichment window calculation using a sliding window size of %s, sliding with a step of %s"%(bin_size, bin_step))
self.logger.info("... calculating zscore...")
enrichment_result = __load_enrichment_result(values_path)
enrichment_result.sort(key= lambda x:(float(x["A_prime"])))
self.logger.debug("Number of loaded counts: %s"%len(enrichment_result))
self.points = []
#get the standard deviations
for i in range(0, num_regions-bin_size+bin_step, bin_step):
#get the slice
if i+bin_size < num_regions:
result_chunk = enrichment_result[i:i+bin_size]
else:
result_chunk = enrichment_result[i:] #last chunk
#retrieve the values
mean_acum = 0
a_acum = 0
Ms_replica = []
for entry in result_chunk:
mean_acum += float(entry["M_prime"])
a_acum += float(entry["A_prime"])
Ms_replica.append(float(entry["M_prime"]))
#add them to the points of mean and sd
mean = mean_acum/len(result_chunk)
sd = math.sqrt((sum((x - mean)**2 for x in Ms_replica))/len(Ms_replica))
#the A median
A_median = a_acum / len(result_chunk)
self.points.append([A_median, mean, sd]) #The A assigned to the window, the mean and the standard deviation
#self.logger.debug("Window of %s length, with A median: %s mean: %s sd: %s"%(len(result_chunk), self.points[-1][0], self.points[-1][1], self.points[-1][2], len(self.points)))
#update z scores
for entry in enrichment_result:
entry["A_median"] = 0
entry["mean"] = 0
entry["sd"] = 0
entry["zscore"] = 0
closest_A = sys.maxint
sd_position = 0
for i in range(0, len(self.points)):
new_A = self.points[i][0]
if new_A != closest_A: #skip repeated points
if abs(closest_A - float(entry["A"])) >= abs(new_A - float(entry["A"])):
closest_A = new_A
sd_position = i
else:
break #already found, no need to go further since the points are ordered
entry["A_median"] = closest_A
if self.points: #only calculate if there were windows...
__sub_zscore(self.sdfold, entry, self.points[sd_position])
if not self.points: # ... otherwise give a warning
self.logger.warning("Insufficient number of regions analyzed (%s), z-score values could not be calculated"%num_regions)
enrichment_result.sort(key=lambda x:(x["name"], int(x["start"]), int(x["end"])))
old_file_path = '%s/before_zscore_%s'%(self._current_directory(), os.path.basename(values_path)) #create path for the outdated file
move(os.path.abspath(values_path), old_file_path) #move the file
new_file = file(values_path, 'w') #open a new file in the now empty file space
if not self.skip_header:
new_file.write('\t'.join(enrichment_keys))
new_file.write('\n')
for entry in enrichment_result:
new_file.write("\t".join(str(entry[key]) for key in enrichment_keys)+"\n")
self._manage_temp_file(old_file_path)
return values_path
def __sub_zscore(sdfold, entry, point):
entry["mean"] = str(point[1])
entry["sd"] = str(point[2])
entry["zscore"] = str(get_zscore(float(entry["M"]), float(entry["mean"]), sdfold*float(entry["sd"])))
def check_replica(self):
#discard everything below the flag
new_experiment = []
new_replica = []
min_value = sys.maxint
max_value = -sys.maxint
for i in range(len(self.replica_values)):
if self.experiment_values[i] > self.count_filter and self.replica_values[i] > self.count_filter:
new_experiment.append(math.log(self.experiment_values[i], 2))
new_replica.append(math.log(self.replica_values[i], 2))
min_value = min(min_value, math.log(self.experiment_values[i], 2), math.log(self.replica_values[i], 2))
max_value = max(max_value, math.log(self.experiment_values[i], 2), math.log(self.replica_values[i], 2))
#print self.replica_values
self.experiment_values = new_experiment
self.replica_values = new_replica
try:
if self.postscript:
import matplotlib
matplotlib.use("PS")
from matplotlib.pyplot import plot, show, xlabel, ylabel, axhline, axis, clf, text, title, xlim, ylim
except:
__matplotlibwarn(self)
return 0
clf()
r_squared = utils.pearson(self.experiment_values, self.replica_values)**2
text(min_value+abs(max_value)*0.1, max_value-abs(max_value)*0.2, r'Pearson $R^2$= %s'%round(r_squared, 3), fontsize=18, bbox={'facecolor':'yellow', 'alpha':0.5, 'pad':10})
xlabel("log2(%s)"%self.experiment_label, fontsize=18)
ylabel("log2(%s)"%self.replica_label, fontsize=18)
xlim(min_value, max_value)
ylim(min_value, max_value)
title(self.title_label, fontsize=24)
plot(self.experiment_values, self.replica_values, '.')
self._save_figure("check_replica")
def check_replica_correlation(self):
"No usado, de momento"
min_tags = 20
experiment_reader = utils.read_fetcher(self.current_experiment_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential)
replica_reader = utils.read_fetcher(self.current_replica_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential)
correlations_acum = 0
num_correlations = 0
for region_line in open(self.region_path):
sline = region_line.split()
region_experiment = self._region_from_sline(sline)
region_replica = region_experiment.copy()
tags_experiment = experiment_reader.get_overlaping_clusters(region_experiment, overlap=1)
tags_replica = replica_reader.get_overlaping_clusters(region_experiment, overlap=1)
count_experiment = len(tags_experiment)
count_replica = len(tags_replica)
correlations = []
if count_experiment+count_replica > min_tags:
region_experiment.add_tags(tags_experiment, clusterize=True)
region_replica.add_tags(tags_replica, clusterize=True)
num_correlations += 1
correlation = utils.pearson(region_experiment.get_array(), region_replica.get_array())
correlations_acum += max(0, correlation)
correlations.append(correlation)
print correlations_acum/num_correlations
try:
if self.postscript:
import matplotlib
matplotlib.use("PS")
from matplotlib.pyplot import plot, boxplot, show, legend, figure, xlabel, ylabel, subplot, axhline, axis
except:
__matplotlibwarn(self)
return 0
print correlations
boxplot(correlations)
self._save_figure("check_replica") | gpl-3.0 |
jenshnielsen/basemap | examples/maskoceans.py | 4 | 1922 | from mpl_toolkits.basemap import Basemap, shiftgrid, maskoceans, interp
import numpy as np
import matplotlib.pyplot as plt
# example showing how to mask out 'wet' areas on a contour or pcolor plot.
topodatin = np.loadtxt('etopo20data.gz')
lonsin = np.loadtxt('etopo20lons.gz')
latsin = np.loadtxt('etopo20lats.gz')
# shift data so lons go from -180 to 180 instead of 20 to 380.
topoin,lons1 = shiftgrid(180.,topodatin,lonsin,start=False)
lats1 = latsin
fig=plt.figure()
# setup basemap
m=Basemap(resolution='l',projection='lcc',lon_0=-100,lat_0=40,width=8.e6,height=6.e6)
lons, lats = np.meshgrid(lons1,lats1)
x, y = m(lons, lats)
# interpolate land/sea mask to topo grid, mask ocean values.
# output may look 'blocky' near coastlines, since data is at much
# lower resolution than land/sea mask.
topo = maskoceans(lons, lats, topoin)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (original grid)')
fig=plt.figure()
# interpolate topo data to higher resolution grid (to better match
# the land/sea mask). Output looks less 'blocky' near coastlines.
nlats = 3*topoin.shape[0]
nlons = 3*topoin.shape[1]
lons = np.linspace(-180,180,nlons)
lats = np.linspace(-90,90,nlats)
lons, lats = np.meshgrid(lons, lats)
x, y = m(lons, lats)
topo = interp(topoin,lons1,lats1,lons,lats,order=1)
# interpolate land/sea mask to topo grid, mask ocean values.
topo = maskoceans(lons, lats, topo)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (data on finer grid)')
plt.show()
| gpl-2.0 |
eickenberg/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 19 | 2844 | """
Testing for mean shift clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
"""Test estimate_bandwidth"""
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
""" Test MeanShift algorithm """
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_meanshift_predict():
"""Test MeanShift.predict"""
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_unfitted():
"""Non-regression: before fit, there should be not fitted attributes."""
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
"""
Test the bin seeding technique which can be used in the mean shift
algorithm
"""
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.5, 1.5], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
test_bins = get_bin_seeds(X, 0.01, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(test_result) == 6)
| bsd-3-clause |
broadinstitute/cms | cms/power/power_func.py | 1 | 8625 | ## functions for analyzing empirical/simulated CMS output
## last updated 09.14.2017 vitti@broadinstitute.org
import matplotlib as mp
mp.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import math
from scipy.stats import percentileofscore
import scipy.stats #needed for scipy.stats.norm.sf in plotManhattan below
###################
## DEFINE SCORES ##
###################
def write_master_likesfile(writefilename, model, selpop, freq,basedir, miss = "neut",):
'''adapted from run_likes_func.py'''
writefile = open(writefilename, 'w')
for score in ['ihs', 'nsl', 'delihh']:
hitlikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_" + str(freq) + "_causal.txt"#_smoothed.txt"
misslikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_" + str(freq) + "_" + miss + ".txt"#"_smoothed.txt"
#assert(os.path.isfile(hitlikesfilename) and os.path.isfile(misslikesfilename))
writefile.write(hitlikesfilename + "\n" + misslikesfilename + "\n")
for score in ['xpehh', 'fst', 'deldaf']:
hitlikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_choose_" + str(freq) + "_causal.txt"#_smoothed.txt"
misslikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_choose_" + str(freq) + "_" + miss + ".txt"#"_smoothed.txt"
#assert(os.path.isfile(hitlikesfilename) and os.path.isfile(misslikesfilename))
writefile.write(hitlikesfilename + "\n" + misslikesfilename + "\n")
writefile.close()
print("wrote to: " + writefilename)
return
###############
## REGION ID ##
###############
def get_window(istart, physpos, scores, windowlen = 100000):
window_scores = [scores[istart]]
startpos = physpos[istart]
pos = startpos
iscore = istart
while pos < (startpos + windowlen):
iscore += 1
if iscore >= len(scores):
break
window_scores.append(scores[iscore])
pos = physpos[iscore]
#print(str(pos) + " " + str(startpos))
return window_scores
def check_outliers(scorelist, cutoff = 3):
numscores = len(scorelist)
outliers = [item for item in scorelist if item > cutoff]
numoutliers = len(outliers)
percentage = (float(numoutliers) / float(numscores)) * 100.
return percentage
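# Illustrative check (hypothetical scores): check_outliers([1, 2, 4, 5], cutoff=3)
# finds two scores above the cutoff out of four and returns 50.0.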
def check_rep_windows(physpos, scores, windowlen = 100000, cutoff = 3, totalchrlen=1000000):
'''
previous implementation: !!!! this is going to result in false positives whenever I have a small uptick right near the edge of the replicate
'''
#check window defined by each snp as starting point
rep_percentages = []
numSnps = len(physpos)
numWindows = 0
#get exhaustive windows and stop at chrom edge
for isnp in range(numSnps):
if physpos[isnp] + windowlen < totalchrlen:
numWindows +=1
else:
#print(str(physpos[isnp]) + "\t")
break
for iPos in range(numWindows):
window_scores = get_window(iPos, physpos, scores, windowlen)
percentage = check_outliers(window_scores, cutoff)
rep_percentages.append(percentage)
return rep_percentages
def merge_windows(chrom_signif, windowlen, maxGap = 100000):
print('should implement this using bedtools')
starts, ends = [], []
contig = False
this_windowlen = 0
starting_pos = 0
if len(chrom_signif) > 0:
for i_start in range(len(chrom_signif) - 1):
if not contig:
starts.append(chrom_signif[i_start])
this_windowlen = windowlen #unmerged, default
starting_pos = chrom_signif[i_start]
if ((chrom_signif[i_start] + this_windowlen) > chrom_signif[i_start + 1]): #contiguous
contig = True
this_windowlen = chrom_signif[i_start +1] + windowlen - starting_pos
#the regions can also be merged when the next snp falls beyond the current window but still within maxGap of this position
elif chrom_signif[i_start +1] >=(chrom_signif[i_start] + this_windowlen) and chrom_signif[i_start +1] < (chrom_signif[i_start] + maxGap):
contig = True
this_windowlen = chrom_signif[i_start +1] + windowlen - starting_pos
else:
contig = False
if not contig:
windowend = chrom_signif[i_start] + windowlen
ends.append(windowend)
if contig: #last region is overlapped by its predecessor
ends.append(chrom_signif[-1] + windowlen)
else:
starts.append(chrom_signif[-1])
ends.append(chrom_signif[-1] + windowlen)
assert len(starts) == len(ends)
return starts, ends
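# Illustrative merge (hypothetical positions): with chrom_signif = [100, 150, 5000],
# windowlen = 100 and maxGap = 1000, the first two windows are merged into (100, 250)
# and the last stays separate, so the function returns ([100, 5000], [250, 5100]).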
##########################
## POWER & SIGNIFICANCE ##
##########################
def calc_pr(all_percentages, threshhold):
numNeutReps_exceedThresh = 0
totalnumNeutReps = len(all_percentages)
for irep in range(totalnumNeutReps):
if len(all_percentages[irep]) != 0:
if max(all_percentages[irep]) > threshhold:
numNeutReps_exceedThresh +=1
numNeutReps_exceedThresh, totalnumNeutReps = float(numNeutReps_exceedThresh), float(totalnumNeutReps)
if totalnumNeutReps != 0:
pr = numNeutReps_exceedThresh / totalnumNeutReps
else:
pr = 0
print('ERROR; empty set')
return pr
def get_causal_rank(values, causal_val):
if np.isnan(causal_val):
return(float('nan'))
assert(causal_val in values)
cleanvals = []
for item in values:
if not np.isnan(item) and not np.isinf(item):
cleanvals.append(item)
values = cleanvals
values.sort()
values.reverse()
causal_rank = values.index(causal_val)
return causal_rank
def get_cdf_from_causal_ranks(causal_ranks):
numbins = max(causal_ranks) #? heuristic
counts, bins = np.histogram(causal_ranks, bins=numbins, normed = True) #doublecheck
cdf = np.cumsum(counts)
return bins, cdf
def get_pval(all_simscores, thisScore):
r = np.searchsorted(all_simscores,thisScore)
n = len(all_simscores)
pval = 1. - ((r + 1.) / (n + 1.))
if pval > 0:
#pval *= nSnps #Bonferroni
return pval
else:
#print("r: " +str(r) + " , n: " + str(n))
pval = 1. - (r/(n+1))
#pval *= nSnps #Bonferroni
return pval
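# Illustrative use of get_pval (hypothetical numbers; all_simscores must already be
# sorted ascending for np.searchsorted): with 999 neutral scores and an observed score
# that exceeds 950 of them, r = 950 and the empirical p-value is
# 1 - (951 / 1000) = 0.049.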
###############
## VISUALIZE ##
###############
def quick_plot(ax, pos, val, ylabel,causal_index=-1):
ax.scatter(pos, val, s=.8)
if causal_index != -1:
ax.scatter(pos[causal_index], val[causal_index], color='r', s=4)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize('6')
ax.set_ylabel(ylabel, fontsize='6')
#ax.set_xlim([0, 1500000]) #make flexible?
ax.yaxis.set_label_position('right')
#ax.set_ylim([min(val), max(val)])
return ax
def plot_dist(allvals, savefilename= "/web/personal/vitti/test.png", numBins=1000):
#print(allvals)
#get rid of nans and infs
#cleanvals = [item for item in allvals if not np.isnan(item)]
#allvals = cleanvals
allvals = np.array(allvals)
allvals = allvals[~np.isnan(allvals)]
allvals = allvals[~np.isinf(allvals)]
#allvals = list(allvals)
#print(allvals)
print("percentile for score = 10: " + str(percentileofscore(allvals, 10)))
print("percentile for score = 15: " + str(percentileofscore(allvals, 15)))
if len(allvals) > 0:
f, ax = plt.subplots(1)
ax.hist(allvals, bins=numBins)
plt.savefig(savefilename)
print('plotted to ' + savefilename)
return
def plotManhattan(ax, neut_rep_scores, emp_scores, chrom_pos, nSnps, maxSkipVal = 0, zscores = True):
#neut_rep_scores.sort()
#print('sorted neutral scores...')
lastpos = 0
for chrom in range(1,23):
ichrom = chrom-1
if ichrom%2 == 0:
plotcolor = "darkblue"
else:
plotcolor = "lightblue"
if zscores == True:
#http://stackoverflow.com/questions/3496656/convert-z-score-z-value-standard-score-to-p-value-for-normal-distribution-in?rq=1
#Z SCORE cf SG email 103116
#pvals = [get_pval(neut_rep_scores, item) for item in emp_scores[ichrom]]
pvalues = []
for item in emp_scores[ichrom]:
if item < maxSkipVal: #speed up this process by ignoring anything obviously insignificant
pval = 1
else:
#print('scipy')
#sys.exit()
pval = scipy.stats.norm.sf(abs(item))
pvalues.append(pval)
#else:
# pval = get_pval(neut_rep_scores, item)
#pvalues.append(pval)
print("calculated pvalues for chrom " + str(chrom))
chrom_pos = range(lastpos, lastpos + len(pvalues))
logtenpvals = [(-1. * math.log10(pval)) for pval in pvalues]
ax.scatter(chrom_pos, logtenpvals, color =plotcolor, s=.5)
lastpos = chrom_pos[-1]
else:
chrom_pos = range(lastpos, lastpos + len(emp_scores[ichrom]))
ax.scatter(chrom_pos, emp_scores[ichrom], color=plotcolor, s=.5)
lastpos = chrom_pos[-1]
return ax
def plotManhattan_extended(ax, emp_scores, chrom_pos, chrom):
''' makes a figure more like in Karlsson 2013 instead of Grossman 2013'''
ax.plot(chrom_pos, emp_scores, linestyle='None', marker=".", markersize=.3, color="black")
ax.set_ylabel('chr' + str(chrom), fontsize=6, rotation='horizontal')
labels = ax.get_yticklabels()
ax.set_yticklabels(labels, fontsize=6)
ax.set_axis_bgcolor('LightGray')
return ax
| bsd-2-clause |
ishanic/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional, default np.float64
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
| bsd-3-clause |
appapantula/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
rhattersley/cartopy | lib/cartopy/tests/mpl/test_ticker.py | 3 | 8574 | # (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
from matplotlib.axes import Axes
import pytest
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import GeoAxes
from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter
def test_LatitudeFormatter_bad_axes():
formatter = LatitudeFormatter()
formatter.axis = Mock(axes=Mock(Axes, projection=ccrs.PlateCarree()))
message = 'This formatter can only be used with cartopy axes.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LatitudeFormatter_bad_projection():
formatter = LatitudeFormatter()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=ccrs.Orthographic()))
message = 'This formatter cannot be used with non-rectangular projections.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LongitudeFormatter_bad_axes():
formatter = LongitudeFormatter()
formatter.axis = Mock(axes=Mock(Axes, projection=ccrs.PlateCarree()))
message = 'This formatter can only be used with cartopy axes.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LongitudeFormatter_bad_projection():
formatter = LongitudeFormatter()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=ccrs.Orthographic()))
message = 'This formatter cannot be used with non-rectangular projections.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LatitudeFormatter():
formatter = LatitudeFormatter()
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90\u00B0S', u'60\u00B0S', u'30\u00B0S', u'0\u00B0',
u'30\u00B0N', u'60\u00B0N', u'90\u00B0N']
assert result == expected
def test_LatitudeFormatter_degree_symbol():
formatter = LatitudeFormatter(degree_symbol='')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90S', u'60S', u'30S', u'0',
u'30N', u'60N', u'90N']
assert result == expected
def test_LatitudeFormatter_number_format():
formatter = LatitudeFormatter(number_format='.2f')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90.00\u00B0S', u'60.00\u00B0S', u'30.00\u00B0S',
u'0.00\u00B0', u'30.00\u00B0N', u'60.00\u00B0N',
u'90.00\u00B0N']
assert result == expected
def test_LatitudeFormatter_mercator():
formatter = LatitudeFormatter()
p = ccrs.Mercator()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-15496570.739707904, -8362698.548496634,
-3482189.085407435, 0.0, 3482189.085407435,
8362698.548496634, 15496570.739707898]
result = [formatter(tick) for tick in test_ticks]
expected = [u'80\u00B0S', u'60\u00B0S', u'30\u00B0S', u'0\u00B0',
u'30\u00B0N', u'60\u00B0N', u'80\u00B0N']
assert result == expected
def test_LatitudeFormatter_small_numbers():
formatter = LatitudeFormatter(number_format='.7f')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [40.1275150, 40.1275152, 40.1275154]
result = [formatter(tick) for tick in test_ticks]
expected = [u'40.1275150\u00B0N', u'40.1275152\u00B0N',
u'40.1275154\u00B0N']
assert result == expected
def test_LongitudeFormatter_central_longitude_0():
formatter = LongitudeFormatter(dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180\u00B0W', u'120\u00B0W', u'60\u00B0W', u'0\u00B0',
u'60\u00B0E', u'120\u00B0E', u'180\u00B0E']
assert result == expected
def test_LongitudeFormatter_central_longitude_180():
formatter = LongitudeFormatter(zero_direction_label=True)
p = ccrs.PlateCarree(central_longitude=180)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'0\u00B0E', u'60\u00B0E', u'120\u00B0E', u'180\u00B0',
u'120\u00B0W', u'60\u00B0W', u'0\u00B0W']
assert result == expected
def test_LongitudeFormatter_central_longitude_120():
formatter = LongitudeFormatter()
p = ccrs.PlateCarree(central_longitude=120)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'60\u00B0W', u'0\u00B0', u'60\u00B0E', u'120\u00B0E',
u'180\u00B0', u'120\u00B0W', u'60\u00B0W']
assert result == expected
def test_LongitudeFormatter_degree_symbol():
formatter = LongitudeFormatter(degree_symbol='',
dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180W', u'120W', u'60W', u'0', u'60E', u'120E', u'180E']
assert result == expected
def test_LongitudeFormatter_number_format():
formatter = LongitudeFormatter(number_format='.2f',
dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180.00\u00B0W', u'120.00\u00B0W', u'60.00\u00B0W',
u'0.00\u00B0', u'60.00\u00B0E', u'120.00\u00B0E',
u'180.00\u00B0E']
assert result == expected
def test_LongitudeFormatter_mercator():
formatter = LongitudeFormatter(dateline_direction_label=True)
p = ccrs.Mercator()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-20037508.342783064, -13358338.895188706,
-6679169.447594353, 0.0, 6679169.447594353,
13358338.895188706, 20037508.342783064]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180\u00B0W', u'120\u00B0W', u'60\u00B0W', u'0\u00B0',
u'60\u00B0E', u'120\u00B0E', u'180\u00B0E']
assert result == expected
def test_LongitudeFormatter_small_numbers_0():
formatter = LongitudeFormatter(number_format='.7f')
p = ccrs.PlateCarree(central_longitude=0)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-17.1142343, -17.1142340, -17.1142337]
result = [formatter(tick) for tick in test_ticks]
expected = [u'17.1142343\u00B0W', u'17.1142340\u00B0W',
u'17.1142337\u00B0W']
assert result == expected
def test_LongitudeFormatter_small_numbers_180():
formatter = LongitudeFormatter(zero_direction_label=True,
number_format='.7f')
p = ccrs.PlateCarree(central_longitude=180)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-17.1142343, -17.1142340, -17.1142337]
result = [formatter(tick) for tick in test_ticks]
expected = [u'162.8857657\u00B0E', u'162.8857660\u00B0E',
u'162.8857663\u00B0E']
assert result == expected
| lgpl-3.0 |
sevenian3/ChromaStarPy | LevelPopsGasServer.py | 1 | 55996 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 24 14:13:47 2017
@author: ishort
"""
import math
import Useful
import ToolBox
#import numpy
#JB#
#from matplotlib.pyplot import plot, title, show, scatter
#storage for fits (not all may be used)
uw = []
uwa = []
uwb = []
uwStage = []
uwbStage = []
uwu = []
uwl = []
uua=[]
uub=[]
"""
#a function to create a cubic function fit extrapolation
def cubicFit(x,y):
coeffs = numpy.polyfit(x,y,3)
#returns an array of coefficents for the cubic fit of the form
#Ax^3 + Bx^2 + Cx + D as [A,B,C,D]
return coeffs
#this will work for any number of data points!
def valueFromFit(fit,x):
#return the value y for a given fit, at point x
return (fit[0]*(x**3)+fit[1]*(x**2)+fit[2]*x+fit[3])
#holds the five temperature at which we have partition function data
"""
masterTemp = [130, 500, 3000, 8000, 10000]
#JB#
#def levelPops(lam0In, logNStage, chiL, log10UwStage, gwL, numDeps, temp):
def levelPops(lam0In, logNStage, chiL, logUw, gwL, numDeps, temp):
""" Returns depth distribution of occupation numbers in lower level of b-b transition,
// Input parameters:
// lam0 - line centre wavelength in nm
// logNStage - log_e density of absorbers in relevant ion stage (cm^-3)
// logFlu - log_10 oscillator strength (unitless)
// chiL - energy of lower atomic E-level of b-b transition in eV
// Also needs atmospheric structure information:
// numDeps
// temp structure """
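# What the depth loop below computes, in natural-log form:
# log n_l = log N_stage - chiL/(k*T) + log g_l - log U(T),
# i.e. the excitation Boltzmann factor applied to the ion-stage population,
# with log U(T) interpolated from the cubic fit over masterTemp and clamped
# to the endpoint values outside 130 K - 10000 K.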
c = Useful.c()
logC = Useful.logC()
k = Useful.k()
logK = Useful.logK()
logH = Useful.logH()
logEe = Useful.logEe()
logMe = Useful.logMe()
ln10 = math.log(10.0)
logE = math.log10(math.e); #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
#//double logNl = logNlIn * ln10; // Convert to base e
#// Partition functions passed in are vectors of temperature-dependent base 10 log Us (evaluated at the masterTemp points)
#// Convert to natural logs:
#double thisLogUw, Ttheta;
thisLogUw = 0.0 # //default initialization
#logUw = [ 0.0 for i in range(5) ]
logE10 = math.log(10.0)
#print("log10UwStage ", log10UwStage)
#for kk in range(len(logUw)):
# logUw[kk] = logE10*log10UwStage[kk] #// lburns new loop
logGwL = math.log(gwL)
#//System.out.println("chiL before: " + chiL);
#// If we need to subtract chiI from chiL, do so *before* converting to tiny numbers in ergs!
#////For testing with Ca II lines using gS3 internal line list only:
#//boolean ionized = true;
#//if (ionized) {
#// //System.out.println("ionized, doing chiL - chiI: " + ionized);
#// // chiL = chiL - chiI;
#// chiL = chiL - 6.113;
#// }
#// //
#//Log of line-center wavelength in cm
logLam0 = math.log(lam0In) #// * 1.0e-7);
#// energy of b-b transition
logTransE = logH + logC - logLam0 #//ergs
if (chiL <= 0.0):
chiL = 1.0e-49
logChiL = math.log(chiL) + Useful.logEv() #// Convert lower E-level from eV to ergs
logBoltzFacL = logChiL - Useful.logK() #// Pre-factor for exponent of excitation Boltzmann factor
boltzFacL = math.exp(logBoltzFacL)
boltzFacGround = 0.0 / k #//I know - it's zero, but let's do it this way anyway
#// return a 1D numDeps array of logarithmic number densities
#// level population of lower level of bb transition (could be in either stage I or II!)
logNums = [ 0.0 for i in range(numDeps)]
#double num, logNum, expFac;
#JB#
#print("thisLogUw:",numpy.shape(logUw))
logUwFit = ToolBox.cubicFit(masterTemp,logUw)#u(T) fit
uw.append(logUwFit)
#JB#
for id in range(numDeps):
#//Determine temperature dependent partition functions Uw:
#Ttheta = 5040.0 / temp[0][id]
#//NEW Determine temperature dependent partition functions Uw: lburns
thisTemp = temp[0][id]
"""
if (Ttheta >= 1.0):
thisLogUw = logUw[0]
if (Ttheta <= 0.5):
thisLogUw = logUw[1]
if (Ttheta > 0.5 and Ttheta < 1.0):
thisLogUw = ( logUw[1] * (Ttheta - 0.5)/(1.0 - 0.5) ) \
+ ( logUw[0] * (1.0 - Ttheta)/(1.0 - 0.5) )
"""
#JB#
thisLogUw = ToolBox.valueFromFit(logUwFit,thisTemp)#u(T) value extrapolated
#JB#
if (thisTemp >= 10000.0):
thisLogUw = logUw[4]
if (thisTemp <= 130.0):
thisLogUw = logUw[0]
"""
if (thisTemp > 130 and thisTemp <= 500):
thisLogUw = logUw[1] * (thisTemp - 130)/(500 - 130) \
+ logUw[0] * (500 - thisTemp)/(500 - 130)
if (thisTemp > 500 and thisTemp <= 3000):
thisLogUw = logUw[2] * (thisTemp - 500)/(3000 - 500) \
+ logUw[1] * (3000 - thisTemp)/(3000 - 500)
if (thisTemp > 3000 and thisTemp <= 8000):
thisLogUw = logUw[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUw[2] * (8000 - thisTemp)/(8000 - 3000)
if (thisTemp > 8000 and thisTemp < 10000):
thisLogUw = logUw[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUw[3] * (10000 - thisTemp)/(10000 - 8000)
"""
#print("logUw ", logUw, " thisLogUw ", thisLogUw)
#//System.out.println("LevPops: ionized branch taken, ionized = " + ionized);
#// Take stat weight of ground state as partition function:
logNums[id] = logNStage[id] - boltzFacL / temp[0][id] + logGwL - thisLogUw #// lower level of b-b transition
#print("LevelPopsServer.stagePops id ", id, " logNStage[id] ", logNStage[id], " boltzFacL ", boltzFacL, " temp[0][id] ", temp[0][id], " logGwL ", logGwL, " thisLogUw ", thisLogUw, " logNums[id] ", logNums[id]);
#// System.out.println("LevelPops: id, logNums[0][id], logNums[1][id], logNums[2][id], logNums[3][id]: " + id + " "
#// + Math.exp(logNums[0][id]) + " "
#// + Math.exp(logNums[1][id]) + " "
#// + Math.exp(logNums[2][id]) + " "
#// + Math.exp(logNums[3][id]));
#//System.out.println("LevelPops: id, logNums[0][id], logNums[1][id], logNums[2][id], logNums[3][id], logNums[4][id]: " + id + " "
#// + logE * (logNums[0][id]) + " "
#// + logE * (logNums[1][id]) + " "
#// + logE * (logNums[2][id]) + " "
# // + logE * (logNums[3][id]) + " "
#// + logE * (logNums[4][id]) );
#//System.out.println("LevelPops: id, logIonFracI, logIonFracII: " + id + " " + logE*logIonFracI + " " + logE*logIonFracII
#// + "logNum, logNumI, logNums[0][id], logNums[1][id] "
#// + logE*logNum + " " + logE*logNumI + " " + logE*logNums[0][id] + " " + logE*logNums[1][id]);
#//System.out.println("LevelPops: id, logIonFracI: " + id + " " + logE*logIonFracI
#// + "logNums[0][id], boltzFacL/temp[0][id], logNums[2][id]: "
#// + logNums[0][id] + " " + boltzFacL/temp[0][id] + " " + logNums[2][id]);
#//id loop
#stop
#print (uw)
return logNums
#//This version - ionization equilibrium *WITHOUT* molecules - logNum is TOTAL element population
#def stagePops2(logNum, Ne, chiIArr, log10UwAArr, \
# numMols, logNumB, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \
# numDeps, temp):
def stagePops(logNum, Ne, chiIArr, logUw, \
numDeps, temp):
#line 1: //species A data - ionization equilibrium of A
#line 2: //data for set of species "B" - molecular equilibrium for set {AB}
"""Ionization equilibrium routine WITHOUT molecule formation:
// Returns depth distribution of ionization stage populations
// Input parameters:
// logNum - array with depth-dependent total element number densities (cm^-3)
// chiI1 - ground state ionization energy of neutral stage
// chiI2 - ground state ionization energy of singly ionized stage
// Also needs atmospheric structure information:
// numDeps
// temp structure
// rho structure
// Atomic element A is the one whose ionization fractions are being computed
//
"""
ln10 = math.log(10.0)
logE = math.log10(math.e) #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
numStages = len(chiIArr) #// + 1; //need one more stage above the highest stage to be populated
#// var numMols = dissEArr.length;
#// Partition functions passed in are vectors of temperature-dependent base 10 log Us (evaluated at the masterTemp points)
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Default initializations:
#//We need one more stage in size of Saha factor than number of stages we're actually populating
thisLogUw = [ 0.0 for i in range(numStages+1) ]
for i in range(numStages+1):
thisLogUw[i] = 0.0
logE10 = math.log(10.0)
#//atomic ionization stage Boltzmann factors:
#double logChiI, logBoltzFacI;
boltzFacI = [ 0.0 for i in range(numStages) ]
#print("numStages ", numStages, " Useful.logEv ", Useful.logEv())
for i in range(numStages):
#print("i ", i, " chiIArr ", chiIArr[i])
logChiI = math.log(chiIArr[i]) + Useful.logEv()
logBoltzFacI = logChiI - Useful.logK()
boltzFacI[i] = math.exp(logBoltzFacI)
logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH())
#// return a 2D 5 x numDeps array of logarithmic number densities
#// Row 0: neutral stage ground state population
#// Row 1: singly ionized stage ground state population
#// Row 2: doubly ionized stage ground state population
#// Row 3: triply ionized stage ground state population
#// Row 4: quadruply ionized stage ground state population
#double[][] logNums = new double[numStages][numDeps];
logNums = [ [ 0.0 for i in range(numDeps)] for j in range(numStages) ]
#//We need one more stage in size of Saha factor than number of stages we're actually populating
#// for index accounting purposes
#// For atomic ionization stages:
logSaha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
saha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
#//
logIonFrac = [ 0.0 for i in range(numStages) ]
#double expFac, logNe;
#// Now - molecular variables:
thisLogUwA = 0.0 #// element A
#thisLogQwAB = math.log(300.0)
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
logUwA = [ 0.0 for i in range(5) ]
#JB#
uua=[]
#uub=[]
#qwab=[]
for iStg in range(numStages):
currentUwArr=list(logUw[iStg])#u(T) determined values
UwFit = ToolBox.cubicFit(masterTemp,currentUwArr)#u(T) fit
uua.append(UwFit)
#print(logUw[iStg])
for id in range(numDeps):
#//// reduce or enhance number density by over-all Rosseland opacity scale parameter
#//
#//Row 1 of Ne is log_e Ne in cm^-3
logNe = Ne[1][id]
#//Determine temperature dependent partition functions Uw:
thisTemp = temp[0][id]
#Ttheta = 5040.0 / thisTemp
#JB#
#use temps and partition values to create a function
#then use said function to extrapolate values for all points
thisLogUw[numStages] = 0.0
for iStg in range(numStages):
thisLogUw[iStg] = ToolBox.valueFromFit(uua[iStg],thisTemp)#u(T) value extrapolated
#JB#
#// NEW Determine temperature dependent partition functions Uw: lburns
if (thisTemp <= 130.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][0]
#for iMol in range(numMols):
# thisLogUwB[iMol] = logUwB[iMol][0]
if (thisTemp >= 10000.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][4]
#for iMol in range(numMols):
# thisLogUwB[iMol] = logUwB[iMol][4]
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
thisLogUwA = thisLogUw[0];
#//Ionization stage Saha factors:
for iStg in range(numStages):
#print("iStg ", iStg)
logSaha[iStg+1][iStg] = logSahaFac - logNe - (boltzFacI[iStg] /temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUw[iStg+1] - thisLogUw[iStg]
saha[iStg+1][iStg] = math.exp(logSaha[iStg+1][iStg])
#//Compute log of denominator is ionization fraction, f_stage
denominator = 1.0 #//default initialization - leading term is always unity
#//ion stage contributions:
for jStg in range(1, numStages+1):
addend = 1.0 #//default initialization for product series
for iStg in range(jStg):
#//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg);
addend = addend * saha[iStg+1][iStg]
denominator = denominator + addend
#//
logDenominator = math.log(denominator)
logIonFrac[0] = -1.0 * logDenominator #// log ionization fraction in stage I
for jStg in range(1, numStages):
addend = 0.0 #//default initialization for product series
for iStg in range(jStg):
#//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg);
addend = addend + logSaha[iStg+1][iStg]
logIonFrac[jStg] = addend - logDenominator
for iStg in range(numStages):
logNums[iStg][id] = logNum[id] + logIonFrac[iStg]
#//id loop
return logNums;
#//end method stagePops
#end method levelPops
#def stagePops2(logNum, Ne, chiIArr, log10UwAArr, \
# numMols, logNumB, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \
# numDeps, temp):
def stagePops2(logNum, Ne, chiIArr, logUw, \
numMols, logNumB, dissEArr, logUwB, logQwABArr, logMuABArr, \
numDeps, temp):
#line 1: //species A data - ionization equilibrium of A
#line 2: //data for set of species "B" - molecular equilibrium for set {AB}
"""Ionization equilibrium routine that accounts for molecule formation:
// Returns depth distribution of ionization stage populations
// Input parameters:
// logNum - array with depth-dependent total element number densities (cm^-3)
// chiI1 - ground state ionization energy of neutral stage
// chiI2 - ground state ionization energy of singly ionized stage
// Also needs atmospheric structure information:
// numDeps
// temp structure
// rho structure
// Atomic element A is the one whose ionization fractions are being computed
// Element B refers to array of other species with which A forms molecules AB """
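# Same Saha chain as in stagePops, but the denominator of the ionization fraction
# also collects the *inverse* molecular Saha factors N_AB/N_I for each molecule AB,
# so atoms bound in molecules are removed from the free atomic stages
# (when numMols == 0 the single phantom molecule is skipped via the ifMols flag).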
ln10 = math.log(10.0)
logE = math.log10(math.e) #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
numStages = len(chiIArr) #// + 1; //need one more stage above the highest stage to be populated
#// var numMols = dissEArr.length;
#// Partition functions passed in are vectors of temperature-dependent base 10 log Us (evaluated at the masterTemp points)
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Default initializations:
#//We need one more stage in size of Saha factor than number of stages we're actually populating
thisLogUw = [ 0.0 for i in range(numStages+1) ]
for i in range(numStages+1):
thisLogUw[i] = 0.0
logE10 = math.log(10.0)
#//atomic ionization stage Boltzmann factors:
#double logChiI, logBoltzFacI;
boltzFacI = [ 0.0 for i in range(numStages) ]
#print("numStages ", numStages, " Useful.logEv ", Useful.logEv())
for i in range(numStages):
#print("i ", i, " chiIArr ", chiIArr[i])
logChiI = math.log(chiIArr[i]) + Useful.logEv()
logBoltzFacI = logChiI - Useful.logK()
boltzFacI[i] = math.exp(logBoltzFacI)
logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH())
#// return a 2D 5 x numDeps array of logarithmic number densities
#// Row 0: neutral stage ground state population
#// Row 1: singly ionized stage ground state population
#// Row 2: doubly ionized stage ground state population
#// Row 3: triply ionized stage ground state population
#// Row 4: quadruply ionized stage ground state population
#double[][] logNums = new double[numStages][numDeps];
logNums = [ [ 0.0 for i in range(numDeps)] for j in range(numStages) ]
#//We need one more stage in size of Saha factor than number of stages we're actually populating
#// for index accounting purposes
#// For atomic ionization stages:
logSaha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
saha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
#//
logIonFrac = [ 0.0 for i in range(numStages) ]
#double expFac, logNe;
#// Now - molecular variables:
#//Treat at least one molecule - if there are really no molecules for an atomic species,
#//there will be one phantom molecule in the denominator of the ionization fraction
#//with an impossibly high dissociation energy
ifMols = True
if (numMols == 0):
ifMols = False
numMols = 1
#//This should be inherited, but let's make sure:
dissEArr[0] = 19.0 #//eV
#//Molecular partition functions - default initialization:
#double[] thisLogUwB = new double[numMols];
thisLogUwB = [ 0.0 for i in range(numMols) ]
for iMol in range(numMols):
thisLogUwB[iMol] = 0.0 #// variable for temp-dependent computed partn fn of array element B
thisLogUwA = 0.0 #// element A
thisLogQwAB = math.log(300.0)
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
logUwA = [ 0.0 for i in range(5) ]
if (numMols > 0):
for kk in range(len(logUwA)):
logUwA[kk] = logUw[0][kk]
#// lburns
#//}
#//// Molecular partition functions:
#//Molecular dissociation Boltzmann factors:
boltzFacIAB = [ 0.0 for i in range(numMols) ]
logMolSahaFac = [ 0.0 for i in range(numMols) ]
#//if (numMols > 0){
#double logDissE, logBoltzFacIAB;
for iMol in range(numMols):
logDissE = math.log(dissEArr[iMol]) + Useful.logEv()
logBoltzFacIAB = logDissE - Useful.logK()
boltzFacIAB[iMol] = math.exp(logBoltzFacIAB)
logMolSahaFac[iMol] = (3.0 / 2.0) * (log2pi + logMuABArr[iMol] + Useful.logK() - 2.0 * Useful.logH())
#//console.log("iMol " + iMol + " dissEArr[iMol] " + dissEArr[iMol] + " logDissE " + logE*logDissE + " logBoltzFacIAB " + logE*logBoltzFacIAB + " boltzFacIAB[iMol] " + boltzFacIAB[iMol] + " logMuABArr " + logE*logMuABArr[iMol] + " logMolSahaFac " + logE*logMolSahaFac[iMol]);
#//}
#// For molecular species:
logSahaMol = [ 0.0 for i in range(numMols) ]
invSahaMol = [ 0.0 for i in range(numMols) ]
#JB#
uua=[]
uub=[]
qwab=[]
for iStg in range(numStages):
currentUwArr=list(logUw[iStg])#u(T) determined values
UwFit = ToolBox.cubicFit(masterTemp,currentUwArr)#u(T) fit
uua.append(UwFit)
#print(logUw[iStg])
for iMol in range(numMols):
currentUwBArr=list(logUwB[iMol])#u(T) determined values
UwBFit = ToolBox.cubicFit(masterTemp,currentUwBArr)#u(T) fit
uub.append(UwBFit)
for id in range(numDeps):
#//// reduce or enhance number density by over-all Rosseland opacity scale parameter
#//
#//Row 1 of Ne is log_e Ne in cm^-3
logNe = Ne[1][id]
#//Determine temperature dependent partition functions Uw:
thisTemp = temp[0][id]
#Ttheta = 5040.0 / thisTemp
#JB#
#use temps and partition values to create a function
#then use said function to extrapolate values for all points
thisLogUw[numStages] = 0.0
for iStg in range(numStages):
thisLogUw[iStg] = ToolBox.valueFromFit(uua[iStg],thisTemp)#u(T) value extrapolated
for iMol in range(numMols):
thisLogUwB[iMol] = ToolBox.valueFromFit(uub[iMol],thisTemp)#u(T) value extrapolated
#JB#
#// NEW Determine temperature dependent partition functions Uw: lburns
if (thisTemp <= 130.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][0]
for iMol in range(numMols):
thisLogUwB[iMol] = logUwB[iMol][0]
if (thisTemp >= 10000.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][4]
for iMol in range(numMols):
thisLogUwB[iMol] = logUwB[iMol][4]
for iMol in range(numMols):
if (thisTemp < 3000.0):
thisLogQwAB = ( logQwABArr[iMol][1] * (3000.0 - thisTemp)/(3000.0 - 500.0) ) \
+ ( logQwABArr[iMol][2] * (thisTemp - 500.0)/(3000.0 - 500.0) )
if ( (thisTemp >= 3000.0) and (thisTemp <= 8000.0) ):
thisLogQwAB = ( logQwABArr[iMol][2] * (8000.0 - thisTemp)/(8000.0 - 3000.0) ) \
+ ( logQwABArr[iMol][3] * (thisTemp - 3000.0)/(8000.0 - 3000.0) )
if ( thisTemp > 8000.0 ):
thisLogQwAB = ( logQwABArr[iMol][3] * (10000.0 - thisTemp)/(10000.0 - 8000.0) ) \
+ ( logQwABArr[iMol][4] * (thisTemp - 8000.0)/(10000.0 - 8000.0) )
#// iMol loop
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
thisLogUwA = thisLogUw[0];
#//Ionization stage Saha factors:
for iStg in range(numStages):
#print("iStg ", iStg)
logSaha[iStg+1][iStg] = logSahaFac - logNe - (boltzFacI[iStg] /temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUw[iStg+1] - thisLogUw[iStg]
saha[iStg+1][iStg] = math.exp(logSaha[iStg+1][iStg])
#//Molecular Saha factors:
for iMol in range(numMols):
logSahaMol[iMol] = logMolSahaFac[iMol] - logNumB[iMol][id] - (boltzFacIAB[iMol] / temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUwB[iMol] + thisLogUwA - thisLogQwAB
#//For denominator of ionization fraction, we need *inverse* molecular Saha factors (N_AB/NI):
logSahaMol[iMol] = -1.0 * logSahaMol[iMol]
invSahaMol[iMol] = math.exp(logSahaMol[iMol])
#//Compute log of denominator of ionization fraction, f_stage
denominator = 1.0 #//default initialization - leading term is always unity
#//ion stage contributions:
for jStg in range(1, numStages+1):
addend = 1.0 #//default initialization for product series
for iStg in range(jStg):
#//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg);
addend = addend * saha[iStg+1][iStg]
denominator = denominator + addend
#//molecular contribution
if (ifMols == True):
for iMol in range(numMols):
denominator = denominator + invSahaMol[iMol]
#//
logDenominator = math.log(denominator)
logIonFrac[0] = -1.0 * logDenominator #// log ionization fraction in stage I
for jStg in range(1, numStages):
addend = 0.0 #//default initialization for product series
for iStg in range(jStg):
#//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg);
addend = addend + logSaha[iStg+1][iStg]
logIonFrac[jStg] = addend - logDenominator
for iStg in range(numStages):
logNums[iStg][id] = logNum[id] + logIonFrac[iStg]
#//id loop
return logNums;
#//end method stagePops2
def stagePops3(logNum, Ne, chiIArr, logUw, numDeps, temp):
#Version for ChromaStarPyGas: logNum is now *neutral stage* population from Phil
# Bennett's GAS package
#line 1: //species A data - ionization equilibrium of A
#line 2: //data for set of species "B" - molecular equilibrium for set {AB}
"""Ionization equilibrium routine that accounts for molecule formation:
// Returns depth distribution of ionization stage populations
// Input parameters:
// logNum - array with depth-dependent neutral stage number densities (cm^-3)
// chiI1 - ground state ionization energy of neutral stage
// chiI2 - ground state ionization energy of singly ionized stage
// Also needs atmospheric structure information:
// numDeps
// temp structure
// rho structure
// Atomic element A is the one whose ionization fractions are being computed
// (No molecular species "B" are handled in this version; see stagePops2 for the {AB} case) """
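# With logNum already holding the neutral-stage population, each higher stage is
# obtained by adding one single-stage log Saha factor to the stage below:
# log N_j = log N_(j-1) + log Saha(j, j-1), as in the iStg loop further down.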
ln10 = math.log(10.0)
logE = math.log10(math.e) #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
numStages = len(chiIArr) #// + 1; //need one more stage above the highest stage to be populated
#// var numMols = dissEArr.length;
#// Partition functions passed in are vectors of temperature-dependent base 10 log Us (evaluated at the masterTemp points)
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Default initializations:
#//We need one more stage in size of Saha factor than number of stages we're actually populating
thisLogUw = [ 0.0 for i in range(numStages+1) ]
for i in range(numStages+1):
thisLogUw[i] = 0.0
logE10 = math.log(10.0)
#//atomic ionization stage Boltzmann factors:
#double logChiI, logBoltzFacI;
boltzFacI = [ 0.0 for i in range(numStages) ]
#print("numStages ", numStages, " Useful.logEv ", Useful.logEv())
for i in range(numStages):
#print("i ", i, " chiIArr ", chiIArr[i])
logChiI = math.log(chiIArr[i]) + Useful.logEv()
logBoltzFacI = logChiI - Useful.logK()
boltzFacI[i] = math.exp(logBoltzFacI)
logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH())
#// return a 2D 5 x numDeps array of logarithmic number densities
#// Row 0: neutral stage ground state population
#// Row 1: singly ionized stage ground state population
#// Row 2: doubly ionized stage ground state population
#// Row 3: triply ionized stage ground state population
#// Row 4: quadruply ionized stage ground state population
#double[][] logNums = new double[numStages][numDeps];
logNums = [ [ 0.0 for i in range(numDeps)] for j in range(numStages) ]
#//We need one more stage in size of Saha factor than number of stages we're actually populating
#// for index accounting purposes
#// For atomic ionization stages:
#logSaha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
#saha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
#//
#logIonFrac = [ 0.0 for i in range(numStages) ]
#double expFac, logNe;
#JB#
uua=[]
uub=[]
qwab=[]
for iStg in range(numStages):
currentUwArr=list(logUw[iStg])#u(T) determined values
UwFit = ToolBox.cubicFit(masterTemp,currentUwArr)#u(T) fit
uua.append(UwFit)
#print(logUw[iStg])
for id in range(numDeps):
#//// reduce or enhance number density by over-all Rosseland opacity scale parameter
#//
#//Row 1 of Ne is log_e Ne in cm^-3
logNe = Ne[1][id]
#//Determine temperature dependent partition functions Uw:
thisTemp = temp[0][id]
#Ttheta = 5040.0 / thisTemp
#JB#
#use temps and partition values to create a function
#then use said function to extrapolate values for all points
thisLogUw[numStages] = 0.0
for iStg in range(numStages):
thisLogUw[iStg] = ToolBox.valueFromFit(uua[iStg],thisTemp)#u(T) value extrapolated
#JB#
#// NEW Determine temperature dependent partition functions Uw: lburns
if (thisTemp <= 130.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][0]
if (thisTemp >= 10000.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][4]
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
#thisLogUwA = thisLogUw[0];
#//Ionization stage Saha factors:
logNums[0][id] = logNum[id]
for iStg in range(1, numStages):
#print("iStg ", iStg)
thisLogSaha = logSahaFac - logNe - (boltzFacI[iStg-1] /temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUw[iStg] - thisLogUw[iStg-1]
#saha[iStg+1][iStg] = math.exp(logSaha[iStg+1][iStg])
logNums[iStg][id] = logNums[iStg-1][id] + thisLogSaha
#//id loop
return logNums;
#//end method stagePops3
#def sahaRHS(chiI, log10UwUArr, log10UwLArr, temp):
def sahaRHS(chiI, logUwU, logUwL, temp):
"""RHS of partial pressure formulation of Saha equation in standard form (N_U*P_e/N_L on LHS)
// Returns depth distribution of LHS: Phi(T) === N_U*P_e/N_L (David Gray notation)
// Input parameters:
// chiI - ground state ionization energy of lower stage
// log10UwUArr, log10UwLArr - array of temperature-dependent partition function for upper and lower ionization stage
// Also needs atsmopheric structure information:
// numDeps
// temp structure
//
// Atomic element "A" is the one whose ionization fractions are being computed
// Element "B" refers to array of other species with which A forms molecules "AB" """
ln10 = math.log(10.0)
logE = math.log10(math.e) #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
#// var numMols = dissEArr.length;
#// Partition functions passed in are vectors of temperature-dependent base 10 log Us (evaluated at the masterTemp points)
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Default initializations:
#//We need one more stage in size of Saha factor than number of stages we're actually populating
thisLogUwU = 0.0
thisLogUwL = 0.0
logE10 = math.log(10.0)
#//We need one more stage in size of Saha factor than number of stages we're actually populating
#logUwU = [0.0 for i in range(5)]
#logUwL = [0.0 for i in range(5)]
for kk in range(len(logUwL)):
logUwU[kk] = logUwL[kk]
# logUwL[kk] = logE10*log10UwLArr[kk]
#//System.out.println("chiL before: " + chiL);
#// If we need to subtract chiI from chiL, do so *before* converting to tiny numbers in ergs!
#//atomic ionization stage Boltzmann factors:
#double logChiI, logBoltzFacI;
#double boltzFacI;
logChiI = math.log(chiI) + Useful.logEv()
logBoltzFacI = logChiI - Useful.logK()
boltzFacI = math.exp(logBoltzFacI)
#//Extra factor of k to get k^5/2 in the P_e formulation of Saha Eq.
logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH()) + Useful.logK()
#//double[] logLHS = new double[numDeps];
#double logLHS;
#// For atomic ionization stages:
#double logSaha, saha, expFac;
#// for (int id = 0; id < numDeps; id++) {
#//
#//Determine temperature dependent partition functions Uw:
thisTemp = temp[0]
#Ttheta = 5040.0 / thisTemp
"""
if (Ttheta >= 1.0):
thisLogUwU = logUwU[0]
thisLogUwL = logUwL[0]
if (Ttheta <= 0.5):
thisLogUwU = logUwU[1]
thisLogUwL = logUwL[1]
if (Ttheta > 0.5 and Ttheta < 1.0):
thisLogUwU = ( logUwU[1] * (Ttheta - 0.5)/(1.0 - 0.5) )
+ ( logUwU[0] * (1.0 - Ttheta)/(1.0 - 0.5) )
thisLogUwL = ( logUwL[1] * (Ttheta - 0.5)/(1.0 - 0.5) )
+ ( logUwL[0] * (1.0 - Ttheta)/(1.0 - 0.5) )
"""
#JB#
currentUwUArr=list(logUwU)#u(T) determined values
UwUFit = ToolBox.cubicFit(masterTemp,currentUwUArr)#u(T) fit
thisLogUwU = ToolBox.valueFromFit(UwUFit,thisTemp)#u(T) value extrapolated
currentUwLArr=list(logUwL)#u(T) determined values
UwLFit = ToolBox.cubicFit(masterTemp,currentUwLArr)#u(T) fit
thisLogUwL = ToolBox.valueFromFit(UwLFit,thisTemp)#u(T) value extrapolated
#JB#
#will need to do this one in Main as it goes through its own loop of temp
#if thisTemp == superTemp[0][len(superTemp[0])]:
# uwu.append(UwUFit)
# uwl.append(UwLFit)
#
#JB#
if (thisTemp <= 130.0):
thisLogUwU = logUwU[0]
thisLogUwL = logUwL[0]
if (thisTemp >= 10000.0):
thisLogUwU = logUwU[4]
thisLogUwL = logUwL[4]
"""
if (thisTemp > 130 and thisTemp <= 500):
thisLogUwU = logUwU[1] * (thisTemp - 130)/(500 - 130) \
+ logUwU[0] * (500 - thisTemp)/(500 - 130)
thisLogUwL = logUwL[1] * (thisTemp - 130)/(500 - 130) \
+ logUwL[0] * (500 - thisTemp)/(500 - 130)
if (thisTemp > 500 and thisTemp <= 3000):
thisLogUwU = logUwU[2] * (thisTemp - 500)/(3000 - 500) \
+ logUwU[1] * (3000 - thisTemp)/(3000 - 500)
thisLogUwL = logUwL[2] * (thisTemp - 500)/(3000 - 500) \
+ logUwL[1] * (3000 - thisTemp)/(3000 - 500)
if (thisTemp > 3000 and thisTemp <= 8000):
thisLogUwU = logUwU[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUwU[2] * (8000 - thisTemp)/(8000 - 3000)
thisLogUwL = logUwL[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUwL[2] * (8000 - thisTemp)/(8000 - 3000)
if (thisTemp > 8000 and thisTemp < 10000):
thisLogUwU = logUwU[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUwU[3] * (10000 - thisTemp)/(10000 - 8000)
thisLogUwL = logUwL[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUwL[3] * (10000 - thisTemp)/(10000 - 8000)
if (thisTemp >= 10000):
thisLogUwU = logUwU[4]
thisLogUwL = logUwL[4]
"""
#//Ionization stage Saha factors:
#//Need T_kin^5/2 in the P_e formulation of Saha Eq.
logSaha = logSahaFac - (boltzFacI /temp[0]) + (5.0 * temp[1] / 2.0) + thisLogUwU - thisLogUwL
#// saha = Math.exp(logSaha);
#//logLHS[id] = logSaha;
logLHS = logSaha;
#// } //id loop
return logLHS;
#JB
#return [logLHS,[[UwUFit,thisLogUwU],[UwLFit,thisLogUwL]]]
#//
# } //end method sahaRHS
#def molPops(nmrtrLogNumB, nmrtrDissE, log10UwA, nmrtrLog10UwB, nmrtrLogQwAB, nmrtrLogMuAB, \
# numMolsB, logNumB, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \
# logGroundRatio, numDeps, temp):
def molPops(nmrtrLogNumB, nmrtrDissE, logUwA, nmrtrLogUwB, nmrtrLogQwAB, nmrtrLogMuAB, \
numMolsB, logNumB, dissEArr, logUwB, logQwABArr, logMuABArr, \
logGroundRatio, numDeps, temp):
# line 1: //species A data - ionization equilibrium of A
# //data for set of species "B" - molecular equilibrium for set {AB}
"""Diatomic molecular equilibrium routine that accounts for molecule formation:
// Returns depth distribution of molecular population
// Input parameters:
// logNum - array with depth-dependent total element number densities (cm^-3)
// chiI1 - ground state ionization energy of neutral stage
// chiI2 - ground state ionization energy of singly ionized stage
// Also needs atmospheric structure information:
// numDeps
// temp structure
// rho structure
//
// Atomic element "A" is the one kept on the LHS of the master fraction, whose ionization fractions are included
// in the denominator of the master fraction
// Element "B" refers to array of other sintpecies with which A forms molecules "AB" """
logE = math.log10(math.e) #// for debug output
#//System.out.println("molPops: nmrtrDissE " + nmrtrDissE + " log10UwA " + log10UwA[0] + " " + log10UwA[1] + " nmrtrLog10UwB " +
#// nmrtrLog10UwB[0] + " " + nmrtrLog10UwB[1] + " nmrtrLog10QwAB " + logE*nmrtrLogQwAB[2] + " nmrtrLogMuAB " + logE*nmrtrLogMuAB
#// + " numMolsB " + numMolsB + " dissEArr " + dissEArr[0] + " log10UwBArr " + log10UwBArr[0][0] + " " + log10UwBArr[0][1] + " log10QwABArr " +
#// logE*logQwABArr[0][2] + " logMuABArr " + logE*logMuABArr[0]);
#//System.out.println("Line: nmrtrLog10UwB[0] " + logE*nmrtrLog10UwB[0] + " nmrtrLog10UwB[1] " + logE*nmrtrLog10UwB[1]);
ln10 = math.log(10.0)
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
logE10 = math.log(10.0)
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Treat at least one molecule - if there are really no molecules for an atomic species,
#//there will be one phantom molecule in the denominator of the ionization fraction
#//with an impossibly high dissociation energy
if (numMolsB == 0):
numMolsB = 1
#//This should be inherited, but let's make sure:
dissEArr[0] = 29.0 #//eV
#//var molPops = function(logNum, numeratorLogNumB, numeratorDissE, numeratorLog10UwA, numeratorLog10QwAB, numeratorLogMuAB, //species A data - ionization equilibrium of A
#//Molecular partition functions - default initialization:
thisLogUwB = [0.0 for i in range(numMolsB)]
for iMol in range(numMolsB):
thisLogUwB[iMol] = 0.0 #// variable for temp-dependent computed partn fn of array element B
thisLogUwA = 0.0 #// element A
nmrtrThisLogUwB = 0.0 #// element A
thisLogQwAB = math.log(300.0)
nmrtrThisLogQwAB = math.log(300.0)
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
#logUwA = [0.0 for i in range(5)]
#nmrtrLogUwB = [0.0 for i in range(5)]
#for kk in range(len(logUwA)):
#logUwA[kk] = logE10*log10UwA[kk]
#nmrtrLogUwB[kk] = logE10*nmrtrLog10UwB[kk]
#// lburns
#// Array of elements B for all molecular species AB:
#double[][] logUwB = new double[numMolsB][2];
#logUwB = [ [ 0.0 for i in range(5) ] for j in range(numMolsB) ]
#//if (numMolsB > 0){
#for iMol in range(numMolsB):
# for kk in range(5):
# logUwB[iMol][kk] = logE10*log10UwBArr[iMol][kk]
# // lburns new loop
#//}
#// Molecular partition functions:
#// double nmrtrLogQwAB = logE10*nmrtrLog10QwAB;
#// double[] logQwAB = new double[numMolsB];
#// //if (numMolsB > 0){
#// for (int iMol = 0; iMol < numMolsB; iMol++){
#// logQwAB[iMol] = logE10*log10QwABArr[iMol];
#// }
# //}
#//Molecular dissociation Boltzmann factors:
nmrtrBoltzFacIAB = 0.0
nmrtrLogMolSahaFac = 0.0
logDissE = math.log(nmrtrDissE) + Useful.logEv()
#//System.out.println("logDissE " + logE*logDissE)
logBoltzFacIAB = logDissE - Useful.logK()
#//System.out.println("logBoltzFacIAB " + logE*logBoltzFacIAB);
nmrtrBoltzFacIAB = math.exp(logBoltzFacIAB)
nmrtrLogMolSahaFac = (3.0 / 2.0) * (log2pi + nmrtrLogMuAB + Useful.logK() - 2.0 * Useful.logH())
#//System.out.println("nmrtrLogMolSahaFac " + logE*nmrtrLogMolSahaFac);
#//System.out.println("nmrtrDissE " + nmrtrDissE + " logDissE " + logE*logDissE + " logBoltzFacIAB " + logE*logBoltzFacIAB + " nmrtrBoltzFacIAB " + nmrtrBoltzFacIAB + " nmrtrLogMuAB " + logE*nmrtrLogMuAB + " nmrtrLogMolSahaFac " + logE*nmrtrLogMolSahaFac);
boltzFacIAB = [0.0 for i in range(numMolsB)]
logMolSahaFac = [0.0 for i in range(numMolsB)]
#//if (numMolsB > 0){
for iMol in range(numMolsB):
logDissE = math.log(dissEArr[iMol]) + Useful.logEv()
logBoltzFacIAB = logDissE - Useful.logK()
boltzFacIAB[iMol] = math.exp(logBoltzFacIAB)
logMolSahaFac[iMol] = (3.0 / 2.0) * (log2pi + logMuABArr[iMol] + Useful.logK() - 2.0 * Useful.logH())
#//System.out.println("logMolSahaFac[iMol] " + logE*logMolSahaFac[iMol]);
#//System.out.println("iMol " + iMol + " dissEArr[iMol] " + dissEArr[iMol] + " logDissE " + logE*logDissE + " logBoltzFacIAB " + logE*logBoltzFacIAB + " boltzFacIAB[iMol] " + boltzFacIAB[iMol] + " logMuABArr " + logE*logMuABArr[iMol] + " logMolSahaFac " + logE*logMolSahaFac[iMol]);
#//double[] logNums = new double[numDeps]
#//}
#// For molecular species:
#double nmrtrSaha, nmrtrLogSahaMol, nmrtrLogInvSahaMol; //, nmrtrInvSahaMol;
logMolFrac = [0.0 for i in range(numDeps)]
logSahaMol = [0.0 for i in range(numMolsB)]
invSahaMol = [0.0 for i in range(numMolsB)]
#JB#
currentUwAArr=list(logUwA)#u(T) determined values
UwAFit = ToolBox.cubicFit(masterTemp, currentUwAArr)#u(T) fit
nmrtrLogUwBArr=list(nmrtrLogUwB)#u(T) determined values
nmrtrLogUwBFit = ToolBox.cubicFit(masterTemp, nmrtrLogUwBArr)#u(T) fit
#uwa.append(UwAFit)
#uwb.append(nmrtrLogUwBFit)
uwbFits=[]
qwabFit = []
for iMol in range(numMolsB):
currentUwBArr=list(logUwB[iMol])
UwBFit = ToolBox.cubicFit(masterTemp, currentUwBArr)
uwbFits.append(UwBFit)
currentLogQwABArr=list(logQwABArr[iMol])#u(T) determined values
QwABFit = ToolBox.cubicFit(masterTemp, currentLogQwABArr)#u(T) fit
qwabFit.append(QwABFit)
#nmrtrQwABArr=list(nmrtrLogQwAB)#u(T) determined values
#nmrtrQwABFit = ToolBox.cubicFit(masterTemp, nmrtrQwABArr)#u(T) fit
#for Mols in range(numMolsB):
# currentLogUwBArr=list(logUwB[Mols])#u(T) determined values
# UwBFit=cubicFit(masterTemp,currentLogUwBArr)#u(T) fit
#JB#
#//
temps=[]
#valb=[]
#vala=[]
#valnb=[]
#valqab=[]
#valnmrtrqwb=[]
#// System.out.println("molPops: id nmrtrLogNumB logNumBArr[0] logGroundRatio");
for id in range(numDeps):
#//System.out.format("%03d, %21.15f, %21.15f, %21.15f, %n", id, logE*nmrtrLogNumB[id], logE*logNumB[0][id], logE*logGroundRatio[id]);
#//// reduce or enhance number density by over-all Rosseland opacity scale parameter
#//Determine temperature dependent partition functions Uw:
thisTemp = temp[0][id]
temps.append(thisTemp)
#Ttheta = 5040.0 / thisTemp
"""
if (Ttheta >= 1.0):
thisLogUwA = logUwA[0]
nmrtrThisLogUwB = nmrtrLogUwB[0]
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][0]
if (Ttheta <= 0.5):
thisLogUwA = logUwA[1]
nmrtrThisLogUwB = nmrtrLogUwB[1]
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][1]
if (Ttheta > 0.5 and Ttheta < 1.0):
thisLogUwA = ( logUwA[1] * ((Ttheta - 0.5)/(1.0 - 0.5)) ) \
+ ( logUwA[0] * ((1.0 - Ttheta)/(1.0 - 0.5)) )
nmrtrThisLogUwB = ( nmrtrLogUwB[1] * ((Ttheta - 0.5)/(1.0 - 0.5)) ) \
+ ( nmrtrLogUwB[0] * ((1.0 - Ttheta)/(1.0 - 0.5)) )
for iMol in range(numMolsB):
thisLogUwB[iMol] = ( logUwB[iMol][1] * ((Ttheta - 0.5)/(1.0 - 0.5)) ) \
+ ( logUwB[iMol][0] * ((1.0 - Ttheta)/(1.0 - 0.5)) )
"""
#JB#
thisLogUwA = float(ToolBox.valueFromFit(UwAFit,thisTemp))#u(T) value extrapolated
#vala.append(thisLogUwA)
nmrtrThisLogUwB = float(ToolBox.valueFromFit(nmrtrLogUwBFit,thisTemp))#u(T) value extrapolated
#valnb.append(nmrtrThisLogUwB)
#for iMol in range(numMolsB):
# thisLogUwB[iMol]=logUwB[iMol]
for iMol in range(numMolsB):
thisLogUwB[iMol] = ToolBox.valueFromFit(uwbFits[iMol],thisTemp)#u(T) value extrapolated
#valb.append(thisLogUwB[iMol])
#// NEW Determine temperature dependent partition functions Uw: lburns
thisTemp = temp[0][id]
if (thisTemp <= 130.0):
thisLogUwA = logUwA[0]
nmrtrThisLogUwB = nmrtrLogUwB[0]
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][0]
if (thisTemp >= 10000.0):
thisLogUwA = logUwA[4]
nmrtrThisLogUwB = nmrtrLogUwB[4]
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][4]
"""
if (thisTemp > 130 and thisTemp <= 500):
thisLogUwA = logUwA[1] * (thisTemp - 130)/(500 - 130) \
+ logUwA[0] * (500 - thisTemp)/(500 - 130)
nmrtrThisLogUwB = nmrtrLogUwB[1] * (thisTemp - 130)/(500 - 130) \
+ nmrtrLogUwB[0] * (500 - thisTemp)/(500 - 130)
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][1] * (thisTemp - 130)/(500 - 130) \
+ logUwB[iMol][0] * (500 - thisTemp)/(500 - 130)
if (thisTemp > 500 and thisTemp <= 3000):
thisLogUwA = logUwA[2] * (thisTemp - 500)/(3000 - 500) \
+ logUwA[1] * (3000 - thisTemp)/(3000 - 500)
nmrtrThisLogUwB = nmrtrLogUwB[2] * (thisTemp - 500)/(3000 - 500) \
+ nmrtrLogUwB[1] * (3000 - thisTemp)/(3000 - 500)
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][2] * (thisTemp - 500)/(3000 - 500) \
+ logUwB[iMol][1] * (3000 - thisTemp)/(3000 - 500)
if (thisTemp > 3000 and thisTemp <= 8000):
thisLogUwA = logUwA[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUwA[2] * (8000 - thisTemp)/(8000 - 3000)
nmrtrThisLogUwB = nmrtrLogUwB[3] * (thisTemp - 3000)/(8000 - 3000) \
+ nmrtrLogUwB[2] * (8000 - thisTemp)/(8000 - 3000)
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUwB[iMol][2] * (8000 - thisTemp)/(8000 - 3000)
if (thisTemp > 8000 and thisTemp < 10000):
thisLogUwA = logUwA[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUwA[3] * (10000 - thisTemp)/(10000 - 8000)
nmrtrThisLogUwB = nmrtrLogUwB[4] * (thisTemp - 8000)/(10000 - 8000) \
+ nmrtrLogUwB[3] * (10000 - thisTemp)/(10000 - 8000)
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUwB[iMol][3] * (10000 - thisTemp)/(10000 - 8000)
if (thisTemp >= 10000):
thisLogUwA = logUwA[4]
nmrtrThisLogUwB = nmrtrLogUwB[4]
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][4]
"""
#iMol loops for Q's
for iMol in range(numMolsB):
if (thisTemp < 3000.0):
thisLogQwAB = ( logQwABArr[iMol][1] * (3000.0 - thisTemp)/(3000.0 - 500.0) ) \
+ ( logQwABArr[iMol][2] * (thisTemp - 500.0)/(3000.0 - 500.0) )
if ( (thisTemp >= 3000.0) and (thisTemp <= 8000.0) ):
thisLogQwAB = ( logQwABArr[iMol][2] * (8000.0 - thisTemp)/(8000.0 - 3000.0) ) \
+ ( logQwABArr[iMol][3] * (thisTemp - 3000.0)/(8000.0 - 3000.0) )
if ( thisTemp > 8000.0 ):
thisLogQwAB = ( logQwABArr[iMol][3] * (10000.0 - thisTemp)/(10000.0 - 8000.0) ) \
+ ( logQwABArr[iMol][4] * (thisTemp - 8000.0)/(10000.0 - 8000.0) )
if (thisTemp < 3000.0):
nmrtrThisLogQwAB = ( nmrtrLogQwAB[1] * (3000.0 - thisTemp)/(3000.0 - 500.0) ) \
+ ( nmrtrLogQwAB[2] * (thisTemp - 500.0)/(3000.0 - 500.0) )
if ( (thisTemp >= 3000.0) and (thisTemp <= 8000.0) ):
nmrtrThisLogQwAB = ( nmrtrLogQwAB[2] * (8000.0 - thisTemp)/(8000.0 - 3000.0) ) \
+ ( nmrtrLogQwAB[3] * (thisTemp - 3000.0)/(8000.0 - 3000.0) )
if ( thisTemp > 8000.0 ):
nmrtrThisLogQwAB = ( nmrtrLogQwAB[3] * (10000.0 - thisTemp)/(10000.0 - 8000.0) ) \
+ ( nmrtrLogQwAB[4] * (thisTemp - 8000.0)/(10000.0 - 8000.0) )
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
# //Ionization stage Saha factors:
#//System.out.println("id " + id + " nmrtrLogNumB[id] " + logE*nmrtrLogNumB[id]);
# // if (id == 16){
# // System.out.println("id " + id + " nmrtrLogNumB[id] " + logE*nmrtrLogNumB[id] + " pp nmrtB " + (logE*(nmrtrLogNumB[id]+temp[1][id]+Useful.logK())) + " nmrtrThisLogUwB " + logE*nmrtrThisLogUwB + " thisLogUwA " + logE*thisLogUwA + " nmrtrLogQwAB " + logE*nmrtrThisLogQwAB);
# //System.out.println("nmrtrThisLogUwB " + logE*nmrtrThisLogUwB + " thisLogUwA " + logE*thisLogUwA + " nmrtrThisLogQwAB " + logE*nmrtrThisLogQwAB);
# // }
nmrtrLogSahaMol = nmrtrLogMolSahaFac - nmrtrLogNumB[id] - (nmrtrBoltzFacIAB / temp[0][id]) + (3.0 * temp[1][id] / 2.0) + nmrtrThisLogUwB + thisLogUwA - nmrtrThisLogQwAB
nmrtrLogInvSahaMol = -1.0 * nmrtrLogSahaMol
#//System.out.println("nmrtrLogInvSahaMol " + logE*nmrtrLogInvSahaMol);
#//nmrtrInvSahaMol = Math.exp(nmrtrLogSahaMol);
#// if (id == 16){
#// System.out.println("nmrtrLogInvSahaMol " + logE*nmrtrLogInvSahaMol);
#// }
#// if (id == 16){
#// System.out.println("nmrtrBoltzFacIAB " + nmrtrBoltzFacIAB + " nmrtrThisLogUwB " + logE*nmrtrThisLogUwB + " thisLogUwA " + logE*thisLogUwA + " nmrtrThisLogQwAB " + nmrtrThisLogQwAB);
#// System.out.println("nmrtrLogSahaMol " + logE*nmrtrLogSahaMol); // + " nmrtrInvSahaMol " + nmrtrInvSahaMol);
#// }
#//Molecular Saha factors:
for iMol in range(numMolsB):
#//System.out.println("iMol " + iMol + " id " + id + " logNumB[iMol][id] " + logE*nmrtrLogNumB[id]);
#//System.out.println("iMol " + iMol + " thisLogUwB[iMol] " + logE*thisLogUwB[iMol] + " thisLogUwA " + logE*thisLogUwA + " thisLogQwAB " + logE*thisLogQwAB);
logSahaMol[iMol] = logMolSahaFac[iMol] - logNumB[iMol][id] - (boltzFacIAB[iMol] / temp[0][id]) + (3.0 * temp[1][id] / 2.0) + float(thisLogUwB[iMol]) + thisLogUwA - thisLogQwAB
#//For denominator of ionization fraction, we need *inverse* molecular Saha factors (N_AB/NI):
logSahaMol[iMol] = -1.0 * logSahaMol[iMol]
invSahaMol[iMol] = math.exp(logSahaMol[iMol])
#//TEST invSahaMol[iMol] = 1.0e-99; //test
#// if (id == 16){
#// System.out.println("iMol " + iMol + " boltzFacIAB[iMol] " + boltzFacIAB[iMol] + " thisLogUwB[iMol] " + logE*thisLogUwB[iMol] + " logQwAB[iMol] " + logE*thisLogQwAB + " logNumB[iMol][id] " + logE*logNumB[iMol][id] + " logMolSahaFac[iMol] " + logE*logMolSahaFac[iMol]);
#// System.out.println("iMol " + iMol + " logSahaMol " + logE*logSahaMol[iMol] + " invSahaMol[iMol] " + invSahaMol[iMol]);
#// }
#//Compute log of denominator of ionization fraction, f_stage
# //default initialization
# // - ratio of total atomic particles in all ionization stages to number in ground state:
denominator = math.exp(logGroundRatio[id]) #//default initialization - ratio of total atomic particles in all ionization stages to number in ground state
#//molecular contribution
for iMol in range(numMolsB):
#// if (id == 16){
#// System.out.println("invSahaMol[iMol] " + invSahaMol[iMol] + " denominator " + denominator);
#// }
denominator = denominator + invSahaMol[iMol]
#//
logDenominator = math.log(denominator)
#//System.out.println("logGroundRatio[id] " + logE*logGroundRatio[id] + " logDenominator " + logE*logDenominator);
#// if (id == 16){
#// System.out.println("id " + id + " logGroundRatio " + logGroundRatio[id] + " logDenominator " + logDenominator);
#// }
#//if (id == 36){
#// System.out.println("logDenominator " + logE*logDenominator);
#// }
#//var logDenominator = Math.log( 1.0 + saha21 + (saha32 * saha21) + (saha43 * saha32 * saha21) + (saha54 * saha43 * saha32 * saha21) );
logMolFrac[id] = nmrtrLogInvSahaMol - logDenominator
#// if (id == 16){
#// System.out.println("id " + id + " logMolFrac[id] " + logE*logMolFrac[id]);
#// }
#//logNums[id] = logNum[id] + logMolFrac;
#} //id loop
#JB - check (never used)#
#print(uwa)
#print(uwb)
#title("logUwA")
"""
plot(temps,vala)
tempT=[]
for t in masterTemp:
tempT.append(valueFromFit(UwAFit,t))
scatter(masterTemp,(tempT))
show()
#title("nmrtrlogUwB")
plot(temps,valnb)
tempT=[]
for t in masterTemp:
tempT.append(valueFromFit(nmrtrLogUwBFit,t))
scatter(masterTemp,(tempT))
show()
#title("logUwB")
plot(temps,valb)
tempT=[]
for t in masterTemp:
tempT.append(valueFromFit(UwBFit,t))
scatter(masterTemp,(tempT))
show()
#title("logQwAB")
plot(temps,valqab)
tempT=[]
for t in masterTemp:
tempT.append(valueFromFit(QwABFit,t))
scatter(masterTemp,(tempT))
show()
#title("nmrtrlogQwAB")
plot(temps,valnmrtrqwb)
tempT=[]
for t in masterTemp:
tempT.append(valueFromFit(nmrtrQwABFit,t))
scatter(masterTemp,(tempT))
show()
"""
#JB#
return logMolFrac
#//end method molPops | mit |
sjperkins/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 88 | 31139 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert (isinstance(n_classes, dict))
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
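# Worked example of the shapes produced here: x_shape=(100, 28, 28), y_shape=(100,),
# n_classes=10 and batch_size=32 give input_shape=[32, 28, 28] and
# output_shape=[32, 10], since labels are widened to one-hot size n_classes.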
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
y: numpy, pandas or Dask array or dictionary of aforementioned. Also
supports
iterables.
n_classes: number of classes. Must be None or the same type as y. If `y` is a
`dict` (or an iterable which returns dicts), then `n_classes[key]` gives the
number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
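# Illustrative call (the names x_train/y_train are placeholders, not defined in this
# module): setup_train_data_feeder(x_train, y_train, n_classes=10, batch_size=32)
# returns a DataFeeder (a DaskDataFeeder for dask inputs, or a StreamingDataFeeder
# when x and y are iterators).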
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
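# Illustrative sketch (not part of the original module): _batch_data groups a
# stream of samples into np.matrix chunks of at most batch_size rows, emitting
# a final partial chunk if needed.
def _example_batch_data():
  stream = iter([np.array([i, i + 1]) for i in range(5)])
  chunks = list(_batch_data(stream, batch_size=2))
  return [c.shape for c in chunks]  # [(2, 2), (2, 2), (1, 2)]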
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
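# Illustrative sketch (not part of the original module): for an in-memory
# array, setup_predict_data_feeder returns a list of batches of at most
# batch_size rows.
def _example_setup_predict_data_feeder():
  x = np.arange(10).reshape(5, 2)
  batches = setup_predict_data_feeder(x, batch_size=2)
  return [b.shape for b in batches]  # [(2, 2), (2, 2), (1, 2)]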
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
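# Illustrative sketch (not part of the original module): check_array converts
# lists and ndarrays to the requested dtype while leaving other containers
# (e.g. h5py datasets) untouched.
def _example_check_array():
  converted = check_array([1, 2, 3], dtype=np.float32)
  return converted.dtype  # dtype('float32')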
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
      x: One feature sample which can be either an Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or a dictionary of Nd numpy matrices.
      y: label vector, either floats for regression or class ids for
        classification. If a matrix, it will be treated as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
    self._y = None if y is None else \
       dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if y_is_dict else check_array(y, y.dtype)
# self.n_classes is not None means we're converting raw target indices to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict \
else _check_dtype(self._x.dtype)
# note: self._output_dtype = np.float32 when y is None
self._output_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict \
else _check_dtype(self._y.dtype) if y is not None else np.float32
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
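# Illustrative sketch (not part of the original module): typical DataFeeder
# wiring. input_builder() creates TensorFlow placeholders, so this helper is
# meant to be called inside a graph context and is left uninvoked here.
def _example_data_feeder_usage():
  x = np.random.rand(50, 3).astype(np.float32)
  y = np.random.randint(0, 2, size=50)
  feeder = DataFeeder(x, y, n_classes=2, batch_size=10)
  input_ph, output_ph = feeder.input_builder()  # placeholders for x and y
  feed_fn = feeder.get_feed_dict_fn()           # callable producing feed_dicts
  return input_ph, output_ph, feed_fn()         # one batch of feeds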
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
  Streaming data feeder allows reading data as it comes in from disk or
  from another source. It is common to have these iterators rotate infinitely
  over the dataset, so that the trainer side controls how much to learn.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
      x: iterator each element of which returns one feature sample. Sample can
        be an Nd numpy matrix or dictionary of Nd numpy matrices.
      y: iterator each element of which returns one label sample. Sample can be
        an Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
        classes / regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set
        to `None`, the iterators are assumed to return already batched
        elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
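# Illustrative sketch (not part of the original module): StreamingDataFeeder
# consumes per-sample iterators, so plain generators work. Labels are wrapped
# in 0-d numpy arrays so their shape can be inspected.
def _example_streaming_data_feeder():
  x_stream = (np.random.rand(3).astype(np.float32) for _ in range(100))
  y_stream = (np.array(i % 2) for i in range(100))
  feeder = StreamingDataFeeder(x_stream, y_stream, n_classes=2, batch_size=8)
  return feeder.input_shape, feeder.output_shape  # ([8, 3], [8, 2])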
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate, so use an
        int value for this if you want consistently sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
| apache-2.0 |
cbertinato/pandas | pandas/tests/test_downstream.py | 1 | 4179 | """
Testing that we work in the downstream packages
"""
import importlib
import subprocess
import sys
import numpy as np # noqa
import pytest
from pandas.compat import PY36
from pandas import DataFrame
from pandas.util import testing as tm
def import_module(name):
# we *only* want to skip if the module is truly not available
# and NOT just an actual import error because of pandas changes
if PY36:
try:
return importlib.import_module(name)
except ModuleNotFoundError: # noqa
pytest.skip("skipping as {} not available".format(name))
else:
try:
return importlib.import_module(name)
except ImportError as e:
if "No module named" in str(e) and name in str(e):
pytest.skip("skipping as {} not available".format(name))
raise
@pytest.fixture
def df():
return DataFrame({'A': [1, 2, 3]})
def test_dask(df):
toolz = import_module('toolz') # noqa
dask = import_module('dask') # noqa
import dask.dataframe as dd
ddf = dd.from_pandas(df, npartitions=3)
assert ddf.A is not None
assert ddf.compute() is not None
def test_xarray(df):
xarray = import_module('xarray') # noqa
assert df.to_xarray() is not None
def test_oo_optimizable():
# GH 21071
subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"])
@tm.network
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_statsmodels():
statsmodels = import_module('statsmodels') # noqa
import statsmodels.api as sm
import statsmodels.formula.api as smf
df = sm.datasets.get_rdataset("Guerry", "HistData").data
smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=df).fit()
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_scikit_learn(df):
sklearn = import_module('sklearn') # noqa
from sklearn import svm, datasets
digits = datasets.load_digits()
clf = svm.SVC(gamma=0.001, C=100.)
clf.fit(digits.data[:-1], digits.target[:-1])
clf.predict(digits.data[-1:])
# Cython import warning and traitlets
@tm.network
@pytest.mark.filterwarnings("ignore")
def test_seaborn():
seaborn = import_module('seaborn')
tips = seaborn.load_dataset("tips")
seaborn.stripplot(x="day", y="total_bill", data=tips)
def test_pandas_gbq(df):
pandas_gbq = import_module('pandas_gbq') # noqa
@pytest.mark.xfail(reason="0.7.0 pending")
@tm.network
def test_pandas_datareader():
pandas_datareader = import_module('pandas_datareader') # noqa
pandas_datareader.DataReader(
'F', 'quandl', '2017-01-01', '2017-02-01')
# importing from pandas, Cython import warning
@pytest.mark.filterwarnings("ignore:The 'warn':DeprecationWarning")
@pytest.mark.filterwarnings("ignore:pandas.util:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
@pytest.mark.skip(reason="gh-25778: geopandas stack issue")
def test_geopandas():
geopandas = import_module('geopandas') # noqa
fp = geopandas.datasets.get_path('naturalearth_lowres')
assert geopandas.read_file(fp) is not None
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
def test_pyarrow(df):
pyarrow = import_module('pyarrow') # noqa
table = pyarrow.Table.from_pandas(df)
result = table.to_pandas()
tm.assert_frame_equal(result, df)
@pytest.mark.xfail(reason="pandas-wheels-50", strict=False)
def test_missing_required_dependency():
# GH 23868
# To ensure proper isolation, we pass these flags
# -S : disable site-packages
# -s : disable user site-packages
# -E : disable PYTHON* env vars, especially PYTHONPATH
# And, that's apparently not enough, so we give up.
# https://github.com/MacPython/pandas-wheels/pull/50
call = ['python', '-sSE', '-c', 'import pandas']
with pytest.raises(subprocess.CalledProcessError) as exc:
subprocess.check_output(call, stderr=subprocess.STDOUT)
output = exc.value.stdout.decode()
for name in ['numpy', 'pytz', 'dateutil']:
assert name in output
| bsd-3-clause |
cgrima/rsr | rsr/fit.py | 1 | 4401 | """
Various tools for extracting signal components from a fit of the amplitude
distribution
"""
from . import pdf
from .Classdef import Statfit
import numpy as np
import time
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, report_fit
def param0(sample, method='basic'):
"""Estimate initial parameters for HK fitting
Arguments
---------
sample : sequence
amplitudes
Keywords
--------
method : string
method to compute the initial parameters
"""
if method == 'basic':
a = np.nanmean(sample)
s = np.nanstd(sample)
mu = 1.
return {'a':a, 's':s, 'mu':mu}
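# Illustrative sketch (not part of the original module): param0 on a synthetic
# amplitude sample; 'basic' simply uses the sample mean and standard deviation
# with mu fixed to 1.
def _example_param0():
    sample = np.random.rayleigh(scale=0.1, size=1000)
    return param0(sample)  # e.g. {'a': ~0.125, 's': ~0.065, 'mu': 1.0}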
def lmfit(sample, fit_model='hk', bins='auto', p0 = None,
xtol=1e-4, ftol=1e-4):
"""Lmfit
Arguments
---------
sample : sequence
amplitudes between 0 and 1.
Keywords
--------
fit_model : string
name of the function (in pdf module) to use for the fit
bins : string
method to compute the bin width (inherited from numpy.histogram)
p0 : dict
Initial parameters. If None, estimated automatically.
    xtol : float
        Relative error desired in the approximate solution, forwarded to the
        least-squares minimizer.
    ftol : float
        Relative error desired in the sum of squares, forwarded to the
        least-squares minimizer.
Return
------
A Statfit Class
"""
start = time.time()
winsize = len(sample)
bad = False
#--------------------------------------------------------------------------
# Clean sample
#--------------------------------------------------------------------------
sample = np.array(sample)
sample = sample[np.isfinite(sample)]
if len(sample) == 0:
bad = True
sample = np.zeros(10)+1
#--------------------------------------------------------------------------
# Make the histogram
#--------------------------------------------------------------------------
# n, edges, patches = hist(sample, bins=bins, normed=True)
n, edges = np.histogram(sample, bins=bins, density=True)
# plt.clf()
x = ((np.roll(edges, -1) + edges)/2.)[0:-1]
#--------------------------------------------------------------------------
# Initial Parameters for the fit
#--------------------------------------------------------------------------
if p0 is None:
p0 = param0(sample)
prm0 = Parameters()
# (Name, Value, Vary, Min, Max, Expr)
prm0.add('a', p0['a'], True, 0, 1, None)
prm0.add('s', p0['s'], True, 0, 1, None)
prm0.add('mu', p0['mu'], True, .5, 10, None)
prm0.add('pt', np.average(sample)**2,False, 0, 1, 'a**2+2*s**2*mu')
#if fit_model == 'hk':
# # From [Dutt and Greenleaf. 1994, eq.14]
# prm0.add('a4', np.average(sample)**4,False, 0, 1,
# '8*(1+1/mu)*s**4 + 8*s**2*s**2 + a**4')
#--------------------------------------------------------------------------
# Fit
#--------------------------------------------------------------------------
pdf2use = getattr(pdf, fit_model)
# use 'lbfgs' fit if error with 'leastsq' fit
try:
p = minimize(pdf2use, prm0, args=(x, n), method='leastsq',
xtol=xtol, ftol=ftol)
except KeyboardInterrupt:
raise
except:
print('!! Error with LEASTSQ fit, use L-BFGS-B instead')
p = minimize(pdf2use, prm0, args=(x, n), method='lbfgs')
#--------------------------------------------------------------------------
# Output
#--------------------------------------------------------------------------
elapsed = time.time() - start
values = {}
    # Create values dict for lmfit >0.9.0 compatibility since it is no longer
# in the minimize output
for i in p.params.keys():
values[i] = p.params[i].value
# Results
result = Statfit(sample, pdf2use, values, p.params,
p.chisqr, p.redchi, elapsed, p.nfev, p.message, p.success,
p.residual, x, n, edges, bins=bins)
# Identify bad results
if bad is True:
result.success = False
result.values['a'] = 0
result.values['s'] = 0
result.values['mu'] = 0
result.values['pt'] = 0
result.chisqr = 0
result.redchi = 0
result.message = 'No valid data in the sample'
result.residual = 0
return result
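# Illustrative sketch (not part of the original module): fitting a synthetic
# amplitude sample with the 'hk' model. The sample here is random, so exact
# outputs will vary from run to run.
def _example_lmfit():
    sample = np.clip(np.random.rayleigh(scale=0.1, size=2000), 0, 1)
    result = lmfit(sample, fit_model='hk', bins='auto')
    return result.values  # dict with keys 'a', 's', 'mu' and 'pt'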
| mit |
gpersistence/tstop | python/persistence/PartitionData.py | 1 | 8153 | #TSTOP
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
import os
import sys
import argparse
from math import ceil
import numpy
from sklearn.cross_validation import StratifiedKFold
from Datatypes.JSONObject import load_data, save_data
from Datatypes.Segments import SegmentInfo
from Datatypes.Configuration import Configuration
from Datatypes.TrainTestPartitions import TrainTestPartition, TrainTestPartitions
def PartitionData(segment_info, split, avoid_overlap=False, segment_size=0,
file_based=False, preserve_labels=False, override_preset=False, surpress_warning=False, seed=None) :
'''
    Accepts a list of Datatypes.Segments.SegmentInfo and a float between 0 and 1,
    and outputs a pair of lists of indices, (train, test), corresponding
    to a partition of the input list.
len(train) approximates split * len(segment_info)
Intersection of train and test is an empty set
Union of train and test is not guaranteed to be range(len(segment_info))
Optional arguments:
avoid_overlap omits entries in test that would have overlapping data with entries in train,
as indicated by the range [segment_start:segment_start+segment_size]
    segment_size interacts with avoid_overlap, because only segment_start is contained in the
SegmentInfo class
file_based creates partitions where segments with the same filename for source data are
in the same partition
    preserve_labels tries to split the populations of labels evenly
'''
segment_count = len(segment_info)
segment_range = range(segment_count)
# check to see if we have a preset train / test split for all data and we aren't overriding that
if not override_preset and [0 for s in segment_info if s.learning == None] == [] :
return TrainTestPartition([i for i in segment_range if segment_info[i].learning == 'train'],
[i for i in segment_range if segment_info[i].learning == 'test'], None)
train_goal_len = int(ceil(segment_count * split))
if preserve_labels :
labels = [s.max_label() for s in segment_info]
label_set = list(set(labels))
label_count = [(l0,len([l for l in labels if l == l0])) for l0 in label_set]
label_goal = [(str(l), int(round(c * split))) for (l,c) in label_count]
for ((l0,g),(l1,c)) in zip(label_goal, label_count) :
            if ((g == 0) or (g == c)) and not surpress_warning:
print "PartitionData warning: not enough entries (%d) of label %s to properly make a train / test split of ratio %s" % (c, l0, split)
label_goal = dict(label_goal)
train = []
test = []
if seed != None :
random.seed(seed)
state = random.getstate()
if file_based :
files = list(set([s.filename for s in segment_info]))
random.shuffle(files)
for f in files :
f_indices = [x for (x,y) in zip(segment_range, segment_info) if y.filename == f]
if preserve_labels :
f_labels = [str(labels[i]) for i in f_indices]
extend_train = True
for l in label_goal.keys() :
count = len([l0 for l0 in f_labels if l0 == l])
if count > label_goal[l] :
extend_train = False
break
if extend_train :
train.extend(f_indices)
for l in label_goal.keys() :
count = len([l0 for l0 in f_labels if l0 == l])
label_goal[l] = label_goal[l] - count
else :
test.extend(f_indices)
else :
if len(train) + len(f_indices) < train_goal_len :
train.extend(f_indices)
else :
test.extend(f_indices)
else :
random.shuffle(segment_range)
if preserve_labels :
for i in segment_range:
l = str(labels[i])
if label_goal[l] > 0 :
train.append(i)
label_goal[l] = label_goal[l] - 1
else :
test.append(i)
else :
train = segment_range[0:train_goal_len]
test = segment_range[train_goal_len:]
return TrainTestPartition(train,test,state)
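# Illustrative sketch (not part of the original file): PartitionData only needs
# the .learning, .filename and .max_label() members of each entry, so a
# hypothetical stand-in object (not part of Datatypes.Segments) is enough to
# show the split.
class _FakeSegment(object):
    def __init__(self, label, filename):
        self.learning = None
        self.filename = filename
        self.label = label
    def max_label(self):
        return self.label

def _example_partition_data():
    segments = [_FakeSegment(i % 2, "file_%d" % (i % 5)) for i in range(20)]
    split = PartitionData(segments, 0.75, preserve_labels=True, seed=42)
    # Roughly 75% of each label ends up in split.train, the rest in split.test.
    return len(split.train), len(split.test)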
def generate_partitions(config, segment_info, cv_iterations=0, seed=None) :
partition = PartitionData(segment_info,
config.learning_split,
avoid_overlap=True,
segment_size=config.segment_size,
file_based=True if (config.data_type == "BirdSoundsSegments" or
config.data_type == "KitchenMocapSegments") \
else False,
preserve_labels=True,
seed=seed)
all_labels = [segment_info[i].max_label() for i in partition.train]
if cv_iterations > 0 :
skf = StratifiedKFold(all_labels, n_folds=cv_iterations)
cross_validation = [TrainTestPartition([partition.train[i] for i in train_index],
[partition.train[i] for i in test_index], None) \
for train_index, test_index in skf]
else :
cross_validation = None
learning_trials = [PartitionData(segment_info,
config.learning_split,
avoid_overlap=True,
segment_size=config.segment_size,
file_based=True if (config.data_type == "BirdSoundsSegments" or
config.data_type == "KitchenMocapSegments") \
else False,
preserve_labels=True,
seed=None) for i in range(config.learning_iterations)]
return TrainTestPartitions(config, segment_info, cross_validation, learning_trials)
if __name__ == "__main__" :
parser = argparse.ArgumentParser("Tool to generate train / test splits for testing and cross validation")
parser.add_argument("--segments", "-i")
parser.add_argument("--outfile", "-o")
parser.add_argument("--learning-split", "-s", type=float)
parser.add_argument("--learning-iterations", "-I", type=int)
parser.add_argument("--cv-iterations", "-v", default=5, type=int)
parser.add_argument("--seed", "-S")
args = parser.parse_args(sys.argv[1:])
segments_json = load_data(args.segments, 'segments', None, None, sys.argv[0] + " : ")
if segments_json == None :
print "Could not load Segments from %s" % (args.segments,)
sys.exit(1)
segment_info = [SegmentInfo.fromJSONDict(s) for s in segments_json['segments']]
config = Configuration.fromJSONDict(segments_json['config'])
if args.learning_split != None :
config.learning_split = args.learning_split
if args.learning_iterations != None :
config.learning_iterations = args.learning_iterations
output = generate_partitions(config, segment_info, cv_iterations=args.cv_iterations, seed=args.seed)
if args.outfile == None :
args.outfile = TrainTestPartitions.get_partition_filename(config)
print "Writing %s" % (args.outfile,)
save_data(args.outfile, output.toJSONDict())
| gpl-3.0 |
rgommers/numpy | numpy/core/numeric.py | 7 | 76727 | import functools
import itertools
import operator
import sys
import warnings
import numbers
import numpy as np
from . import multiarray
from .multiarray import (
_fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS,
BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE,
WRAP, arange, array, asarray, asanyarray, ascontiguousarray,
asfortranarray, broadcast, can_cast, compare_chararrays,
concatenate, copyto, dot, dtype, empty,
empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring,
inner, lexsort, matmul, may_share_memory,
min_scalar_type, ndarray, nditer, nested_iters, promote_types,
putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
zeros, normalize_axis_index)
from . import overrides
from . import umath
from . import shape_base
from .overrides import set_array_function_like_doc, set_module
from .umath import (multiply, invert, sin, PINF, NAN)
from . import numerictypes
from .numerictypes import longlong, intc, int_, float_, complex_, bool_
from ._exceptions import TooHardError, AxisError
from ._ufunc_config import errstate
bitwise_not = invert
ufunc = type(sin)
newaxis = None
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
__all__ = [
'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
'arange', 'array', 'asarray', 'asanyarray', 'ascontiguousarray',
'asfortranarray', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
'fromstring', 'fromfile', 'frombuffer', 'where',
'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type',
'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian',
'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones',
'identity', 'allclose', 'compare_chararrays', 'putmask',
'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like',
'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
'MAY_SHARE_EXACT', 'TooHardError', 'AxisError']
@set_module('numpy')
class ComplexWarning(RuntimeWarning):
"""
The warning raised when casting a complex dtype to a real dtype.
As implemented, casting a complex number to a real discards its imaginary
part, but this behavior may not be what the user actually wants.
"""
pass
def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
return (a,)
@array_function_dispatch(_zeros_like_dispatcher)
def zeros_like(a, dtype=None, order='K', subok=True, shape=None):
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `a`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of zeros with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
full_like : Return a new array with shape of input filled with value.
zeros : Return a new array setting values to zero.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.zeros_like(x)
array([[0, 0, 0],
[0, 0, 0]])
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.])
>>> np.zeros_like(y)
array([0., 0., 0.])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
    # needed instead of a 0 to get the same result as zeros for string dtypes
z = zeros(1, dtype=res.dtype)
multiarray.copyto(res, z, casting='unsafe')
return res
def _ones_dispatcher(shape, dtype=None, order=None, *, like=None):
return(like,)
@set_array_function_like_doc
@set_module('numpy')
def ones(shape, dtype=None, order='C', *, like=None):
"""
Return a new array of given shape and type, filled with ones.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: C
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of ones with the given shape, dtype, and order.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
empty : Return a new uninitialized array.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Examples
--------
>>> np.ones(5)
array([1., 1., 1., 1., 1.])
>>> np.ones((5,), dtype=int)
array([1, 1, 1, 1, 1])
>>> np.ones((2, 1))
array([[1.],
[1.]])
>>> s = (2,2)
>>> np.ones(s)
array([[1., 1.],
[1., 1.]])
"""
if like is not None:
return _ones_with_like(shape, dtype=dtype, order=order, like=like)
a = empty(shape, dtype, order)
multiarray.copyto(a, 1, casting='unsafe')
return a
_ones_with_like = array_function_dispatch(
_ones_dispatcher
)(ones)
def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
return (a,)
@array_function_dispatch(_ones_like_dispatcher)
def ones_like(a, dtype=None, order='K', subok=True, shape=None):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `a`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of ones with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
ones : Return a new array setting values to one.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.ones_like(x)
array([[1, 1, 1],
[1, 1, 1]])
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.])
>>> np.ones_like(y)
array([1., 1., 1.])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
multiarray.copyto(res, 1, casting='unsafe')
return res
def _full_dispatcher(shape, fill_value, dtype=None, order=None, *, like=None):
return(like,)
@set_array_function_like_doc
@set_module('numpy')
def full(shape, fill_value, dtype=None, order='C', *, like=None):
"""
Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar or array_like
Fill value.
dtype : data-type, optional
        The desired data-type for the array. The default, None, means
        ``np.array(fill_value).dtype``.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
See Also
--------
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Examples
--------
>>> np.full((2, 2), np.inf)
array([[inf, inf],
[inf, inf]])
>>> np.full((2, 2), 10)
array([[10, 10],
[10, 10]])
>>> np.full((2, 2), [1, 2])
array([[1, 2],
[1, 2]])
"""
if like is not None:
return _full_with_like(shape, fill_value, dtype=dtype, order=order, like=like)
if dtype is None:
fill_value = asarray(fill_value)
dtype = fill_value.dtype
a = empty(shape, dtype, order)
multiarray.copyto(a, fill_value, casting='unsafe')
return a
_full_with_like = array_function_dispatch(
_full_dispatcher
)(full)
def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None):
return (a,)
@array_function_dispatch(_full_like_dispatcher)
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
fill_value : scalar
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `a`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of `fill_value` with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full : Return a new array of given shape filled with value.
Examples
--------
>>> x = np.arange(6, dtype=int)
>>> np.full_like(x, 1)
array([1, 1, 1, 1, 1, 1])
>>> np.full_like(x, 0.1)
array([0, 0, 0, 0, 0, 0])
>>> np.full_like(x, 0.1, dtype=np.double)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
>>> np.full_like(x, np.nan, dtype=np.double)
array([nan, nan, nan, nan, nan, nan])
>>> y = np.arange(6, dtype=np.double)
>>> np.full_like(y, 0.1)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
multiarray.copyto(res, fill_value, casting='unsafe')
return res
def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None):
return (a,)
@array_function_dispatch(_count_nonzero_dispatcher)
def count_nonzero(a, axis=None, *, keepdims=False):
"""
Counts the number of non-zero values in the array ``a``.
The word "non-zero" is in reference to the Python 2.x
built-in method ``__nonzero__()`` (renamed ``__bool__()``
in Python 3.x) of Python objects that tests an object's
"truthfulness". For example, any number is considered
truthful if it is nonzero, whereas any string is considered
truthful if it is not the empty string. Thus, this function
(recursively) counts how many elements in ``a`` (and in
sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()``
method evaluated to ``True``.
Parameters
----------
a : array_like
The array for which to count non-zeros.
axis : int or tuple, optional
Axis or tuple of axes along which to count non-zeros.
Default is None, meaning that non-zeros will be counted
along a flattened version of ``a``.
.. versionadded:: 1.12.0
keepdims : bool, optional
If this is set to True, the axes that are counted are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
.. versionadded:: 1.19.0
Returns
-------
count : int or array of int
Number of non-zero values in the array along a given axis.
Otherwise, the total number of non-zero values in the array
is returned.
See Also
--------
nonzero : Return the coordinates of all the non-zero values.
Examples
--------
>>> np.count_nonzero(np.eye(4))
4
>>> a = np.array([[0, 1, 7, 0],
... [3, 0, 2, 19]])
>>> np.count_nonzero(a)
5
>>> np.count_nonzero(a, axis=0)
array([1, 1, 2, 1])
>>> np.count_nonzero(a, axis=1)
array([2, 3])
>>> np.count_nonzero(a, axis=1, keepdims=True)
array([[2],
[3]])
"""
if axis is None and not keepdims:
return multiarray.count_nonzero(a)
a = asanyarray(a)
# TODO: this works around .astype(bool) not working properly (gh-9847)
if np.issubdtype(a.dtype, np.character):
a_bool = a != a.dtype.type()
else:
a_bool = a.astype(np.bool_, copy=False)
return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims)
@set_module('numpy')
def isfortran(a):
"""
Check if the array is Fortran contiguous but *not* C contiguous.
This function is obsolete and, because of changes due to relaxed stride
checking, its return value for the same array may differ for versions
of NumPy >= 1.10.0 and previous versions. If you only want to check if an
array is Fortran contiguous use ``a.flags.f_contiguous`` instead.
Parameters
----------
a : ndarray
Input array.
Returns
-------
isfortran : bool
Returns True if the array is Fortran contiguous but *not* C contiguous.
Examples
--------
np.array allows to specify whether the array is written in C-contiguous
order (last index varies the fastest), or FORTRAN-contiguous order in
memory (first index varies the fastest).
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F')
>>> b
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(b)
True
The transpose of a C-ordered array is a FORTRAN-ordered array.
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = a.T
>>> b
array([[1, 4],
[2, 5],
[3, 6]])
>>> np.isfortran(b)
True
C-ordered arrays evaluate as False even if they are also FORTRAN-ordered.
>>> np.isfortran(np.array([1, 2], order='F'))
False
"""
return a.flags.fnc
def _argwhere_dispatcher(a):
return (a,)
@array_function_dispatch(_argwhere_dispatcher)
def argwhere(a):
"""
Find the indices of array elements that are non-zero, grouped by element.
Parameters
----------
a : array_like
Input data.
Returns
-------
index_array : (N, a.ndim) ndarray
Indices of elements that are non-zero. Indices are grouped by element.
This array will have shape ``(N, a.ndim)`` where ``N`` is the number of
non-zero items.
See Also
--------
where, nonzero
Notes
-----
``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``,
but produces a result of the correct shape for a 0D array.
The output of ``argwhere`` is not suitable for indexing arrays.
For this purpose use ``nonzero(a)`` instead.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argwhere(x>1)
array([[0, 2],
[1, 0],
[1, 1],
[1, 2]])
"""
# nonzero does not behave well on 0d, so promote to 1d
if np.ndim(a) == 0:
a = shape_base.atleast_1d(a)
# then remove the added dimension
return argwhere(a)[:,:0]
return transpose(nonzero(a))
def _flatnonzero_dispatcher(a):
return (a,)
@array_function_dispatch(_flatnonzero_dispatcher)
def flatnonzero(a):
"""
Return indices that are non-zero in the flattened version of a.
This is equivalent to np.nonzero(np.ravel(a))[0].
Parameters
----------
a : array_like
Input data.
Returns
-------
res : ndarray
Output array, containing the indices of the elements of `a.ravel()`
that are non-zero.
See Also
--------
nonzero : Return the indices of the non-zero elements of the input array.
ravel : Return a 1-D array containing the elements of the input array.
Examples
--------
>>> x = np.arange(-2, 3)
>>> x
array([-2, -1, 0, 1, 2])
>>> np.flatnonzero(x)
array([0, 1, 3, 4])
Use the indices of the non-zero elements as an index array to extract
these elements:
>>> x.ravel()[np.flatnonzero(x)]
array([-2, -1, 1, 2])
"""
return np.nonzero(np.ravel(a))[0]
def _correlate_dispatcher(a, v, mode=None):
return (a, v)
@array_function_dispatch(_correlate_dispatcher)
def correlate(a, v, mode='valid'):
"""
Cross-correlation of two 1-dimensional sequences.
This function computes the correlation as generally defined in signal
processing texts::
c_{av}[k] = sum_n a[n+k] * conj(v[n])
with a and v sequences being zero-padded where necessary and conj being
the conjugate.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `convolve` docstring. Note that the default
is 'valid', unlike `convolve`, which uses 'full'.
old_behavior : bool
`old_behavior` was removed in NumPy 1.10. If you need the old
behavior, use `multiarray.correlate`.
Returns
-------
out : ndarray
Discrete cross-correlation of `a` and `v`.
See Also
--------
convolve : Discrete, linear convolution of two one-dimensional sequences.
multiarray.correlate : Old, no conjugate, version of correlate.
scipy.signal.correlate : uses FFT which has superior performance on large arrays.
Notes
-----
The definition of correlation above is not unique and sometimes correlation
may be defined differently. Another common definition is::
c'_{av}[k] = sum_n a[n] conj(v[n+k])
which is related to ``c_{av}[k]`` by ``c'_{av}[k] = c_{av}[-k]``.
    `numpy.correlate` may perform slowly in large arrays (e.g. n = 1e5) because it does
not use the FFT to compute the convolution; in that case, `scipy.signal.correlate` might
be preferable.
Examples
--------
>>> np.correlate([1, 2, 3], [0, 1, 0.5])
array([3.5])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
array([2. , 3.5, 3. ])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
array([0.5, 2. , 3.5, 3. , 0. ])
Using complex sequences:
>>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full')
array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ])
Note that you get the time reversed, complex conjugated result
when the two input sequences change places, i.e.,
``c_{va}[k] = c^{*}_{av}[-k]``:
>>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full')
array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j])
"""
return multiarray.correlate2(a, v, mode)
def _convolve_dispatcher(a, v, mode=None):
return (a, v)
@array_function_dispatch(_convolve_dispatcher)
def convolve(a, v, mode='full'):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
The convolution operator is often seen in signal processing, where it
models the effect of a linear time-invariant system on a signal [1]_. In
probability theory, the sum of two independent random variables is
distributed according to the convolution of their individual
distributions.
If `v` is longer than `a`, the arrays are swapped before computation.
Parameters
----------
a : (N,) array_like
First one-dimensional input array.
v : (M,) array_like
Second one-dimensional input array.
mode : {'full', 'valid', 'same'}, optional
'full':
By default, mode is 'full'. This returns the convolution
at each point of overlap, with an output shape of (N+M-1,). At
the end-points of the convolution, the signals do not overlap
completely, and boundary effects may be seen.
'same':
Mode 'same' returns output of length ``max(M, N)``. Boundary
effects are still visible.
'valid':
Mode 'valid' returns output of length
``max(M, N) - min(M, N) + 1``. The convolution product is only given
for points where the signals overlap completely. Values outside
the signal boundary have no effect.
Returns
-------
out : ndarray
Discrete, linear convolution of `a` and `v`.
See Also
--------
scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
Transform.
scipy.linalg.toeplitz : Used to construct the convolution operator.
polymul : Polynomial multiplication. Same output as convolve, but also
accepts poly1d objects as input.
Notes
-----
The discrete convolution operation is defined as
.. math:: (a * v)[n] = \\sum_{m = -\\infty}^{\\infty} a[m] v[n - m]
It can be shown that a convolution :math:`x(t) * y(t)` in time/space
is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
domain, after appropriate padding (padding is necessary to prevent
circular convolution). Since multiplication is more efficient (faster)
than convolution, the function `scipy.signal.fftconvolve` exploits the
FFT to calculate the convolution of large data-sets.
References
----------
.. [1] Wikipedia, "Convolution",
https://en.wikipedia.org/wiki/Convolution
Examples
--------
Note how the convolution operator flips the second array
before "sliding" the two across one another:
>>> np.convolve([1, 2, 3], [0, 1, 0.5])
array([0. , 1. , 2.5, 4. , 1.5])
Only return the middle values of the convolution.
Contains boundary effects, where zeros are taken
into account:
>>> np.convolve([1,2,3],[0,1,0.5], 'same')
array([1. , 2.5, 4. ])
The two arrays are of the same length, so there
is only one position where they completely overlap:
>>> np.convolve([1,2,3],[0,1,0.5], 'valid')
array([2.5])
"""
a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1)
if (len(v) > len(a)):
a, v = v, a
if len(a) == 0:
raise ValueError('a cannot be empty')
if len(v) == 0:
raise ValueError('v cannot be empty')
return multiarray.correlate(a, v[::-1], mode)
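# Editorial illustration (not part of the original source): a minimal sketch of
# the FFT route mentioned in the Notes -- zero-padding to length N+M-1 before
# multiplying the spectra avoids circular wrap-around. The helper name
# `_demo_convolve_via_fft` is hypothetical and is never called at import time.
def _demo_convolve_via_fft():
    import numpy as np
    a = np.array([1.0, 2.0, 3.0])
    v = np.array([0.0, 1.0, 0.5])
    n = a.size + v.size - 1                       # length of the 'full' output
    via_fft = np.fft.irfft(np.fft.rfft(a, n) * np.fft.rfft(v, n), n)
    return np.allclose(via_fft, np.convolve(a, v, mode='full'))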
def _outer_dispatcher(a, b, out=None):
return (a, b, out)
@array_function_dispatch(_outer_dispatcher)
def outer(a, b, out=None):
"""
Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
[[a0*b0 a0*b1 ... a0*bN ]
[a1*b0 .
[ ... .
[aM*b0 aM*bN ]]
Parameters
----------
a : (M,) array_like
First input vector. Input is flattened if
not already 1-dimensional.
b : (N,) array_like
Second input vector. Input is flattened if
not already 1-dimensional.
out : (M, N) ndarray, optional
A location where the result is stored
.. versionadded:: 1.9.0
Returns
-------
out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
--------
inner
einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
ufunc.outer : A generalization to dimensions other than 1D and other
operations. ``np.multiply.outer(a.ravel(), b.ravel())``
is the equivalent.
tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))``
is the equivalent.
References
----------
.. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
>>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
>>> im
array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
[0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
[0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
>>> grid = rl + im
>>> grid
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
[-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
[-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
[-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
[-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
An example using a "vector" of letters:
>>> x = np.array(['a', 'b', 'c'], dtype=object)
>>> np.outer(x, [1, 2, 3])
array([['a', 'aa', 'aaa'],
['b', 'bb', 'bbb'],
['c', 'cc', 'ccc']], dtype=object)
"""
a = asarray(a)
b = asarray(b)
return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out)
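# Editorial illustration (not part of the original source): the equivalences
# listed under "See also" above, checked numerically on a small pair of
# vectors. The helper name `_demo_outer_equivalences` is hypothetical and is
# never called at import time.
def _demo_outer_equivalences():
    import numpy as np
    a = np.arange(3.0)
    b = np.arange(4.0)
    ref = np.outer(a, b)
    return (np.array_equal(ref, np.einsum('i,j->ij', a, b))
            and np.array_equal(ref, np.multiply.outer(a.ravel(), b.ravel()))
            and np.array_equal(ref, np.tensordot(a, b, axes=((), ()))))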
def _tensordot_dispatcher(a, b, axes=None):
return (a, b)
@array_function_dispatch(_tensordot_dispatcher)
def tensordot(a, b, axes=2):
"""
Compute tensor dot product along specified axes.
Given two tensors, `a` and `b`, and an array_like object containing
two array_like objects, ``(a_axes, b_axes)``, sum the products of
`a`'s and `b`'s elements (components) over the axes specified by
``a_axes`` and ``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions
of `a` and the first ``N`` dimensions of `b` are summed over.
Parameters
----------
a, b : array_like
Tensors to "dot".
axes : int or (2,) array_like
* integer_like
If an int N, sum over the last N axes of `a` and the first N axes
of `b` in order. The sizes of the corresponding axes must match.
* (2,) array_like
Or, a list of axes to be summed over, first sequence applying to `a`,
second to `b`. Both sequences must be of the same length.
Returns
-------
output : ndarray
The tensor dot product of the input.
See Also
--------
dot, einsum
Notes
-----
Three common use cases are:
* ``axes = 0`` : tensor product :math:`a\\otimes b`
* ``axes = 1`` : tensor dot product :math:`a\\cdot b`
* ``axes = 2`` : (default) tensor double contraction :math:`a:b`
When `axes` is integer_like, the sequence for evaluation will be: first
the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
(N-1)th axis in `b` last.
When there is more than one axis to sum over - and they are not the last
(first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
The shape of the result consists of the non-contracted axes of the
first tensor, followed by the non-contracted axes of the second.
Examples
--------
A "traditional" example:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
>>> # A slower but equivalent way of computing the same...
>>> d = np.zeros((5,2))
>>> for i in range(5):
... for j in range(2):
... for k in range(3):
... for n in range(4):
... d[i,j] += a[k,n,i] * b[n,k,j]
>>> c == d
array([[ True, True],
[ True, True],
[ True, True],
[ True, True],
[ True, True]])
An extended example taking advantage of the overloading of + and \\*:
>>> a = np.array(range(1, 9))
>>> a.shape = (2, 2, 2)
>>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
>>> A.shape = (2, 2)
>>> a; A
array([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])
array([['a', 'b'],
['c', 'd']], dtype=object)
>>> np.tensordot(a, A) # third argument default is 2 for double-contraction
array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object)
>>> np.tensordot(a, A, 1)
array([[['acc', 'bdd'],
['aaacccc', 'bbbdddd']],
[['aaaaacccccc', 'bbbbbdddddd'],
['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object)
>>> np.tensordot(a, A, 0) # tensor product (result too long to incl.)
array([[[[['a', 'b'],
['c', 'd']],
...
>>> np.tensordot(a, A, (0, 1))
array([[['abbbbb', 'cddddd'],
['aabbbbbb', 'ccdddddd']],
[['aaabbbbbbb', 'cccddddddd'],
['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object)
>>> np.tensordot(a, A, (2, 1))
array([[['abb', 'cdd'],
['aaabbbb', 'cccdddd']],
[['aaaaabbbbbb', 'cccccdddddd'],
['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object)
>>> np.tensordot(a, A, ((0, 1), (0, 1)))
array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object)
>>> np.tensordot(a, A, ((2, 1), (1, 0)))
array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object)
"""
try:
iter(axes)
except Exception:
axes_a = list(range(-axes, 0))
axes_b = list(range(0, axes))
else:
axes_a, axes_b = axes
try:
na = len(axes_a)
axes_a = list(axes_a)
except TypeError:
axes_a = [axes_a]
na = 1
try:
nb = len(axes_b)
axes_b = list(axes_b)
except TypeError:
axes_b = [axes_b]
nb = 1
a, b = asarray(a), asarray(b)
as_ = a.shape
nda = a.ndim
bs = b.shape
ndb = b.ndim
equal = True
if na != nb:
equal = False
else:
for k in range(na):
if as_[axes_a[k]] != bs[axes_b[k]]:
equal = False
break
if axes_a[k] < 0:
axes_a[k] += nda
if axes_b[k] < 0:
axes_b[k] += ndb
if not equal:
raise ValueError("shape-mismatch for sum")
# Move the axes to sum over to the end of "a"
# and to the front of "b"
notin = [k for k in range(nda) if k not in axes_a]
newaxes_a = notin + axes_a
N2 = 1
for axis in axes_a:
N2 *= as_[axis]
newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2)
olda = [as_[axis] for axis in notin]
notin = [k for k in range(ndb) if k not in axes_b]
newaxes_b = axes_b + notin
N2 = 1
for axis in axes_b:
N2 *= bs[axis]
newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin])))
oldb = [bs[axis] for axis in notin]
at = a.transpose(newaxes_a).reshape(newshape_a)
bt = b.transpose(newaxes_b).reshape(newshape_b)
res = dot(at, bt)
return res.reshape(olda + oldb)
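# Editorial illustration (not part of the original source): for 2-D inputs,
# ``axes=1`` reduces to the ordinary matrix product, and ``axes=0`` to the
# outer product -- two of the special cases listed in the Notes. The helper
# name `_demo_tensordot_special_cases` is hypothetical and is never called at
# import time.
def _demo_tensordot_special_cases():
    import numpy as np
    a = np.arange(6.0).reshape(2, 3)
    b = np.arange(12.0).reshape(3, 4)
    matmul_ok = np.allclose(np.tensordot(a, b, axes=1), a @ b)
    x = np.arange(3.0)
    y = np.arange(4.0)
    outer_ok = np.allclose(np.tensordot(x, y, axes=0), np.outer(x, y))
    return matmul_ok and outer_ok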
def _roll_dispatcher(a, shift, axis=None):
return (a,)
@array_function_dispatch(_roll_dispatcher)
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at
the first.
Parameters
----------
a : array_like
Input array.
shift : int or tuple of ints
The number of places by which elements are shifted. If a tuple,
then `axis` must be a tuple of the same size, and each of the
given axes is shifted by the corresponding number. If an int
while `axis` is a tuple of ints, then the same value is used for
all given axes.
axis : int or tuple of ints, optional
Axis or axes along which elements are shifted. By default, the
array is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : ndarray
Output array, with the same shape as `a`.
See Also
--------
rollaxis : Roll the specified axis backwards, until it lies in a
given position.
Notes
-----
.. versionadded:: 1.12.0
Supports rolling over multiple dimensions simultaneously.
Examples
--------
>>> x = np.arange(10)
>>> np.roll(x, 2)
array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> np.roll(x, -2)
array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
>>> x2 = np.reshape(x, (2,5))
>>> x2
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> np.roll(x2, 1)
array([[9, 0, 1, 2, 3],
[4, 5, 6, 7, 8]])
>>> np.roll(x2, -1)
array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 0]])
>>> np.roll(x2, 1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> np.roll(x2, -1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> np.roll(x2, 1, axis=1)
array([[4, 0, 1, 2, 3],
[9, 5, 6, 7, 8]])
>>> np.roll(x2, -1, axis=1)
array([[1, 2, 3, 4, 0],
[6, 7, 8, 9, 5]])
"""
a = asanyarray(a)
if axis is None:
return roll(a.ravel(), shift, 0).reshape(a.shape)
else:
axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True)
broadcasted = broadcast(shift, axis)
if broadcasted.ndim > 1:
raise ValueError(
"'shift' and 'axis' should be scalars or 1D sequences")
shifts = {ax: 0 for ax in range(a.ndim)}
for sh, ax in broadcasted:
shifts[ax] += sh
rolls = [((slice(None), slice(None)),)] * a.ndim
for ax, offset in shifts.items():
offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters.
if offset:
# (original, result), (original, result)
rolls[ax] = ((slice(None, -offset), slice(offset, None)),
(slice(-offset, None), slice(None, offset)))
result = empty_like(a)
for indices in itertools.product(*rolls):
arr_index, res_index = zip(*indices)
result[res_index] = a[arr_index]
return result
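# Editorial illustration (not part of the original source): rolling over two
# axes at once (supported since 1.12, per the Notes) matches two sequential
# single-axis rolls. The helper name `_demo_roll_multiple_axes` is
# hypothetical and is never called at import time.
def _demo_roll_multiple_axes():
    import numpy as np
    x = np.arange(12).reshape(3, 4)
    together = np.roll(x, shift=(1, 2), axis=(0, 1))
    sequential = np.roll(np.roll(x, 1, axis=0), 2, axis=1)
    return np.array_equal(together, sequential)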
def _rollaxis_dispatcher(a, axis, start=None):
return (a,)
@array_function_dispatch(_rollaxis_dispatcher)
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
This function continues to be supported for backward compatibility, but you
should prefer `moveaxis`. The `moveaxis` function was added in NumPy
1.11.
Parameters
----------
a : ndarray
Input array.
axis : int
The axis to be rolled. The positions of the other axes do not
change relative to one another.
start : int, optional
When ``start <= axis``, the axis is rolled back until it lies in
this position. When ``start > axis``, the axis is rolled until it
lies before this position. The default, 0, results in a "complete"
roll. The following table describes how negative values of ``start``
are interpreted:
.. table::
:align: left
+-------------------+----------------------+
| ``start`` | Normalized ``start`` |
+===================+======================+
| ``-(arr.ndim+1)`` | raise ``AxisError`` |
+-------------------+----------------------+
| ``-arr.ndim`` | 0 |
+-------------------+----------------------+
| |vdots| | |vdots| |
+-------------------+----------------------+
| ``-1`` | ``arr.ndim-1`` |
+-------------------+----------------------+
| ``0`` | ``0`` |
+-------------------+----------------------+
| |vdots| | |vdots| |
+-------------------+----------------------+
| ``arr.ndim`` | ``arr.ndim`` |
+-------------------+----------------------+
| ``arr.ndim + 1`` | raise ``AxisError`` |
+-------------------+----------------------+
.. |vdots| unicode:: U+22EE .. Vertical Ellipsis
Returns
-------
res : ndarray
For NumPy >= 1.10.0 a view of `a` is always returned. For earlier
NumPy versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
See Also
--------
moveaxis : Move array axes to new positions.
roll : Roll the elements of an array by a number of positions along a
given axis.
Examples
--------
>>> a = np.ones((3,4,5,6))
>>> np.rollaxis(a, 3, 1).shape
(3, 6, 4, 5)
>>> np.rollaxis(a, 2).shape
(5, 3, 4, 6)
>>> np.rollaxis(a, 1, 4).shape
(3, 5, 6, 4)
"""
n = a.ndim
axis = normalize_axis_index(axis, n)
if start < 0:
start += n
msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
if not (0 <= start < n + 1):
raise AxisError(msg % ('start', -n, 'start', n + 1, start))
if axis < start:
# it's been removed
start -= 1
if axis == start:
return a[...]
axes = list(range(0, n))
axes.remove(axis)
axes.insert(start, axis)
return a.transpose(axes)
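# Editorial illustration (not part of the original source): the docstring
# recommends `moveaxis`; this sketch shows the two calls lining up for a
# "complete" roll of one axis to the front. The helper name
# `_demo_rollaxis_vs_moveaxis` is hypothetical and is never called at import time.
def _demo_rollaxis_vs_moveaxis():
    import numpy as np
    a = np.ones((3, 4, 5, 6))
    # Rolling axis 2 back to position 0 is the same as moving it to position 0.
    return np.rollaxis(a, 2).shape == np.moveaxis(a, 2, 0).shape == (5, 3, 4, 6)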
def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
"""
Normalizes an axis argument into a tuple of non-negative integer axes.
This handles shorthands such as ``1`` and converts them to ``(1,)``,
as well as performing the handling of negative indices covered by
`normalize_axis_index`.
By default, this forbids axes from being specified multiple times.
Used internally by multi-axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int, iterable of int
The un-normalized index or indices of the axis.
ndim : int
The number of dimensions of the array that `axis` should be normalized
against.
argname : str, optional
A prefix to put before the error message, typically the name of the
argument.
allow_duplicate : bool, optional
If False, the default, disallow an axis from being specified twice.
Returns
-------
normalized_axes : tuple of int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If any axis provided is out of range
ValueError
If an axis is repeated
See also
--------
normalize_axis_index : normalizing a single scalar axis
"""
# Optimization to speed-up the most common cases.
if type(axis) not in (tuple, list):
try:
axis = [operator.index(axis)]
except TypeError:
pass
# Going via an iterator directly is slower than via list comprehension.
axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
if not allow_duplicate and len(set(axis)) != len(axis):
if argname:
raise ValueError('repeated axis in `{}` argument'.format(argname))
else:
raise ValueError('repeated axis')
return axis
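# Editorial illustration (not part of the original source): a quick sketch of
# how scalar and negative axis arguments are normalized by the function above.
# The helper name `_demo_normalize_axis_tuple` is hypothetical and is never
# called at import time.
def _demo_normalize_axis_tuple():
    # A scalar becomes a 1-tuple; negative axes are wrapped modulo ndim.
    scalar_ok = normalize_axis_tuple(1, 3) == (1,)
    negative_ok = normalize_axis_tuple((-1, -3), 3) == (2, 0)
    return scalar_ok and negative_ok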
def _moveaxis_dispatcher(a, source, destination):
return (a,)
@array_function_dispatch(_moveaxis_dispatcher)
def moveaxis(a, source, destination):
"""
Move axes of an array to new positions.
Other axes remain in their original order.
.. versionadded:: 1.11.0
Parameters
----------
a : np.ndarray
The array whose axes should be reordered.
source : int or sequence of int
Original positions of the axes to move. These must be unique.
destination : int or sequence of int
Destination positions for each of the original axes. These must also be
unique.
Returns
-------
result : np.ndarray
Array with moved axes. This array is a view of the input array.
See Also
--------
transpose : Permute the dimensions of an array.
swapaxes : Interchange two axes of an array.
Examples
--------
>>> x = np.zeros((3, 4, 5))
>>> np.moveaxis(x, 0, -1).shape
(4, 5, 3)
>>> np.moveaxis(x, -1, 0).shape
(5, 3, 4)
These all achieve the same result:
>>> np.transpose(x).shape
(5, 4, 3)
>>> np.swapaxes(x, 0, -1).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1], [-1, -2]).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
(5, 4, 3)
"""
try:
# allow duck-array types if they define transpose
transpose = a.transpose
except AttributeError:
a = asarray(a)
transpose = a.transpose
source = normalize_axis_tuple(source, a.ndim, 'source')
destination = normalize_axis_tuple(destination, a.ndim, 'destination')
if len(source) != len(destination):
raise ValueError('`source` and `destination` arguments must have '
'the same number of elements')
order = [n for n in range(a.ndim) if n not in source]
for dest, src in sorted(zip(destination, source)):
order.insert(dest, src)
result = transpose(order)
return result
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
return moveaxis(a, axis, 0)
def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None):
return (a, b)
@array_function_dispatch(_cross_dispatcher)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : array_like
Components of the first vector(s).
b : array_like
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). Ignored if
both input vectors have dimension 2, as the return is scalar.
By default, the last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : ndarray
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
See Also
--------
inner : Inner product
outer : Outer product.
ix_ : Construct index arrays.
Notes
-----
.. versionadded:: 1.9.0
Supports full broadcasting of the inputs.
Examples
--------
Vector cross-product.
>>> x = [1, 2, 3]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([-3, 6, -3])
One vector with dimension 2.
>>> x = [1, 2]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Equivalently:
>>> x = [1, 2, 0]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Both vectors with dimension 2.
>>> x = [1,2]
>>> y = [4,5]
>>> np.cross(x, y)
array(-3)
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the `right-hand rule`.
>>> x = np.array([[1,2,3], [4,5,6]])
>>> y = np.array([[4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[-3, 6, -3],
[ 3, -6, 3]])
The orientation of `c` can be changed using the `axisc` keyword.
>>> np.cross(x, y, axisc=0)
array([[-3, 3],
[ 6, -6],
[-3, 3]])
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
>>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
>>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[ -6, 12, -6],
[ 0, 0, 0],
[ 6, -12, 6]])
>>> np.cross(x, y, axisa=0, axisb=0)
array([[-24, 48, -24],
[-30, 60, -30],
[-36, 72, -36]])
"""
if axis is not None:
axisa, axisb, axisc = (axis,) * 3
a = asarray(a)
b = asarray(b)
# Check axisa and axisb are within bounds
axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa')
axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')
# Move working axis to the end of the shape
a = moveaxis(a, axisa, -1)
b = moveaxis(b, axisb, -1)
msg = ("incompatible dimensions for cross product\n"
"(dimension must be 2 or 3)")
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
raise ValueError(msg)
# Create the output array
shape = broadcast(a[..., 0], b[..., 0]).shape
if a.shape[-1] == 3 or b.shape[-1] == 3:
shape += (3,)
# Check axisc is within bounds
axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc')
dtype = promote_types(a.dtype, b.dtype)
cp = empty(shape, dtype)
# create local aliases for readability
a0 = a[..., 0]
a1 = a[..., 1]
if a.shape[-1] == 3:
a2 = a[..., 2]
b0 = b[..., 0]
b1 = b[..., 1]
if b.shape[-1] == 3:
b2 = b[..., 2]
if cp.ndim != 0 and cp.shape[-1] == 3:
cp0 = cp[..., 0]
cp1 = cp[..., 1]
cp2 = cp[..., 2]
if a.shape[-1] == 2:
if b.shape[-1] == 2:
# a0 * b1 - a1 * b0
multiply(a0, b1, out=cp)
cp -= a1 * b0
return cp
else:
assert b.shape[-1] == 3
# cp0 = a1 * b2 - 0 (a2 = 0)
# cp1 = 0 - a0 * b2 (a2 = 0)
# cp2 = a0 * b1 - a1 * b0
multiply(a1, b2, out=cp0)
multiply(a0, b2, out=cp1)
negative(cp1, out=cp1)
multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
else:
assert a.shape[-1] == 3
if b.shape[-1] == 3:
# cp0 = a1 * b2 - a2 * b1
# cp1 = a2 * b0 - a0 * b2
# cp2 = a0 * b1 - a1 * b0
multiply(a1, b2, out=cp0)
tmp = array(a2 * b1)
cp0 -= tmp
multiply(a2, b0, out=cp1)
multiply(a0, b2, out=tmp)
cp1 -= tmp
multiply(a0, b1, out=cp2)
multiply(a1, b0, out=tmp)
cp2 -= tmp
else:
assert b.shape[-1] == 2
# cp0 = 0 - a2 * b1 (b2 = 0)
# cp1 = a2 * b0 - 0 (b2 = 0)
# cp2 = a0 * b1 - a1 * b0
multiply(a2, b1, out=cp0)
negative(cp0, out=cp0)
multiply(a2, b0, out=cp1)
multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
return moveaxis(cp, -1, axisc)
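# Editorial illustration (not part of the original source): the cross product
# of two 3-D vectors is orthogonal to both inputs, which gives a cheap sanity
# check of the kernel above. The helper name `_demo_cross_orthogonality` is
# hypothetical and is never called at import time.
def _demo_cross_orthogonality():
    import numpy as np
    x = np.array([1.0, 2.0, 3.0])
    y = np.array([4.0, 5.0, 6.0])
    c = np.cross(x, y)                            # [-3., 6., -3.]
    return np.isclose(np.dot(c, x), 0.0) and np.isclose(np.dot(c, y), 0.0)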
little_endian = (sys.byteorder == 'little')
@set_module('numpy')
def indices(dimensions, dtype=int, sparse=False):
"""
Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0, 1, ...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : dtype, optional
Data type of the result.
sparse : boolean, optional
Return a sparse representation of the grid instead of a dense
representation. Default is False.
.. versionadded:: 1.17
Returns
-------
grid : one ndarray or tuple of ndarrays
If sparse is False:
Returns one array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
If sparse is True:
Returns a tuple of arrays, with
``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with
dimensions[i] in the ith place
See Also
--------
mgrid, ogrid, meshgrid
Notes
-----
The output shape in the dense case is obtained by prepending the number
of dimensions in front of the tuple of dimensions, i.e. if `dimensions`
is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N, r0, ..., rN-1)``.
Each subarray ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k, i0, i1, ..., iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]])
>>> grid[1] # column indices
array([[0, 1, 2],
[0, 1, 2]])
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0, 1, 2],
[4, 5, 6]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
If sparse is set to true, the grid will be returned in a sparse
representation.
>>> i, j = np.indices((2, 3), sparse=True)
>>> i.shape
(2, 1)
>>> j.shape
(1, 3)
>>> i # row indices
array([[0],
[1]])
>>> j # column indices
array([[0, 1, 2]])
"""
dimensions = tuple(dimensions)
N = len(dimensions)
shape = (1,)*N
if sparse:
res = tuple()
else:
res = empty((N,)+dimensions, dtype=dtype)
for i, dim in enumerate(dimensions):
idx = arange(dim, dtype=dtype).reshape(
shape[:i] + (dim,) + shape[i+1:]
)
if sparse:
res = res + (idx,)
else:
res[i] = idx
return res
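# Editorial illustration (not part of the original source): the sparse grids
# broadcast against each other to reproduce the dense grid, which is the main
# reason to request ``sparse=True`` for large shapes. The helper name
# `_demo_indices_sparse_vs_dense` is hypothetical and is never called at
# import time.
def _demo_indices_sparse_vs_dense():
    import numpy as np
    dense = np.indices((2, 3))
    i, j = np.indices((2, 3), sparse=True)
    same_rows = np.array_equal(np.broadcast_to(i, (2, 3)), dense[0])
    same_cols = np.array_equal(np.broadcast_to(j, (2, 3)), dense[1])
    return same_rows and same_cols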
def _fromfunction_dispatcher(function, shape, *, dtype=None, like=None, **kwargs):
return (like,)
@set_array_function_like_doc
@set_module('numpy')
def fromfunction(function, shape, *, dtype=float, like=None, **kwargs):
"""
Construct an array by executing a function over each coordinate.
The resulting array therefore has a value ``fn(x, y, z)`` at
coordinate ``(x, y, z)``.
Parameters
----------
function : callable
The function is called with N parameters, where N is the rank of
`shape`. Each parameter represents the coordinates of the array
varying along a specific axis. For example, if `shape`
were ``(2, 2)``, then the parameters would be
``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])``
shape : (N,) tuple of ints
Shape of the output array, which also determines the shape of
the coordinate arrays passed to `function`.
dtype : data-type, optional
Data-type of the coordinate arrays passed to `function`.
By default, `dtype` is float.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
fromfunction : any
The result of the call to `function` is passed back directly.
Therefore the shape of `fromfunction` is completely determined by
`function`. If `function` returns a scalar value, the shape of
`fromfunction` would not match the `shape` parameter.
See Also
--------
indices, meshgrid
Notes
-----
Keywords other than `dtype` are passed to `function`.
Examples
--------
>>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
array([[ True, False, False],
[False, True, False],
[False, False, True]])
>>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4]])
"""
if like is not None:
return _fromfunction_with_like(function, shape, dtype=dtype, like=like, **kwargs)
args = indices(shape, dtype=dtype)
return function(*args, **kwargs)
_fromfunction_with_like = array_function_dispatch(
_fromfunction_dispatcher
)(fromfunction)
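# Editorial illustration (not part of the original source): as the body above
# shows, `fromfunction` is essentially ``function(*indices(shape, dtype=dtype))``,
# so the two spellings below agree. The helper name
# `_demo_fromfunction_vs_indices` is hypothetical and is never called at
# import time.
def _demo_fromfunction_vs_indices():
    import numpy as np
    via_fromfunction = np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
    via_indices = np.add(*np.indices((3, 3), dtype=int))
    return np.array_equal(via_fromfunction, via_indices)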
def _frombuffer(buf, dtype, shape, order):
return frombuffer(buf, dtype=dtype).reshape(shape, order=order)
@set_module('numpy')
def isscalar(element):
"""
Returns True if the type of `element` is a scalar type.
Parameters
----------
element : any
Input argument, can be of any type and shape.
Returns
-------
val : bool
True if `element` is a scalar type, False if it is not.
See Also
--------
ndim : Get the number of dimensions of an array
Notes
-----
If you need a stricter way to identify a *numerical* scalar, use
``isinstance(x, numbers.Number)``, as that returns ``False`` for most
non-numerical elements such as strings.
In most cases ``np.ndim(x) == 0`` should be used instead of this function,
as that will also return true for 0d arrays. This is how numpy overloads
functions in the style of the ``dx`` arguments to `gradient` and the ``bins``
argument to `histogram`. Some key differences:
+--------------------------------------+---------------+-------------------+
| x |``isscalar(x)``|``np.ndim(x) == 0``|
+======================================+===============+===================+
| PEP 3141 numeric objects (including | ``True`` | ``True`` |
| builtins) | | |
+--------------------------------------+---------------+-------------------+
| builtin string and buffer objects | ``True`` | ``True`` |
+--------------------------------------+---------------+-------------------+
| other builtin objects, like | ``False`` | ``True`` |
| `pathlib.Path`, `Exception`, | | |
| the result of `re.compile` | | |
+--------------------------------------+---------------+-------------------+
| third-party objects like | ``False`` | ``True`` |
| `matplotlib.figure.Figure` | | |
+--------------------------------------+---------------+-------------------+
| zero-dimensional numpy arrays | ``False`` | ``True`` |
+--------------------------------------+---------------+-------------------+
| other numpy arrays | ``False`` | ``False`` |
+--------------------------------------+---------------+-------------------+
| `list`, `tuple`, and other sequence | ``False`` | ``False`` |
| objects | | |
+--------------------------------------+---------------+-------------------+
Examples
--------
>>> np.isscalar(3.1)
True
>>> np.isscalar(np.array(3.1))
False
>>> np.isscalar([3.1])
False
>>> np.isscalar(False)
True
>>> np.isscalar('numpy')
True
NumPy supports PEP 3141 numbers:
>>> from fractions import Fraction
>>> np.isscalar(Fraction(5, 17))
True
>>> from numbers import Number
>>> np.isscalar(Number())
True
"""
return (isinstance(element, generic)
or type(element) in ScalarType
or isinstance(element, numbers.Number))
@set_module('numpy')
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
For negative numbers, if width is not given, a minus sign is added to the
front. If width is given, the two's complement of the number is
returned, with respect to that width.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. An N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
num : int
Only an integer decimal number can be used.
width : int, optional
The length of the returned string if `num` is positive, or the length
of the two's complement if `num` is negative, provided that `width` is
at least a sufficient number of bits for `num` to be represented in the
designated form.
If the `width` value is insufficient, it will be ignored, and `num` will
be returned in binary (`num` > 0) or two's complement (`num` < 0) form
with its width equal to the minimum number of bits needed to represent
the number in the designated form. This behavior is deprecated and will
later raise an error.
.. deprecated:: 1.12.0
Returns
-------
bin : str
Binary representation of `num` or two's complement of `num`.
See Also
--------
base_repr: Return a string representation of a number in the given base
system.
bin: Python's built-in binary representation generator of an integer.
Notes
-----
`binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
faster.
References
----------
.. [1] Wikipedia, "Two's complement",
https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
>>> np.binary_repr(3)
'11'
>>> np.binary_repr(-3)
'-11'
>>> np.binary_repr(3, width=4)
'0011'
The two's complement is returned when the input number is negative and
width is specified:
>>> np.binary_repr(-3, width=3)
'101'
>>> np.binary_repr(-3, width=5)
'11101'
"""
def warn_if_insufficient(width, binwidth):
if width is not None and width < binwidth:
warnings.warn(
"Insufficient bit width provided. This behavior "
"will raise an error in the future.", DeprecationWarning,
stacklevel=3)
# Ensure that num is a Python integer to avoid overflow or unwanted
# casts to floating point.
num = operator.index(num)
if num == 0:
return '0' * (width or 1)
elif num > 0:
binary = bin(num)[2:]
binwidth = len(binary)
outwidth = (binwidth if width is None
else max(binwidth, width))
warn_if_insufficient(width, binwidth)
return binary.zfill(outwidth)
else:
if width is None:
return '-' + bin(-num)[2:]
else:
poswidth = len(bin(-num)[2:])
# See gh-8679: remove extra digit
# for numbers at boundaries.
if 2**(poswidth - 1) == -num:
poswidth -= 1
twocomp = 2**(poswidth + 1) + num
binary = bin(twocomp)[2:]
binwidth = len(binary)
outwidth = max(binwidth, width)
warn_if_insufficient(width, binwidth)
return '1' * (outwidth - binwidth) + binary
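# Editorial illustration (not part of the original source): for a negative
# input with an explicit width, the returned bit string is the usual two's
# complement, i.e. it parses back to ``2**width + num``. The helper name
# `_demo_binary_repr_twos_complement` is hypothetical and is never called at
# import time.
def _demo_binary_repr_twos_complement():
    import numpy as np
    num, width = -3, 8
    bits = np.binary_repr(num, width=width)       # '11111101'
    return int(bits, 2) == 2**width + num         # 253 == 256 - 3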
@set_module('numpy')
def base_repr(number, base=2, padding=0):
"""
Return a string representation of a number in the given base system.
Parameters
----------
number : int
The value to convert. Positive and negative values are handled.
base : int, optional
Convert `number` to the `base` number system. The valid range is 2-36,
the default value is 2.
padding : int, optional
Number of zeros padded on the left. Default is 0 (no padding).
Returns
-------
out : str
String representation of `number` in `base` system.
See Also
--------
binary_repr : Faster version of `base_repr` for base 2.
Examples
--------
>>> np.base_repr(5)
'101'
>>> np.base_repr(6, 5)
'11'
>>> np.base_repr(7, base=5, padding=3)
'00012'
>>> np.base_repr(10, base=16)
'A'
>>> np.base_repr(32, base=16)
'20'
"""
digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if base > len(digits):
raise ValueError("Bases greater than 36 not handled in base_repr.")
elif base < 2:
raise ValueError("Bases less than 2 not handled in base_repr.")
num = abs(number)
res = []
while num:
res.append(digits[num % base])
num //= base
if padding:
res.append('0' * padding)
if number < 0:
res.append('-')
return ''.join(reversed(res or '0'))
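# Editorial illustration (not part of the original source): ignoring padding
# and sign, `base_repr` round-trips through Python's built-in ``int(string, base)``.
# The helper name `_demo_base_repr_roundtrip` is hypothetical and is never
# called at import time.
def _demo_base_repr_roundtrip():
    import numpy as np
    values = [5, 6, 7, 10, 32]
    return all(int(np.base_repr(v, base=16), 16) == v for v in values)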
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0], val) for name in dt.names]
return tuple(res)
def _identity_dispatcher(n, dtype=None, *, like=None):
return (like,)
@set_array_function_like_doc
@set_module('numpy')
def identity(n, dtype=None, *, like=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
Examples
--------
>>> np.identity(3)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""
if like is not None:
return _identity_with_like(n, dtype=dtype, like=like)
from numpy import eye
return eye(n, dtype=dtype, like=like)
_identity_with_like = array_function_dispatch(
_identity_dispatcher
)(identity)
def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
return (a, b)
@array_function_dispatch(_allclose_dispatcher)
def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns True if two arrays are element-wise equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
NaNs are treated as equal if they are in the same place and if
``equal_nan=True``. Infs are treated as equal if they are in the same
place and of the same sign in both arrays.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
.. versionadded:: 1.10.0
Returns
-------
allclose : bool
Returns True if the two arrays are equal within the given
tolerance; False otherwise.
See Also
--------
isclose, all, any, equal
Notes
-----
If the following equation is element-wise True, then allclose returns
True.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
``allclose(a, b)`` might be different from ``allclose(b, a)`` in
some rare cases.
The comparison of `a` and `b` uses standard broadcasting, which
means that `a` and `b` need not have the same shape in order for
``allclose(a, b)`` to evaluate to True. The same is true for
`equal` but not `array_equal`.
`allclose` is not defined for non-numeric data types.
Examples
--------
>>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
False
>>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
True
>>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
True
"""
res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))
return bool(res)
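# Editorial illustration (not part of the original source): because the
# tolerance is scaled by ``abs(b)`` only, swapping the arguments can flip the
# answer; the rtol below is exaggerated to make the asymmetry obvious. The
# helper name `_demo_allclose_asymmetry` is hypothetical and is never called
# at import time.
def _demo_allclose_asymmetry():
    import numpy as np
    forward = np.allclose(1.0, 1.5, rtol=0.4, atol=0.0)   # |1-1.5| <= 0.4*1.5 -> True
    backward = np.allclose(1.5, 1.0, rtol=0.4, atol=0.0)  # |1.5-1| <= 0.4*1.0 -> False
    return bool(forward) and not bool(backward)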
def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
return (a, b)
@array_function_dispatch(_isclose_dispatcher)
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
.. warning:: The default `atol` is not appropriate for comparing numbers
that are much smaller than one (see Notes).
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
math.isclose
Notes
-----
.. versionadded:: 1.7.0
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
Unlike the built-in `math.isclose`, the above equation is not symmetric
in `a` and `b` -- it assumes `b` is the reference value -- so that
`isclose(a, b)` might be different from `isclose(b, a)`. Furthermore,
the default value of atol is not zero, and is used to determine what
small values should be considered close to zero. The default value is
appropriate for expected values of order unity: if the expected values
are significantly smaller than one, it can result in false positives.
`atol` should be carefully selected for the use case at hand. A zero value
for `atol` will result in `False` if either `a` or `b` is zero.
`isclose` is not defined for non-numeric data types.
Examples
--------
>>> np.isclose([1e10,1e-7], [1.00001e10,1e-8])
array([ True, False])
>>> np.isclose([1e10,1e-8], [1.00001e10,1e-9])
array([ True, True])
>>> np.isclose([1e10,1e-8], [1.0001e10,1e-9])
array([False, True])
>>> np.isclose([1.0, np.nan], [1.0, np.nan])
array([ True, False])
>>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
array([ True, True])
>>> np.isclose([1e-8, 1e-7], [0.0, 0.0])
array([ True, False])
>>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0)
array([False, False])
>>> np.isclose([1e-10, 1e-10], [1e-20, 0.0])
array([ True, True])
>>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0)
array([False, True])
"""
def within_tol(x, y, atol, rtol):
with errstate(invalid='ignore'):
return less_equal(abs(x-y), atol + rtol * abs(y))
x = asanyarray(a)
y = asanyarray(b)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
# (e.g., for numpy.ma).
# NOTE: We explicitly allow timedelta, which used to work. This could
# possibly be deprecated. See also gh-18286.
# timedelta works if `atol` is an integer or also a timedelta.
# Although, the default tolerances are unlikely to be useful
if y.dtype.kind != "m":
dt = multiarray.result_type(y, 1.)
y = asanyarray(y, dtype=dt)
xfin = isfinite(x)
yfin = isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = zeros_like(finite, subok=True)
# Because we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * ones_like(cond)
y = y * ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
both_nan = isnan(x) & isnan(y)
# Needed to treat masked arrays correctly. = True would not work.
cond[both_nan] = both_nan[both_nan]
return cond[()] # Flatten 0d arrays to scalars
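# Editorial illustration (not part of the original source): the warning above
# about `atol` in action -- with the default ``atol=1e-8`` two numbers that
# differ by a factor of two still compare "close" when both are tiny, while
# ``atol=0`` restores a purely relative comparison. The helper name
# `_demo_isclose_atol_pitfall` is hypothetical and is never called at import time.
def _demo_isclose_atol_pitfall():
    import numpy as np
    with_default_atol = np.isclose(1e-9, 2e-9)             # True, dominated by atol
    with_zero_atol = np.isclose(1e-9, 2e-9, atol=0.0)      # False, rtol alone decides
    return bool(with_default_atol) and not bool(with_zero_atol)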
def _array_equal_dispatcher(a1, a2, equal_nan=None):
return (a1, a2)
@array_function_dispatch(_array_equal_dispatcher)
def array_equal(a1, a2, equal_nan=False):
"""
True if two arrays have the same shape and elements, False otherwise.
Parameters
----------
a1, a2 : array_like
Input arrays.
equal_nan : bool
Whether to compare NaN's as equal. If the dtype of a1 and a2 is
complex, values will be considered equal if either the real or the
imaginary component of a given value is ``nan``.
.. versionadded:: 1.19.0
Returns
-------
b : bool
Returns True if the arrays are equal.
See Also
--------
allclose: Returns True if two arrays are element-wise equal within a
tolerance.
array_equiv: Returns True if input arrays are shape consistent and all
elements equal.
Examples
--------
>>> np.array_equal([1, 2], [1, 2])
True
>>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
True
>>> np.array_equal([1, 2], [1, 2, 3])
False
>>> np.array_equal([1, 2], [1, 4])
False
>>> a = np.array([1, np.nan])
>>> np.array_equal(a, a)
False
>>> np.array_equal(a, a, equal_nan=True)
True
When ``equal_nan`` is True, complex values with nan components are
considered equal if either the real *or* the imaginary components are nan.
>>> a = np.array([1 + 1j])
>>> b = a.copy()
>>> a.real = np.nan
>>> b.imag = np.nan
>>> np.array_equal(a, b, equal_nan=True)
True
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
if a1.shape != a2.shape:
return False
if not equal_nan:
return bool(asarray(a1 == a2).all())
# Handling NaN values if equal_nan is True
a1nan, a2nan = isnan(a1), isnan(a2)
# NaN's occur at different locations
if not (a1nan == a2nan).all():
return False
# Shapes of a1, a2 and masks are guaranteed to be consistent by this point
return bool(asarray(a1[~a1nan] == a2[~a1nan]).all())
def _array_equiv_dispatcher(a1, a2):
return (a1, a2)
@array_function_dispatch(_array_equiv_dispatcher)
def array_equiv(a1, a2):
"""
Returns True if input arrays are shape consistent and all elements equal.
Shape consistent means they are either the same shape, or one input array
can be broadcasted to create the same shape as the other one.
Parameters
----------
a1, a2 : array_like
Input arrays.
Returns
-------
out : bool
True if equivalent, False otherwise.
Examples
--------
>>> np.array_equiv([1, 2], [1, 2])
True
>>> np.array_equiv([1, 2], [1, 3])
False
Showing the shape equivalence:
>>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
True
>>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
False
>>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
False
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
try:
multiarray.broadcast(a1, a2)
except Exception:
return False
return bool(asarray(a1 == a2).all())
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
def extend_all(module):
existing = set(__all__)
mall = getattr(module, '__all__')
for a in mall:
if a not in existing:
__all__.append(a)
from .umath import *
from .numerictypes import *
from . import fromnumeric
from .fromnumeric import *
from . import arrayprint
from .arrayprint import *
from . import _asarray
from ._asarray import *
from . import _ufunc_config
from ._ufunc_config import *
extend_all(fromnumeric)
extend_all(umath)
extend_all(numerictypes)
extend_all(arrayprint)
extend_all(_asarray)
extend_all(_ufunc_config)
| bsd-3-clause |
edonyM/emthesis | code/3point2plane.py | 1 | 3545 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2015-11-30 16:04
#
# Filename: 3point2plane.py
#
# Description: All Rights Are Reserved
#
"""
#import scipy as sp
#import math as m
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D as Ax3
#from scipy import stats as st
#from matplotlib import cm
import numpy as np
class PyColor(object):
""" This class is for colored print in the python interpreter!
"F3" call Addpy() function to add this class which is defined
in the .vimrc for vim Editor."""
def __init__(self):
self.self_doc = r"""
STYLE: \033['display model';'foreground';'background'm
DETAILS:
FOREGROUND BACKGROUND COLOR
---------------------------------------
30 40 black
31 41 red
32 42 green
33 43 yellow
34 44 blue
35 45 purple
36 46 cyan
37 47 white
DISPLAY MODEL DETAILS
-------------------------
0 default
1 highlight
4 underline
5 flicker
7 reverse
8 non-visible
e.g:
\033[1;31;40m <!--1-highlight;31-foreground red;40-background black-->
\033[0m <!--set all into default-->
"""
self.warningcolor = '\033[0;31m'
self.tipcolor = '\033[0;32m'
self.endcolor = '\033[0m'
self._newcolor = ''
@property
def new(self):
"""
Customized Python Print Color.
"""
return self._newcolor
@new.setter
def new(self, color_str):
"""
New Color.
"""
self._newcolor = color_str
def disable(self):
"""
Disable Color Print.
"""
self.warningcolor = ''
self.endcolor = ''
fig = plt.figure('3 point into plane')
ax = fig.gca(projection='3d')
X = np.arange(0, 10, 0.1)
Y = np.arange(0, 10, 0.1)
X, Y = np.meshgrid(X, Y)
Z = 5 - 0.3*X + 0.48*Y
p1 = [5.3, 0.1, 5-0.3*5.3+0.48*0.1]
p2 = [2.3, 0.7, 5-0.3*2.3+0.48*0.7]
p3 = [8.3, 3.1, 5-0.3*8.3+0.48*3.1]
ax.plot_surface(X, Y, Z, rstride=100, cstride=100, alpha=0.3)
ax.scatter(p1[0], p1[1], p1[2])
ax.scatter(p2[0], p2[1], p2[2])
ax.scatter(p3[0], p3[1], p3[2])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
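# Editorial illustration (not part of the original script): the three points
# above were constructed to lie on z = 5 - 0.3*x + 0.48*y, so a plane fitted
# through them should have a normal parallel to (0.3, -0.48, 1). The helper
# name `_plane_from_three_points` is hypothetical and is not called by the
# script.
def _plane_from_three_points(q1, q2, q3):
    import numpy as np
    q1, q2, q3 = np.asarray(q1, float), np.asarray(q2, float), np.asarray(q3, float)
    normal = np.cross(q2 - q1, q3 - q1)    # perpendicular to both in-plane edges
    d = np.dot(normal, q1)                 # plane equation: normal . p = d
    return normal, d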
| mit |
CVML/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
Second, we set alpha and compare the performance of different
feature selection methods, using the area under the precision-recall
curve (AUC).
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
abhijeet-talaulikar/Automatic-Helmet-Detection | K-Fold/Logistic_Regression.py | 1 | 2663 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import *
from timeit import default_timer as timer
from random import randint
from sklearn.feature_selection import *
from sklearn.decomposition import PCA
helmet_data = np.genfromtxt ('helmet.csv', delimiter=",")
face_data = np.genfromtxt ('face.csv', delimiter=",")
data_full = np.concatenate((helmet_data, face_data), 0)
np.random.shuffle(data_full) #shuffle the tuples
#feature reduction (on HOG part)
#gain, j = mutual_info_classif(data_full[:, 8:-1], data_full[:, -1], discrete_features='auto', n_neighbors=3, copy=True, random_state=None), 0
#for i in np.arange(len(gain)):
# if gain[i] <= 0.001:
# data_full = np.delete(data_full, 8+i-j, 1)
# j += 1
#data = np.copy(data_full)
#principal component analysis
pca = PCA(n_components=150)
data = pca.fit_transform(data_full[:, 8:-1])
data = np.concatenate((data_full[:, 0:8], data, np.array([data_full[:, -1]]).T), axis=1)
precision, recall, f1, accuracy, support, fn, roc_auc = 0, 0, 0, 0, 0, 0, 0
colors = ['cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange']
k = 10
kf = KFold(n_splits = k)
start = timer()
for train, test in kf.split(data):
X_train, X_test = data[train, 0:-1], data[test, 0:-1]
y_train, y_test = data[train, -1], data[test, -1]
clf = LogisticRegression().fit(X_train, y_train)
y_pred = clf.predict(X_test)
#ROC curve
y_prob = clf.predict_proba(X_test)[:,1]
fpr, tpr, thresholds = roc_curve(y_test, y_prob, pos_label=1)
roc_auc += auc(fpr, tpr)
plt.plot(fpr, tpr, color=colors[randint(0, len(colors)-1)])
precision += precision_score(y_test, y_pred, average = 'macro')
recall += recall_score(y_test, y_pred, average = 'macro')
f1 += f1_score(y_test, y_pred, average = 'macro')
accuracy += accuracy_score(y_test, y_pred)
y = y_test - y_pred
fn += sum(y[y > 0]) / len(y_test)
end = timer()
precision /= k
recall /= k
f1 /= k
accuracy /= k
fn /= k
print("Precision \t: %s" % round(precision, 4))
print("Recall \t\t: %s" % round(recall, 4))
print("F1 \t\t: %s" % round(f1, 4))
print("Accuracy \t: %s" % round(accuracy, 4))
print("False Neg \t: %s%%" % round(fn * 100, 4))
print("Mean AUC \t: %s" % round(roc_auc / k, 4))
print("\nExecution time: %s ms" % round((end - start) * 1000, 4))
#ROC curve
plt.title('Logistic Regression (Mean AUC = %s)' % round(roc_auc / k, 4))
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.05,1.0])
plt.ylim([0.0,1.05])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
| gpl-3.0 |
hansonrobotics/chatbot | src/chatbot/stats.py | 1 | 3618 | import os
import logging
import pandas as pd
import glob
import re
import datetime as dt
from collections import Counter
logger = logging.getLogger('hr.chatbot.stats')
trace_pattern = re.compile(
r'../(?P<fname>.*), (?P<tloc>\(.*\)), (?P<pname>.*), (?P<ploc>\(.*\))')
def collect_history_data(history_dir, days):
today = dt.datetime.utcnow()
dfs = []
for d in glob.glob('{}/*'.format(history_dir)):
if os.path.isdir(d):
dirname = os.path.basename(d)
dirdate = None
try:
dirdate = dt.datetime.strptime(dirname, '%Y%m%d')
except Exception as ex:
logger.error(ex)
if dirdate and (days == -1 or (today - dirdate).days < days):
for fname in glob.glob('{}/{}/*.csv'.format(history_dir, dirname)):
try:
dfs.append(pd.read_csv(fname))
except Exception as ex:
logger.warn("Reading {} error: {}".format(fname, ex))
if not dfs:
return None
df = pd.concat(dfs, ignore_index=True)
df = df[df.Datetime != 'Datetime'].sort(
['User', 'Datetime']).drop_duplicates()
return df
def history_stats(history_dir, days):
df = collect_history_data(history_dir, days)
if df is None:
return {}
if days == -1:
stats_csv = '{}/full_history.csv'.format(history_dir)
else:
stats_csv = '{}/last_{}_days.csv'.format(history_dir, days)
columns = [u'Datetime', u'Revision', u'User', u'BotName',
u'AnsweredBy', u'Question', u'Answer', u'Rate', u'Trace']
df.to_csv(stats_csv, index=False, columns=columns)
logger.info("Write statistic records to {}".format(stats_csv))
records = len(df)
rates = len(df[df.Rate.notnull()])
good_rates = len(df[df.Rate.isin(['good'])])
bad_rates = len(df[df.Rate.isin(['bad'])])
if records > 0:
csd = float(records - bad_rates) / records
response = {
'customers_satisfaction_degree': csd,
'number_of_records': records,
'number_of_rates': rates,
'number_of_good_rates': good_rates,
'number_of_bad_rates': bad_rates,
}
return response
def playback_history(df):
from client import Client
client = Client(os.environ.get('HR_CHATBOT_AUTHKEY', 'AAAAB3NzaC'), test=True)
pattern_column = []
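    # replay every recorded question against the live chatbot and collect the
    # names of the patterns that matched each answer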
for question in df.Question:
answer = client.ask(question, True)
traces = answer.get('trace')
patterns = []
if traces:
for trace in traces:
match_obj = trace_pattern.match(trace)
if match_obj:
patterns.append(match_obj.group('pname'))
pattern_column.append(patterns)
df.loc[:,'Pattern'] = pd.Series(pattern_column, index=df.index)
return df
def pattern_stats(history_dir, days):
df = collect_history_data(history_dir, days)
if df is None:
return {}
df = playback_history(df)
patterns = sum(df.Pattern, [])
counter = Counter(patterns)
pattern_freq = pd.Series(counter)
pattern_freq.sort(ascending=False)
stats_csv = '{}/pattern_frequency.csv'.format(history_dir)
pattern_freq.to_csv(stats_csv)
logger.info("Write pattern statistic to {}".format(stats_csv))
if __name__ == '__main__':
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
history_stats(os.path.expanduser('~/.hr/chatbot/history'), -1)
history_stats(os.path.expanduser('~/.hr/chatbot/history'), 7)
pattern_stats(os.path.expanduser('~/.hr/chatbot/history'), -1)
| mit |
eteq/bokeh | examples/interactions/interactive_bubble/data.py | 49 | 1265 | import numpy as np
from bokeh.palettes import Spectral6
def process_data():
from bokeh.sampledata.gapminder import fertility, life_expectancy, population, regions
# Make the column names ints not strings for handling
columns = list(fertility.columns)
years = list(range(int(columns[0]), int(columns[-1])))
rename_dict = dict(zip(columns, years))
fertility = fertility.rename(columns=rename_dict)
life_expectancy = life_expectancy.rename(columns=rename_dict)
population = population.rename(columns=rename_dict)
regions = regions.rename(columns=rename_dict)
# Turn population into bubble sizes. Use min_size and factor to tweak.
scale_factor = 200
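    # radius ~ sqrt(population / pi), so bubble *area* is proportional to population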
population_size = np.sqrt(population / np.pi) / scale_factor
min_size = 3
population_size = population_size.where(population_size >= min_size).fillna(min_size)
# Use pandas categories and categorize & color the regions
regions.Group = regions.Group.astype('category')
regions_list = list(regions.Group.cat.categories)
def get_color(r):
return Spectral6[regions_list.index(r.Group)]
regions['region_color'] = regions.apply(get_color, axis=1)
return fertility, life_expectancy, population_size, regions, years, regions_list
| bsd-3-clause |
metaml/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/lines.py | 69 | 48233 | """
This module contains all the 2D line class which can draw with a
variety of line styles, markers and colors.
"""
# TODO: expose cap and join style attrs
from __future__ import division
import warnings
import numpy as np
from numpy import ma
from matplotlib import verbose
import artist
from artist import Artist
from cbook import iterable, is_string_like, is_numlike, ls_mapper, dedent,\
flatten
from colors import colorConverter
from path import Path
from transforms import Affine2D, Bbox, TransformedPath, IdentityTransform
from matplotlib import rcParams
# special-purpose marker identifiers:
(TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN,
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN) = range(8)
# COVERAGE NOTE: Never called internally or from examples
def unmasked_index_ranges(mask, compressed = True):
warnings.warn("Import this directly from matplotlib.cbook",
DeprecationWarning)
# Warning added 2008/07/22
from matplotlib.cbook import unmasked_index_ranges as _unmasked_index_ranges
return _unmasked_index_ranges(mask, compressed=compressed)
def segment_hits(cx, cy, x, y, radius):
"""
Determine if any line segments are within radius of a
point. Returns the list of line segments that are within that
radius.
"""
# Process single points specially
if len(x) < 2:
res, = np.nonzero( (cx - x)**2 + (cy - y)**2 <= radius**2 )
return res
# We need to lop the last element off a lot.
xr,yr = x[:-1],y[:-1]
# Only look at line segments whose nearest point to C on the line
# lies within the segment.
dx,dy = x[1:]-xr, y[1:]-yr
Lnorm_sq = dx**2+dy**2 # Possibly want to eliminate Lnorm==0
u = ( (cx-xr)*dx + (cy-yr)*dy )/Lnorm_sq
candidates = (u>=0) & (u<=1)
#if any(candidates): print "candidates",xr[candidates]
# Note that there is a little area near one side of each point
# which will be near neither segment, and another which will
# be near both, depending on the angle of the lines. The
# following radius test eliminates these ambiguities.
point_hits = (cx - x)**2 + (cy - y)**2 <= radius**2
#if any(point_hits): print "points",xr[candidates]
candidates = candidates & ~(point_hits[:-1] | point_hits[1:])
# For those candidates which remain, determine how far they lie away
# from the line.
px,py = xr+u*dx,yr+u*dy
line_hits = (cx-px)**2 + (cy-py)**2 <= radius**2
#if any(line_hits): print "lines",xr[candidates]
line_hits = line_hits & candidates
points, = point_hits.ravel().nonzero()
lines, = line_hits.ravel().nonzero()
#print points,lines
return np.concatenate((points,lines))
class Line2D(Artist):
"""
A line - the line can have both a solid linestyle connecting all
the vertices, and a marker at each vertex. Additionally, the
drawing of the solid line is influenced by the drawstyle, eg one
can create "stepped" lines in various styles.
"""
lineStyles = _lineStyles = { # hidden names deprecated
'-' : '_draw_solid',
'--' : '_draw_dashed',
'-.' : '_draw_dash_dot',
':' : '_draw_dotted',
'None' : '_draw_nothing',
' ' : '_draw_nothing',
'' : '_draw_nothing',
}
_drawStyles_l = {
'default' : '_draw_lines',
'steps-mid' : '_draw_steps_mid',
'steps-pre' : '_draw_steps_pre',
'steps-post' : '_draw_steps_post',
}
_drawStyles_s = {
'steps' : '_draw_steps_pre',
}
drawStyles = {}
drawStyles.update(_drawStyles_l)
drawStyles.update(_drawStyles_s)
markers = _markers = { # hidden names deprecated
'.' : '_draw_point',
',' : '_draw_pixel',
'o' : '_draw_circle',
'v' : '_draw_triangle_down',
'^' : '_draw_triangle_up',
'<' : '_draw_triangle_left',
'>' : '_draw_triangle_right',
'1' : '_draw_tri_down',
'2' : '_draw_tri_up',
'3' : '_draw_tri_left',
'4' : '_draw_tri_right',
's' : '_draw_square',
'p' : '_draw_pentagon',
'*' : '_draw_star',
'h' : '_draw_hexagon1',
'H' : '_draw_hexagon2',
'+' : '_draw_plus',
'x' : '_draw_x',
'D' : '_draw_diamond',
'd' : '_draw_thin_diamond',
'|' : '_draw_vline',
'_' : '_draw_hline',
TICKLEFT : '_draw_tickleft',
TICKRIGHT : '_draw_tickright',
TICKUP : '_draw_tickup',
TICKDOWN : '_draw_tickdown',
CARETLEFT : '_draw_caretleft',
CARETRIGHT : '_draw_caretright',
CARETUP : '_draw_caretup',
CARETDOWN : '_draw_caretdown',
'None' : '_draw_nothing',
' ' : '_draw_nothing',
'' : '_draw_nothing',
}
filled_markers = ('o', '^', 'v', '<', '>',
's', 'd', 'D', 'h', 'H', 'p', '*')
zorder = 2
validCap = ('butt', 'round', 'projecting')
validJoin = ('miter', 'round', 'bevel')
def __str__(self):
if self._label != "":
return "Line2D(%s)"%(self._label)
elif hasattr(self, '_x') and len(self._x) > 3:
return "Line2D((%g,%g),(%g,%g),...,(%g,%g))"\
%(self._x[0],self._y[0],self._x[0],self._y[0],self._x[-1],self._y[-1])
elif hasattr(self, '_x'):
return "Line2D(%s)"\
%(",".join(["(%g,%g)"%(x,y) for x,y in zip(self._x,self._y)]))
else:
return "Line2D()"
def __init__(self, xdata, ydata,
linewidth = None, # all Nones default to rc
linestyle = None,
color = None,
marker = None,
markersize = None,
markeredgewidth = None,
markeredgecolor = None,
markerfacecolor = None,
antialiased = None,
dash_capstyle = None,
solid_capstyle = None,
dash_joinstyle = None,
solid_joinstyle = None,
pickradius = 5,
drawstyle = None,
**kwargs
):
"""
Create a :class:`~matplotlib.lines.Line2D` instance with *x*
and *y* data in sequences *xdata*, *ydata*.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
        See :meth:`set_linestyle` for a description of the line styles,
:meth:`set_marker` for a description of the markers, and
:meth:`set_drawstyle` for a description of the draw styles.
"""
Artist.__init__(self)
#convert sequences to numpy arrays
if not iterable(xdata):
raise RuntimeError('xdata must be a sequence')
if not iterable(ydata):
raise RuntimeError('ydata must be a sequence')
if linewidth is None : linewidth=rcParams['lines.linewidth']
if linestyle is None : linestyle=rcParams['lines.linestyle']
if marker is None : marker=rcParams['lines.marker']
if color is None : color=rcParams['lines.color']
if markersize is None : markersize=rcParams['lines.markersize']
if antialiased is None : antialiased=rcParams['lines.antialiased']
if dash_capstyle is None : dash_capstyle=rcParams['lines.dash_capstyle']
if dash_joinstyle is None : dash_joinstyle=rcParams['lines.dash_joinstyle']
if solid_capstyle is None : solid_capstyle=rcParams['lines.solid_capstyle']
if solid_joinstyle is None : solid_joinstyle=rcParams['lines.solid_joinstyle']
if drawstyle is None : drawstyle='default'
self.set_dash_capstyle(dash_capstyle)
self.set_dash_joinstyle(dash_joinstyle)
self.set_solid_capstyle(solid_capstyle)
self.set_solid_joinstyle(solid_joinstyle)
self.set_linestyle(linestyle)
self.set_drawstyle(drawstyle)
self.set_linewidth(linewidth)
self.set_color(color)
self.set_marker(marker)
self.set_antialiased(antialiased)
self.set_markersize(markersize)
self._dashSeq = None
self.set_markerfacecolor(markerfacecolor)
self.set_markeredgecolor(markeredgecolor)
self.set_markeredgewidth(markeredgewidth)
self._point_size_reduction = 0.5
self.verticalOffset = None
# update kwargs before updating data to give the caller a
# chance to init axes (and hence unit support)
self.update(kwargs)
self.pickradius = pickradius
if is_numlike(self._picker):
self.pickradius = self._picker
self._xorig = np.asarray([])
self._yorig = np.asarray([])
self._invalid = True
self.set_data(xdata, ydata)
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the line. The pick
radius determines the precision of the location test (usually
within five points of the value). Use
:meth:`~matplotlib.lines.Line2D.get_pickradius` or
:meth:`~matplotlib.lines.Line2D.set_pickradius` to view or
modify it.
Returns *True* if any values are within the radius along with
``{'ind': pointlist}``, where *pointlist* is the set of points
within the radius.
TODO: sort returned indices by distance
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not is_numlike(self.pickradius):
raise ValueError,"pick radius should be a distance"
# Make sure we have data to plot
if self._invalid:
self.recache()
if len(self._xy)==0: return False,{}
# Convert points to pixels
path, affine = self._transformed_path.get_transformed_path_and_affine()
path = affine.transform_path(path)
xy = path.vertices
xt = xy[:, 0]
yt = xy[:, 1]
# Convert pick radius from points to pixels
        if self.figure is None:
            warnings.warn('no figure set when checking if the mouse is on the line')
pixels = self.pickradius
else:
pixels = self.figure.dpi/72. * self.pickradius
# Check for collision
if self._linestyle in ['None',None]:
# If no line, return the nearby point(s)
d = (xt-mouseevent.x)**2 + (yt-mouseevent.y)**2
ind, = np.nonzero(np.less_equal(d, pixels**2))
else:
# If line, return the nearby segment(s)
ind = segment_hits(mouseevent.x,mouseevent.y,xt,yt,pixels)
# Debugging message
if False and self._label != u'':
print "Checking line",self._label,"at",mouseevent.x,mouseevent.y
print 'xt', xt
print 'yt', yt
#print 'dx,dy', (xt-mouseevent.x)**2., (yt-mouseevent.y)**2.
print 'ind',ind
# Return the point(s) within radius
return len(ind)>0,dict(ind=ind)
def get_pickradius(self):
'return the pick radius used for containment tests'
return self.pickradius
def setpickradius(self,d):
"""Sets the pick radius used for containment tests
ACCEPTS: float distance in points
"""
self.pickradius = d
def set_picker(self,p):
"""Sets the event picker details for the line.
ACCEPTS: float distance in points or callable pick function
``fn(artist, event)``
"""
if callable(p):
self._contains = p
else:
self.pickradius = p
self._picker = p
def get_window_extent(self, renderer):
bbox = Bbox.unit()
bbox.update_from_data_xy(self.get_transform().transform(self.get_xydata()),
ignore=True)
# correct for marker size, if any
if self._marker is not None:
ms = (self._markersize / 72.0 * self.figure.dpi) * 0.5
bbox = bbox.padded(ms)
return bbox
def set_axes(self, ax):
Artist.set_axes(self, ax)
if ax.xaxis is not None:
self._xcid = ax.xaxis.callbacks.connect('units', self.recache)
if ax.yaxis is not None:
self._ycid = ax.yaxis.callbacks.connect('units', self.recache)
set_axes.__doc__ = Artist.set_axes.__doc__
def set_data(self, *args):
"""
Set the x and y data
ACCEPTS: 2D array
"""
if len(args)==1:
x, y = args[0]
else:
x, y = args
not_masked = 0
if not ma.isMaskedArray(x):
x = np.asarray(x)
not_masked += 1
if not ma.isMaskedArray(y):
y = np.asarray(y)
not_masked += 1
if (not_masked < 2 or
(x is not self._xorig and
(x.shape != self._xorig.shape or np.any(x != self._xorig))) or
(y is not self._yorig and
(y.shape != self._yorig.shape or np.any(y != self._yorig)))):
self._xorig = x
self._yorig = y
self._invalid = True
def recache(self):
#if self.axes is None: print 'recache no axes'
#else: print 'recache units', self.axes.xaxis.units, self.axes.yaxis.units
if ma.isMaskedArray(self._xorig) or ma.isMaskedArray(self._yorig):
x = ma.asarray(self.convert_xunits(self._xorig), float)
y = ma.asarray(self.convert_yunits(self._yorig), float)
x = ma.ravel(x)
y = ma.ravel(y)
else:
x = np.asarray(self.convert_xunits(self._xorig), float)
y = np.asarray(self.convert_yunits(self._yorig), float)
x = np.ravel(x)
y = np.ravel(y)
if len(x)==1 and len(y)>1:
x = x * np.ones(y.shape, float)
if len(y)==1 and len(x)>1:
y = y * np.ones(x.shape, float)
if len(x) != len(y):
raise RuntimeError('xdata and ydata must be the same length')
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if ma.isMaskedArray(x) or ma.isMaskedArray(y):
self._xy = ma.concatenate((x, y), 1)
else:
self._xy = np.concatenate((x, y), 1)
self._x = self._xy[:, 0] # just a view
self._y = self._xy[:, 1] # just a view
# Masked arrays are now handled by the Path class itself
self._path = Path(self._xy)
self._transformed_path = TransformedPath(self._path, self.get_transform())
self._invalid = False
def set_transform(self, t):
"""
set the Transformation instance used by this artist
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Artist.set_transform(self, t)
self._invalid = True
# self._transformed_path = TransformedPath(self._path, self.get_transform())
def _is_sorted(self, x):
"return true if x is sorted"
if len(x)<2: return 1
return np.alltrue(x[1:]-x[0:-1]>=0)
def draw(self, renderer):
if self._invalid:
self.recache()
renderer.open_group('line2d')
if not self._visible: return
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self._color)
gc.set_antialiased(self._antialiased)
gc.set_linewidth(self._linewidth)
gc.set_alpha(self._alpha)
if self.is_dashed():
cap = self._dashcapstyle
join = self._dashjoinstyle
else:
cap = self._solidcapstyle
join = self._solidjoinstyle
gc.set_joinstyle(join)
gc.set_capstyle(cap)
gc.set_snap(self.get_snap())
funcname = self._lineStyles.get(self._linestyle, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path.get_transformed_path_and_affine()
self._lineFunc = getattr(self, funcname)
funcname = self.drawStyles.get(self._drawstyle, '_draw_lines')
drawFunc = getattr(self, funcname)
drawFunc(renderer, gc, tpath, affine.frozen())
if self._marker is not None:
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self.get_markeredgecolor())
gc.set_linewidth(self._markeredgewidth)
gc.set_alpha(self._alpha)
funcname = self._markers.get(self._marker, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path.get_transformed_points_and_affine()
markerFunc = getattr(self, funcname)
markerFunc(renderer, gc, tpath, affine.frozen())
renderer.close_group('line2d')
def get_antialiased(self): return self._antialiased
def get_color(self): return self._color
def get_drawstyle(self): return self._drawstyle
def get_linestyle(self): return self._linestyle
def get_linewidth(self): return self._linewidth
def get_marker(self): return self._marker
def get_markeredgecolor(self):
if (is_string_like(self._markeredgecolor) and
self._markeredgecolor == 'auto'):
if self._marker in self.filled_markers:
return 'k'
else:
return self._color
else:
return self._markeredgecolor
def get_markeredgewidth(self): return self._markeredgewidth
def get_markerfacecolor(self):
if (self._markerfacecolor is None or
(is_string_like(self._markerfacecolor) and
self._markerfacecolor.lower()=='none') ):
return self._markerfacecolor
elif (is_string_like(self._markerfacecolor) and
self._markerfacecolor.lower() == 'auto'):
return self._color
else:
return self._markerfacecolor
def get_markersize(self): return self._markersize
def get_data(self, orig=True):
"""
Return the xdata, ydata.
If *orig* is *True*, return the original data
"""
return self.get_xdata(orig=orig), self.get_ydata(orig=orig)
def get_xdata(self, orig=True):
"""
Return the xdata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._xorig
if self._invalid:
self.recache()
return self._x
def get_ydata(self, orig=True):
"""
Return the ydata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._yorig
if self._invalid:
self.recache()
return self._y
def get_path(self):
"""
Return the :class:`~matplotlib.path.Path` object associated
with this line.
"""
if self._invalid:
self.recache()
return self._path
def get_xydata(self):
"""
Return the *xy* data as a Nx2 numpy array.
"""
if self._invalid:
self.recache()
return self._xy
def set_antialiased(self, b):
"""
        True if the line should be drawn with antialiased rendering
ACCEPTS: [True | False]
"""
self._antialiased = b
def set_color(self, color):
"""
Set the color of the line
ACCEPTS: any matplotlib color
"""
self._color = color
def set_drawstyle(self, drawstyle):
"""
Set the drawstyle of the plot
'default' connects the points with lines. The steps variants
produce step-plots. 'steps' is equivalent to 'steps-pre' and
is maintained for backward-compatibility.
ACCEPTS: [ 'default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post' ]
"""
self._drawstyle = drawstyle
def set_linewidth(self, w):
"""
Set the line width in points
ACCEPTS: float value in points
"""
self._linewidth = w
def set_linestyle(self, linestyle):
"""
Set the linestyle of the line (also accepts drawstyles)
================ =================
linestyle description
================ =================
'-' solid
'--' dashed
'-.' dash_dot
':' dotted
'None' draw nothing
' ' draw nothing
'' draw nothing
================ =================
'steps' is equivalent to 'steps-pre' and is maintained for
backward-compatibility.
.. seealso::
:meth:`set_drawstyle`
ACCEPTS: [ '-' | '--' | '-.' | ':' | 'None' | ' ' | '' ] and
any drawstyle in combination with a linestyle, e.g. 'steps--'.
"""
# handle long drawstyle names before short ones !
for ds in flatten([k.keys() for k in (self._drawStyles_l,
self._drawStyles_s)], is_string_like):
if linestyle.startswith(ds):
self.set_drawstyle(ds)
if len(linestyle) > len(ds):
linestyle = linestyle[len(ds):]
else:
linestyle = '-'
if linestyle not in self._lineStyles:
if linestyle in ls_mapper:
linestyle = ls_mapper[linestyle]
else:
verbose.report('Unrecognized line style %s, %s' %
(linestyle, type(linestyle)))
if linestyle in [' ','']:
linestyle = 'None'
self._linestyle = linestyle
def set_marker(self, marker):
"""
Set the line marker
========== ==========================
marker description
========== ==========================
'.' point
',' pixel
'o' circle
'v' triangle_down
'^' triangle_up
'<' triangle_left
'>' triangle_right
'1' tri_down
'2' tri_up
'3' tri_left
'4' tri_right
's' square
'p' pentagon
'*' star
'h' hexagon1
'H' hexagon2
'+' plus
'x' x
'D' diamond
'd' thin_diamond
'|' vline
'_' hline
TICKLEFT tickleft
TICKRIGHT tickright
TICKUP tickup
TICKDOWN tickdown
CARETLEFT caretleft
CARETRIGHT caretright
CARETUP caretup
CARETDOWN caretdown
'None' nothing
' ' nothing
'' nothing
========== ==========================
ACCEPTS: [ '+' | '*' | ',' | '.' | '1' | '2' | '3' | '4'
| '<' | '>' | 'D' | 'H' | '^' | '_' | 'd'
| 'h' | 'o' | 'p' | 's' | 'v' | 'x' | '|'
| TICKUP | TICKDOWN | TICKLEFT | TICKRIGHT
| 'None' | ' ' | '' ]
"""
if marker not in self._markers:
verbose.report('Unrecognized marker style %s, %s' %
(marker, type(marker)))
if marker in [' ','']:
marker = 'None'
self._marker = marker
self._markerFunc = self._markers[marker]
def set_markeredgecolor(self, ec):
"""
Set the marker edge color
ACCEPTS: any matplotlib color
"""
if ec is None :
ec = 'auto'
self._markeredgecolor = ec
def set_markeredgewidth(self, ew):
"""
Set the marker edge width in points
ACCEPTS: float value in points
"""
if ew is None :
ew = rcParams['lines.markeredgewidth']
self._markeredgewidth = ew
def set_markerfacecolor(self, fc):
"""
Set the marker face color
ACCEPTS: any matplotlib color
"""
if fc is None :
fc = 'auto'
self._markerfacecolor = fc
def set_markersize(self, sz):
"""
Set the marker size in points
ACCEPTS: float
"""
self._markersize = sz
def set_xdata(self, x):
"""
Set the data np.array for x
ACCEPTS: 1D array
"""
x = np.asarray(x)
self.set_data(x, self._yorig)
def set_ydata(self, y):
"""
Set the data np.array for y
ACCEPTS: 1D array
"""
y = np.asarray(y)
self.set_data(self._xorig, y)
def set_dashes(self, seq):
"""
Set the dash sequence, sequence of dashes with on off ink in
points. If seq is empty or if seq = (None, None), the
linestyle will be set to solid.
ACCEPTS: sequence of on/off ink in points
"""
if seq == (None, None) or len(seq)==0:
self.set_linestyle('-')
else:
self.set_linestyle('--')
self._dashSeq = seq # TODO: offset ignored for now
def _draw_lines(self, renderer, gc, path, trans):
self._lineFunc(renderer, gc, path, trans)
def _draw_steps_pre(self, renderer, gc, path, trans):
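        # 'pre' steps: interleave extra vertices so each y change is drawn as a vertical
        # jump at the previous x, before the horizontal segment (step at the start of the interval)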
vertices = self._xy
steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
steps[0::2, 0], steps[1::2, 0] = vertices[:, 0], vertices[:-1, 0]
steps[0::2, 1], steps[1:-1:2, 1] = vertices[:, 1], vertices[1:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_post(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
steps[::2, 0], steps[1:-1:2, 0] = vertices[:, 0], vertices[1:, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:-1, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_mid(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices), 2), np.float_)
steps[1:-1:2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[2::2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[0, 0] = vertices[0, 0]
steps[-1, 0] = vertices[-1, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_nothing(self, *args, **kwargs):
pass
def _draw_solid(self, renderer, gc, path, trans):
gc.set_linestyle('solid')
renderer.draw_path(gc, path, trans)
def _draw_dashed(self, renderer, gc, path, trans):
gc.set_linestyle('dashed')
if self._dashSeq is not None:
gc.set_dashes(0, self._dashSeq)
renderer.draw_path(gc, path, trans)
def _draw_dash_dot(self, renderer, gc, path, trans):
gc.set_linestyle('dashdot')
renderer.draw_path(gc, path, trans)
def _draw_dotted(self, renderer, gc, path, trans):
gc.set_linestyle('dotted')
renderer.draw_path(gc, path, trans)
def _draw_point(self, renderer, gc, path, path_trans):
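        # a 'point' marker is just a circle drawn at a reduced size
        # (scaled by self._point_size_reduction)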
w = renderer.points_to_pixels(self._markersize) * \
self._point_size_reduction * 0.5
gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0)
rgbFace = self._get_rgb_face()
transform = Affine2D().scale(w)
renderer.draw_markers(
gc, Path.unit_circle(), transform, path, path_trans,
rgbFace)
_draw_pixel_transform = Affine2D().translate(-0.5, -0.5)
def _draw_pixel(self, renderer, gc, path, path_trans):
rgbFace = self._get_rgb_face()
gc.set_snap(False)
renderer.draw_markers(gc, Path.unit_rectangle(),
self._draw_pixel_transform,
path, path_trans, rgbFace)
def _draw_circle(self, renderer, gc, path, path_trans):
w = renderer.points_to_pixels(self._markersize) * 0.5
gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0)
rgbFace = self._get_rgb_face()
transform = Affine2D().scale(w, w)
renderer.draw_markers(
gc, Path.unit_circle(), transform, path, path_trans,
rgbFace)
_triangle_path = Path([[0.0, 1.0], [-1.0, -1.0], [1.0, -1.0], [0.0, 1.0]])
def _draw_triangle_up(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_down(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, -offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_left(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset).rotate_deg(90)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_right(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset).rotate_deg(-90)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_square(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 2.0)
side = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5).scale(side)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_diamond(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
side = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45).scale(side)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_thin_diamond(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5) \
.rotate_deg(45).scale(offset * 0.6, offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_pentagon(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(5), transform,
path, path_trans, rgbFace)
def _draw_star(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
_starpath = Path.unit_regular_star(5, innerCircle=0.381966)
renderer.draw_markers(gc, _starpath, transform,
path, path_trans, rgbFace)
def _draw_hexagon1(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform,
path, path_trans, rgbFace)
def _draw_hexagon2(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(30)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform,
path, path_trans, rgbFace)
_line_marker_path = Path([[0.0, -1.0], [0.0, 1.0]])
def _draw_vline(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._line_marker_path, transform,
path, path_trans)
def _draw_hline(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._line_marker_path, transform,
path, path_trans)
_tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]])
def _draw_tickleft(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(-offset, 1.0)
renderer.draw_markers(gc, self._tickhoriz_path, marker_transform,
path, path_trans)
def _draw_tickright(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(offset, 1.0)
renderer.draw_markers(gc, self._tickhoriz_path, marker_transform,
path, path_trans)
_tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]])
def _draw_tickup(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(1.0, offset)
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
path, path_trans)
def _draw_tickdown(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(1.0, -offset)
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
path, path_trans)
_plus_path = Path([[-1.0, 0.0], [1.0, 0.0],
[0.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_plus(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._plus_path, transform,
path, path_trans)
_tri_path = Path([[0.0, 0.0], [0.0, -1.0],
[0.0, 0.0], [0.8, 0.5],
[0.0, 0.0], [-0.8, 0.5]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_tri_down(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_up(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(180)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_left(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_right(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(270)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
_caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]])
def _draw_caretdown(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretup(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(180)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretleft(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(270)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretright(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
_x_path = Path([[-1.0, -1.0], [1.0, 1.0],
[-1.0, 1.0], [1.0, -1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_x(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._x_path, transform,
path, path_trans)
def update_from(self, other):
'copy properties from other to self'
Artist.update_from(self, other)
self._linestyle = other._linestyle
self._linewidth = other._linewidth
self._color = other._color
self._markersize = other._markersize
self._markerfacecolor = other._markerfacecolor
self._markeredgecolor = other._markeredgecolor
self._markeredgewidth = other._markeredgewidth
self._dashSeq = other._dashSeq
self._dashcapstyle = other._dashcapstyle
self._dashjoinstyle = other._dashjoinstyle
self._solidcapstyle = other._solidcapstyle
self._solidjoinstyle = other._solidjoinstyle
self._linestyle = other._linestyle
self._marker = other._marker
self._drawstyle = other._drawstyle
def _get_rgb_face(self):
facecolor = self.get_markerfacecolor()
if is_string_like(facecolor) and facecolor.lower()=='none':
rgbFace = None
else:
rgbFace = colorConverter.to_rgb(facecolor)
return rgbFace
# some aliases....
def set_aa(self, val):
'alias for set_antialiased'
self.set_antialiased(val)
def set_c(self, val):
'alias for set_color'
self.set_color(val)
def set_ls(self, val):
'alias for set_linestyle'
self.set_linestyle(val)
def set_lw(self, val):
'alias for set_linewidth'
self.set_linewidth(val)
def set_mec(self, val):
'alias for set_markeredgecolor'
self.set_markeredgecolor(val)
def set_mew(self, val):
'alias for set_markeredgewidth'
self.set_markeredgewidth(val)
def set_mfc(self, val):
'alias for set_markerfacecolor'
self.set_markerfacecolor(val)
def set_ms(self, val):
'alias for set_markersize'
self.set_markersize(val)
def get_aa(self):
'alias for get_antialiased'
return self.get_antialiased()
def get_c(self):
'alias for get_color'
return self.get_color()
def get_ls(self):
'alias for get_linestyle'
return self.get_linestyle()
def get_lw(self):
'alias for get_linewidth'
return self.get_linewidth()
def get_mec(self):
'alias for get_markeredgecolor'
return self.get_markeredgecolor()
def get_mew(self):
'alias for get_markeredgewidth'
return self.get_markeredgewidth()
def get_mfc(self):
'alias for get_markerfacecolor'
return self.get_markerfacecolor()
def get_ms(self):
'alias for get_markersize'
return self.get_markersize()
def set_dash_joinstyle(self, s):
"""
Set the join style for dashed linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_dash_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._dashjoinstyle = s
def set_solid_joinstyle(self, s):
"""
Set the join style for solid linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_solid_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._solidjoinstyle = s
def get_dash_joinstyle(self):
"""
Get the join style for dashed linestyles
"""
return self._dashjoinstyle
def get_solid_joinstyle(self):
"""
Get the join style for solid linestyles
"""
return self._solidjoinstyle
def set_dash_capstyle(self, s):
"""
Set the cap style for dashed linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_dash_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._dashcapstyle = s
def set_solid_capstyle(self, s):
"""
Set the cap style for solid linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_solid_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._solidcapstyle = s
def get_dash_capstyle(self):
"""
Get the cap style for dashed linestyles
"""
return self._dashcapstyle
def get_solid_capstyle(self):
"""
Get the cap style for solid linestyles
"""
return self._solidcapstyle
def is_dashed(self):
'return True if line is dashstyle'
return self._linestyle in ('--', '-.', ':')
class VertexSelector:
"""
Manage the callbacks to maintain a list of selected vertices for
:class:`matplotlib.lines.Line2D`. Derived classes should override
:meth:`~matplotlib.lines.VertexSelector.process_selected` to do
something with the picks.
Here is an example which highlights the selected verts with red
circles::
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
class HighlightSelected(lines.VertexSelector):
def __init__(self, line, fmt='ro', **kwargs):
lines.VertexSelector.__init__(self, line)
self.markers, = self.axes.plot([], [], fmt, **kwargs)
def process_selected(self, ind, xs, ys):
self.markers.set_data(xs, ys)
self.canvas.draw()
fig = plt.figure()
ax = fig.add_subplot(111)
x, y = np.random.rand(2, 30)
line, = ax.plot(x, y, 'bs-', picker=5)
selector = HighlightSelected(line)
plt.show()
"""
def __init__(self, line):
"""
Initialize the class with a :class:`matplotlib.lines.Line2D`
instance. The line should already be added to some
:class:`matplotlib.axes.Axes` instance and should have the
picker property set.
"""
if not hasattr(line, 'axes'):
raise RuntimeError('You must first add the line to the Axes')
if line.get_picker() is None:
raise RuntimeError('You must first set the picker property of the line')
self.axes = line.axes
self.line = line
self.canvas = self.axes.figure.canvas
self.cid = self.canvas.mpl_connect('pick_event', self.onpick)
self.ind = set()
def process_selected(self, ind, xs, ys):
"""
Default "do nothing" implementation of the
:meth:`process_selected` method.
*ind* are the indices of the selected vertices. *xs* and *ys*
are the coordinates of the selected vertices.
"""
pass
def onpick(self, event):
        'When the line is picked, update the set of selected indices.'
if event.artist is not self.line: return
for i in event.ind:
if i in self.ind:
self.ind.remove(i)
else:
self.ind.add(i)
ind = list(self.ind)
ind.sort()
xdata, ydata = self.line.get_data()
self.process_selected(ind, xdata[ind], ydata[ind])
lineStyles = Line2D._lineStyles
lineMarkers = Line2D._markers
drawStyles = Line2D.drawStyles
artist.kwdocd['Line2D'] = artist.kwdoc(Line2D)
# You can not set the docstring of an instancemethod,
# but you can on the underlying function. Go figure.
Line2D.__init__.im_func.__doc__ = dedent(Line2D.__init__.__doc__) % artist.kwdocd
| agpl-3.0 |
KDD-OpenSource/geox-young-academy | day-3/Kalman-filter_Mark.py | 1 | 1494 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 11 10:10:24 2017
@author: Mark
"""
import numpy as np
import matplotlib.pyplot as plt
#Define functions
def model(state_0,A,B):
state_1 = A*state_0 + np.random.normal(0,B)
return state_1
state_null=np.random.normal(0,0.4)
def observation_function(state,R):
obs=state+np.random.normal(0,R)
return obs
def forecast(state_0,cov_0,A,B):
state_1=A*state_0
cov_1=A*cov_0*A+B
return state_1,cov_1
def analysis_formulas(state_1_hat,cov_1_hat,K,H,obs_0):
state_1 = state_1_hat - K*(H*state_1_hat - obs_0)
cov_1 = cov_1_hat - K*H*cov_1_hat
return state_1, cov_1
def kalman_gain(cov_1_hat,H,R):
K = cov_1_hat*H*(R+H*cov_1_hat*H)**(-1)
return K
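# Scalar Kalman filter equations implemented above:
#   forecast:  x_hat = A*x,                P_hat = A*P*A + B
#   gain:      K = P_hat*H / (R + H*P_hat*H)
#   analysis:  x = x_hat - K*(H*x_hat - obs),   P = P_hat - K*H*P_hat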
#Initialize model parameters
A = 0.5
H = 1
B = 0.5
R = 0.1
lev = 100
#Synthetic Model
STATE_real = np.zeros(lev)
OBS_real = np.zeros(lev)
STATE_real[0] = np.random.normal(5,0.1)
OBS_real[0] = observation_function(STATE_real[0],R)
for i in range(1, lev):
STATE_real[i] = model(STATE_real[i-1],0.4,0.01)
OBS_real[i] = observation_function(STATE_real[i],R)
#Kalman-filter
STATE = np.zeros(lev)
COV = np.zeros(lev)
STATE[0] = state_null
COV[0] = B
for i in range(1, lev):
(state_hat,cov_hat) = forecast(STATE[i-1],COV[i-1],A,B)
K = kalman_gain(cov_hat,H,R)
(STATE[i],COV[i]) = analysis_formulas(state_hat,cov_hat,K,H,OBS_real[i])
plt.plot(STATE)
plt.plot(STATE_real)
| mit |
carlvlewis/bokeh | bokeh/charts/builder/tests/test_line_builder.py | 33 | 2376 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Line
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestLine(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
xyvaluesdf = pd.DataFrame(xyvalues)
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Line, _xy)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_pypy'], y_pypy)
assert_array_equal(builder._data['y_jython'], y_jython)
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Line, _xy)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
| bsd-3-clause |
nikhilgahlawat/ThinkStats2 | code/populations.py | 68 | 2609 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import csv
import logging
import sys
import numpy as np
import pandas
import thinkplot
import thinkstats2
def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
df = pandas.read_csv(filename, header=None, skiprows=2,
encoding='iso-8859-1')
populations = df[7]
populations.replace(0, np.nan, inplace=True)
return populations.dropna()
def MakeFigures():
"""Plots the CDF of populations in several forms.
On a log-log scale the tail of the CCDF looks like a straight line,
which suggests a Pareto distribution, but that turns out to be misleading.
On a log-x scale the distribution has the characteristic sigmoid of
a lognormal distribution.
The normal probability plot of log(sizes) confirms that the data fit the
lognormal model very well.
Many phenomena that have been described with Pareto models can be described
as well, or better, with lognormal models.
"""
pops = ReadData()
print('Number of cities/towns', len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log')
thinkplot.Save(root='populations_pareto')
# lognormal plot
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF')
thinkplot.SubPlot(2)
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='z',
ylabel='log10 population',
xlim=[-5, 5])
thinkplot.Save(root='populations_normal')
def main():
thinkstats2.RandomSeed(17)
MakeFigures()
if __name__ == "__main__":
main()
| gpl-3.0 |
waddell/urbansim | urbansim/urbanchoice/mnl.py | 4 | 9002 | """
Number crunching code for multinomial logit.
``mnl_estimate`` and ``mnl_simulate`` especially are used by
``urbansim.models.lcm``.
"""
from __future__ import print_function
import logging
import numpy as np
import pandas as pd
import scipy.optimize
import pmat
from pmat import PMAT
from ..utils.logutil import log_start_finish
logger = logging.getLogger(__name__)
# right now MNL can only estimate location choice models, where every equation
# is the same
# it might be better to use stats models for a non-location choice problem
# data should be column matrix of dimensions NUMVARS x (NUMALTS*NUMOBVS)
# beta is a row vector of dimensions 1 X NUMVARS
def mnl_probs(data, beta, numalts):
    logger.debug('start: calculate MNL probabilities')
clamp = data.typ == 'numpy'
utilities = beta.multiply(data)
if numalts == 0:
raise Exception("Number of alternatives is zero")
utilities.reshape(numalts, utilities.size() / numalts)
exponentiated_utility = utilities.exp(inplace=True)
if clamp:
exponentiated_utility.inftoval(1e20)
if clamp:
exponentiated_utility.clamptomin(1e-300)
sum_exponentiated_utility = exponentiated_utility.sum(axis=0)
probs = exponentiated_utility.divide_by_row(
sum_exponentiated_utility, inplace=True)
if clamp:
probs.nantoval(1e-300)
if clamp:
probs.clamptomin(1e-300)
    logger.debug('finish: calculate MNL probabilities')
return probs
def get_hessian(derivative):
return np.linalg.inv(np.dot(derivative, np.transpose(derivative)))
def get_standard_error(hessian):
return np.sqrt(np.diagonal(hessian))
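# Standard errors come from the outer product of the per-observation score
# contributions (the BHHH approximation): sqrt(diag(inv(G * G^T))), where the
# columns of G are the gradient contributions built in mnl_loglik's stderr path.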
# data should be column matrix of dimensions NUMVARS x (NUMALTS*NUMOBVS)
# beta is a row vector of dimensions 1 X NUMVARS
def mnl_loglik(beta, data, chosen, numalts, weights=None, lcgrad=False,
stderr=0):
logger.debug('start: calculate MNL log-likelihood')
numvars = beta.size
numobs = data.size() / numvars / numalts
beta = np.reshape(beta, (1, beta.size))
beta = PMAT(beta, data.typ)
probs = mnl_probs(data, beta, numalts)
# lcgrad is the special gradient for the latent class membership model
if lcgrad:
assert weights
gradmat = weights.subtract(probs).reshape(probs.size(), 1)
gradarr = data.multiply(gradmat)
else:
if not weights:
gradmat = chosen.subtract(probs).reshape(probs.size(), 1)
else:
gradmat = chosen.subtract(probs).multiply_by_row(
weights).reshape(probs.size(), 1)
gradarr = data.multiply(gradmat)
if stderr:
gradmat = data.multiply_by_row(gradmat.reshape(1, gradmat.size()))
gradmat.reshape(numvars, numalts * numobs)
return get_standard_error(get_hessian(gradmat.get_mat()))
chosen.reshape(numalts, numobs)
if weights is not None:
if probs.shape() == weights.shape():
loglik = ((probs.log(inplace=True)
.element_multiply(weights, inplace=True)
.element_multiply(chosen, inplace=True))
.sum(axis=1).sum(axis=0))
else:
loglik = ((probs.log(inplace=True)
.multiply_by_row(weights, inplace=True)
.element_multiply(chosen, inplace=True))
.sum(axis=1).sum(axis=0))
else:
loglik = (probs.log(inplace=True).element_multiply(
chosen, inplace=True)).sum(axis=1).sum(axis=0)
if loglik.typ == 'numpy':
loglik, gradarr = loglik.get_mat(), gradarr.get_mat().flatten()
else:
loglik = loglik.get_mat()[0, 0]
gradarr = np.reshape(gradarr.get_mat(), (1, gradarr.size()))[0]
logger.debug('finish: calculate MNL log-likelihood')
return -1 * loglik, -1 * gradarr
def mnl_simulate(data, coeff, numalts, GPU=False, returnprobs=True):
"""
Get the probabilities for each chooser choosing between `numalts`
alternatives.
Parameters
----------
data : 2D array
The data are expected to be in "long" form where each row is for
one alternative. Alternatives are in groups of `numalts` rows per
choosers. Alternatives must be in the same order for each chooser.
coeff : 1D array
The model coefficients corresponding to each column in `data`.
numalts : int
The number of alternatives available to each chooser.
GPU : bool, optional
returnprobs : bool, optional
If True, return the probabilities for each chooser/alternative instead
of actual choices.
Returns
-------
probs or choices: 2D array
If `returnprobs` is True the probabilities are a 2D array with a
row for each chooser and columns for each alternative.
"""
logger.debug(
'start: MNL simulation with len(data)={} and numalts={}'.format(
len(data), numalts))
atype = 'numpy' if not GPU else 'cuda'
data = np.transpose(data)
coeff = np.reshape(np.array(coeff), (1, len(coeff)))
data, coeff = PMAT(data, atype), PMAT(coeff, atype)
probs = mnl_probs(data, coeff, numalts)
if returnprobs:
return np.transpose(probs.get_mat())
# convert to cpu from here on - gpu doesn't currently support these ops
if probs.typ == 'cuda':
probs = PMAT(probs.get_mat())
probs = probs.cumsum(axis=0)
r = pmat.random(probs.size() / numalts)
choices = probs.subtract(r, inplace=True).firstpositive(axis=0)
logger.debug('finish: MNL simulation')
return choices.get_mat()
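# Example (hypothetical numbers): with 2 choosers and 3 alternatives, `data` has
# 6 rows (alternatives grouped per chooser); mnl_simulate(data, coeff, 3,
# returnprobs=True) returns a 2 x 3 array of choice probabilities.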
def mnl_estimate(data, chosen, numalts, GPU=False, coeffrange=(-3, 3),
weights=None, lcgrad=False, beta=None):
"""
Calculate coefficients of the MNL model.
Parameters
----------
data : 2D array
The data are expected to be in "long" form where each row is for
one alternative. Alternatives are in groups of `numalts` rows per
choosers. Alternatives must be in the same order for each chooser.
chosen : 2D array
This boolean array has a row for each chooser and a column for each
alternative. The column ordering for alternatives is expected to be
the same as their row ordering in the `data` array.
A one (True) indicates which alternative each chooser has chosen.
numalts : int
The number of alternatives.
GPU : bool, optional
coeffrange : tuple of floats, optional
Limits of (min, max) to which coefficients are clipped.
weights : ndarray, optional
lcgrad : bool, optional
beta : 1D array, optional
Any initial guess for the coefficients.
Returns
-------
log_likelihood : dict
Dictionary of log-likelihood values describing the quality of
the model fit.
fit_parameters : pandas.DataFrame
Table of fit parameters with columns 'Coefficient', 'Std. Error',
'T-Score'. Each row corresponds to a column in `data` and are given
in the same order as in `data`.
See Also
--------
scipy.optimize.fmin_l_bfgs_b : The optimization routine used.
"""
logger.debug(
'start: MNL fit with len(data)={} and numalts={}'.format(
len(data), numalts))
atype = 'numpy' if not GPU else 'cuda'
numvars = data.shape[1]
numobs = data.shape[0] / numalts
if chosen is None:
chosen = np.ones((numobs, numalts)) # used for latent classes
data = np.transpose(data)
chosen = np.transpose(chosen)
data, chosen = PMAT(data, atype), PMAT(chosen, atype)
if weights is not None:
weights = PMAT(np.transpose(weights), atype)
if beta is None:
beta = np.zeros(numvars)
bounds = [coeffrange] * numvars
with log_start_finish('scipy optimization for MNL fit', logger):
args = (data, chosen, numalts, weights, lcgrad)
bfgs_result = scipy.optimize.fmin_l_bfgs_b(mnl_loglik,
beta,
args=args,
fprime=None,
factr=10,
approx_grad=False,
bounds=bounds
)
beta = bfgs_result[0]
stderr = mnl_loglik(
beta, data, chosen, numalts, weights, stderr=1, lcgrad=lcgrad)
l0beta = np.zeros(numvars)
l0 = -1 * mnl_loglik(l0beta, *args)[0]
l1 = -1 * mnl_loglik(beta, *args)[0]
log_likelihood = {
'null': float(l0[0][0]),
'convergence': float(l1[0][0]),
'ratio': float((1 - (l1 / l0))[0][0])
}
fit_parameters = pd.DataFrame({
'Coefficient': beta,
'Std. Error': stderr,
'T-Score': beta / stderr})
logger.debug('finish: MNL fit')
return log_likelihood, fit_parameters
| bsd-3-clause |
ChanderG/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
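    # with every other point as a neighbor, the neighborhood graph already contains
    # all pairwise Euclidean distances, so the Isomap embedding must reproduce them exactly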
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
AxelTLarsson/robot-localisation | robot_localisation/main.py | 1 | 6009 | """
This module contains the logic to run the simulation.
"""
import sys
import os
import argparse
import numpy as np
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from robot_localisation.grid import Grid, build_transition_matrix
from robot_localisation.robot import Robot, Sensor
from robot_localisation.hmm_filter import FilterState
def help_text():
"""
Return a helpful text explaining usage of the program.
"""
return """
------------------------------- HMM Filtering ---------------------------------
Type a command to get started. Type 'quit' or 'q' to quit.
Valid commands (all commands are case insensitive):
ENTER move the robot one step further in the simulation,
will also output current pose and estimated
position of the robot
help show this help text
show T show the transition matrix T
show f show the filter column vector
show O show the observation matrix
quit | q quit the program
-------------------------------------------------------------------------------
"""
def main():
parser = argparse.ArgumentParser(description='Robot localisation with HMM')
parser.add_argument(
'-r', '--rows',
type=int,
help='the number of rows on the grid, default is 4',
default=4)
parser.add_argument(
'-c', '--columns',
type=int,
help='the number of columns on the grid, default is 4',
default=4)
args = parser.parse_args()
# Initialise the program
size = (args.rows, args.columns)
the_T_matrix = build_transition_matrix(*size)
the_filter = FilterState(transition=the_T_matrix)
the_sensor = Sensor()
the_grid = Grid(*size)
the_robot = Robot(the_grid, the_T_matrix)
sensor_value = None
obs = None
print(help_text())
print("Grid size is {} x {}".format(size[0], size[1]))
print(the_robot)
print("The sensor says: {}".format(sensor_value))
filter_est = the_grid.index_to_pose(the_filter.belief_state)
pos_est = (filter_est[0], filter_est[1])
print("The HMM filter thinks the robot is at {}".format(filter_est))
print("The Manhattan distance is: {}".format(
manhattan(the_robot.get_position(), pos_est)))
np.set_printoptions(linewidth=1000)
# Main loop
while True:
user_command = str(input('> '))
if user_command.upper() == 'QUIT' or user_command.upper() == 'Q':
break
elif user_command.upper() == 'HELP':
print(help_text())
elif user_command.upper() == 'SHOW T':
print(the_T_matrix)
elif user_command.upper() == 'SHOW F':
print(the_filter.belief_matrix)
elif user_command.upper() == 'SHOW O':
print(obs)
elif not user_command:
# take a step then approximate etc.
the_robot.step()
sensor_value = the_sensor.get_position(the_robot)
obs = the_sensor.get_obs_matrix(sensor_value, size)
the_filter.forward(obs)
print(the_robot)
print("The sensor says: {}".format(sensor_value))
filter_est = the_grid.index_to_pose(the_filter.belief_state)
pos_est = (filter_est[0], filter_est[1])
print("The HMM filter thinks the robot is at {}".format(filter_est))
print("The Manhattan distance is: {}".format(
manhattan(the_robot.get_position(), pos_est)))
else:
print("Unknown command!")
def manhattan(pos1, pos2):
"""
Calculate the Manhattan distance between pos1 and pos2.
"""
x1, y1 = pos1
x2, y2 = pos2
return abs(x1-x2) + abs(y1-y2)
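# Illustrative sketch (added here, not part of the original module): a quick
# check of the Manhattan distance formula |x1 - x2| + |y1 - y2| on made-up
# coordinates.
def _manhattan_example():
    # |0 - 2| + |0 - 3| = 2 + 3 = 5
    assert manhattan((0, 0), (2, 3)) == 5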
def automated_run():
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 7))
navg = 20
nsteps = 10
for size in (2, 2), (3, 3), (4, 4), (5, 5), (10, 10):
avg_distances = np.zeros(shape=(nsteps+1,))
for n in range(navg):
distances = list()
none_values = list()
the_T_matrix = build_transition_matrix(*size)
the_filter = FilterState(transition=the_T_matrix)
the_sensor = Sensor()
the_grid = Grid(*size)
the_robot = Robot(the_grid, the_T_matrix)
# get the manhattan distance at the start
filter_est = the_grid.index_to_pose(the_filter.belief_state)
pos_est = (filter_est[0], filter_est[1])
distances.append(manhattan(the_robot.get_position(), pos_est))
for i in range(nsteps):
# take a step then approximate etc.
the_robot.step()
sensor_value = the_sensor.get_position(the_robot)
if sensor_value is None:
none_values.append(i) # keep track of where None was returned
obs = the_sensor.get_obs_matrix(sensor_value, size)
the_filter.forward(obs)
filter_est = the_grid.index_to_pose(the_filter.belief_state)
pos_est = (filter_est[0], filter_est[1])
distances.append(manhattan(the_robot.get_position(), pos_est))
avg_distances += np.array(distances)
avg_distances /= navg
base_line, = plt.plot(avg_distances, label="Grid size {}".format(size))
# for point in none_values:
# plt.scatter(point, distances[point], marker='o',
# color=base_line.get_color(), s=40)
plt.legend()
plt.xlim(0, nsteps)
plt.ylim(0,)
plt.ylabel("Manhattan distance")
plt.xlabel("Steps")
plt.title("Manhattan distance from true position and inferred position \n"
"from the hidden Markov model (average over %s runs)" % navg)
fig.savefig("automated_run.png")
plt.show()
if __name__ == '__main__':
main()
# automated_run()
| mit |
ofgulban/scikit-image | doc/examples/filters/plot_rank_mean.py | 7 | 1525 | """
============
Mean filters
============
This example compares the following mean filters of the rank filter package:
* **local mean**: all pixels belonging to the structuring element are used to
compute the average gray level.
* **percentile mean**: only values between percentiles p0 and p1 are used
(here 10% and 90%).
* **bilateral mean**: only pixels of the structuring element whose gray level
lies inside [g - s0, g + s1] are used (here g - 500 and g + 500).
Percentile and usual mean give similar results here: these filters smooth the
complete image (background and details). The bilateral mean exhibits a high
filtering rate for continuous areas (i.e. the background), while higher image
frequencies remain untouched.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.morphology import disk
from skimage.filters import rank
image = data.coins()
selem = disk(20)
percentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 10),
sharex=True, sharey=True)
ax = axes.ravel()
titles = ['Original', 'Percentile mean', 'Bilateral mean', 'Local mean']
imgs = [image, percentile_result, bilateral_result, normal_result]
for n in range(0, len(imgs)):
ax[n].imshow(imgs[n])
ax[n].set_title(titles[n])
ax[n].set_adjustable('box-forced')
ax[n].axis('off')
plt.show()
| bsd-3-clause |
B3AU/waveTree | sklearn/utils/testing.py | 4 | 12125 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# License: BSD 3 clause
import inspect
import pkgutil
import warnings
import scipy as sp
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
from .fixes import savemat
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises", "raises",
"with_setup", "assert_true", "assert_false", "assert_almost_equal",
"assert_array_equal", "assert_array_almost_equal",
"assert_array_less"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
# To remove when we support numpy 1.7
def assert_warns(warning_class, func, *args, **kw):
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
if not w[0].category is warning_class:
raise AssertionError("First warning for %s is not a "
"%s( is %s)"
% (func.__name__, warning_class, w[0]))
return result
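# Illustrative usage sketch (added here, not in the upstream module): the
# helper above run against a throwaway function that emits a DeprecationWarning.
def _assert_warns_example():
    def _old_api():
        warnings.warn("old API", DeprecationWarning)
        return 42
    result = assert_warns(DeprecationWarning, _old_api)
    assert result == 42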
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exception, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions"""
try:
function(*args, **kwargs)
raise AssertionError("Should have raised %r" % exception(message))
except exception as e:
error_message = str(e)
assert_in(message, error_message)
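# Illustrative usage sketch (added for this writeup; the names are made up):
# the message check above on a deliberately failing callable.
def _assert_raise_message_example():
    def _boom():
        raise ValueError("bad shape for input")
    assert_raise_message(ValueError, "bad shape", _boom)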
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict: contains data as
columns_dict[column_name] = array of data
dataname: name of data set
matfile: file-like object or file name
ordering: list of column_names, determines the ordering in the data set
Note: this function transposes all arrays, while fetch_mldata only
transposes 'data'; keep that in mind in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
savemat(matfile, datasets, oned_as='column')
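# Illustrative usage sketch (added here; the column names and sizes are
# arbitrary): build a small fake mldata set into an in-memory buffer.
def _fake_mldata_example():
    from io import BytesIO
    matfile = BytesIO()
    fake_mldata({'data': np.arange(6).reshape(3, 2), 'label': np.arange(3)},
                dataname='datasets-fake', matfile=matfile,
                ordering=['label', 'data'])
    matfile.seek(0)
    return matfile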
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
meta_estimators = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
other = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV"]
def all_estimators(include_meta_estimators=False, include_other=False,
type_filter=None):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
type_filter : string or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
module = __import__(modname, fromlist="dummy")
if ".tests." in modname:
continue
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_other:
estimators = [c for c in estimators if not c[0] in other]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in meta_estimators]
if type_filter == 'classifier':
estimators = [est for est in estimators
if issubclass(est[1], ClassifierMixin)]
elif type_filter == 'regressor':
estimators = [est for est in estimators
if issubclass(est[1], RegressorMixin)]
elif type_filter == 'transformer':
estimators = [est for est in estimators
if issubclass(est[1], TransformerMixin)]
elif type_filter == 'cluster':
estimators = [est for est in estimators
if issubclass(est[1], ClusterMixin)]
elif type_filter is not None:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# We sort in order to have reproducible test failures
return sorted(estimators)
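# Illustrative usage sketch (added here, not part of the upstream module):
# list every classifier discovered in the installed sklearn tree.
def _all_classifiers_example():
    for name, Clf in all_estimators(type_filter='classifier'):
        print("%s -> %r" % (name, Clf))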
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
matplotlib.pylab.figure()
except:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
| bsd-3-clause |
otmaneJai/Zipline | zipline/sources/data_frame_source.py | 26 | 5253 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools to generate data sources.
"""
import numpy as np
import pandas as pd
from zipline.gens.utils import hash_args
from zipline.sources.data_source import DataSource
class DataFrameSource(DataSource):
"""
Data source that yields from a pandas DataFrame.
:Axis layout:
* columns : sids
* index : datetime
:Note:
Bars where the price is nan are filtered out.
"""
def __init__(self, data, **kwargs):
assert isinstance(data.index, pd.tseries.index.DatetimeIndex)
# Only accept integer SIDs as the columns of the DataFrame
assert isinstance(data.columns, pd.Int64Index)
# TODO is ffilling correct/necessary?
# Forward fill prices
self.data = data.fillna(method='ffill')
# Unpack config dictionary with default values.
self.start = kwargs.get('start', self.data.index[0])
self.end = kwargs.get('end', self.data.index[-1])
self.sids = self.data.columns
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
self.started_sids = set()
@property
def mapping(self):
return {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt, series in self.data.iterrows():
for sid, price in series.iteritems():
# Skip SIDs that can not be forward filled
if np.isnan(price) and \
sid not in self.started_sids:
continue
self.started_sids.add(sid)
event = {
'dt': dt,
'sid': sid,
'price': price,
# Just chose something large
# if no volume available.
'volume': 1e9,
}
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
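# Illustrative construction sketch (added for this writeup; the sids, dates and
# prices below are made up): a two-sid price frame fed to DataFrameSource.
def _example_data_frame_source():
    index = pd.date_range('2014-01-01', periods=3)
    prices = pd.DataFrame({0: [10.0, 10.5, 11.0],
                           1: [20.0, np.nan, 21.0]}, index=index)
    prices.columns = pd.Int64Index(prices.columns)
    return DataFrameSource(prices, start=index[0], end=index[-1])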
class DataPanelSource(DataSource):
"""
Data source that yields from a pandas Panel.
:Axis layout:
* items : sids
* major_axis : datetime
* minor_axis : price, volume, ...
:Note:
Bars where the price is nan are filtered out.
"""
def __init__(self, data, **kwargs):
assert isinstance(data.major_axis, pd.tseries.index.DatetimeIndex)
# Only accept integer SIDs as the items of the Panel
assert isinstance(data.items, pd.Int64Index)
# TODO is ffilling correct/necessary?
# forward fill with volumes of 0
self.data = data.fillna(value={'volume': 0})
self.data = self.data.fillna(method='ffill')
# Unpack config dictionary with default values.
self.start = kwargs.get('start', self.data.major_axis[0])
self.end = kwargs.get('end', self.data.major_axis[-1])
self.sids = self.data.items
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
self.started_sids = set()
@property
def mapping(self):
mapping = {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
# Add additional fields.
for field_name in self.data.minor_axis:
if field_name in ['price', 'volume', 'dt', 'sid']:
continue
mapping[field_name] = (lambda x: x, field_name)
return mapping
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt in self.data.major_axis:
df = self.data.major_xs(dt)
for sid, series in df.iteritems():
# Skip SIDs that can not be forward filled
if np.isnan(series['price']) and \
sid not in self.started_sids:
continue
self.started_sids.add(sid)
event = {
'dt': dt,
'sid': sid,
}
for field_name, value in series.iteritems():
event[field_name] = value
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
| apache-2.0 |
ashhher3/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial_fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
nolanliou/tensorflow | tensorflow/contrib/losses/python/metric_learning/metric_loss_ops_test.py | 41 | 20535 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for triplet_semihard_loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.losses.python import metric_learning as metric_loss_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn import metrics
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def pairwise_distance_np(feature, squared=False):
"""Computes the pairwise distance matrix in numpy.
Args:
feature: 2-D numpy array of size [number of data, feature dimension]
squared: Boolean. If true, output is the pairwise squared euclidean
distance matrix; else, output is the pairwise euclidean distance matrix.
Returns:
pairwise_distances: 2-D numpy array of size
[number of data, number of data].
"""
triu = np.triu_indices(feature.shape[0], 1)
upper_tri_pdists = np.linalg.norm(feature[triu[1]] - feature[triu[0]], axis=1)
if squared:
upper_tri_pdists **= 2.
num_data = feature.shape[0]
pairwise_distances = np.zeros((num_data, num_data))
pairwise_distances[np.triu_indices(num_data, 1)] = upper_tri_pdists
# Make symmetrical.
pairwise_distances = pairwise_distances + pairwise_distances.T - np.diag(
pairwise_distances.diagonal())
return pairwise_distances
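# Illustrative check (added here, not part of the original test file): on a
# tiny made-up feature matrix the helper above matches hand-computed distances.
def _pairwise_distance_example():
    feat = np.array([[0.0, 0.0], [3.0, 4.0]], dtype=np.float32)
    # Euclidean distance between (0, 0) and (3, 4) is sqrt(3^2 + 4^2) = 5.
    assert np.allclose(pairwise_distance_np(feat), [[0.0, 5.0], [5.0, 0.0]])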
class ContrastiveLossTest(test.TestCase):
def testContrastive(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(0, 2, size=(num_data,)).astype(np.float32)
# Compute the loss in NP
dist = np.sqrt(
np.sum(np.square(embeddings_anchor - embeddings_positive), axis=1))
loss_np = np.mean(
labels * np.square(dist) +
(1.0 - labels) * np.square(np.maximum(margin - dist, 0.0)))
# Compute the loss with TF
loss_tf = metric_loss_ops.contrastive_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class TripletSemiHardLossTest(test.TestCase):
def testTripletSemiHard(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
num_classes = 4
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP.
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
pdist_matrix = pairwise_distance_np(embedding, squared=True)
loss_np = 0.0
num_positives = 0.0
for i in range(num_data):
for j in range(num_data):
if adjacency[i][j] > 0.0 and i != j:
num_positives += 1.0
pos_distance = pdist_matrix[i][j]
neg_distances = []
for k in range(num_data):
if adjacency[i][k] == 0:
neg_distances.append(pdist_matrix[i][k])
# Sort by distance.
neg_distances.sort()
chosen_neg_distance = neg_distances[0]
for l in range(len(neg_distances)):
chosen_neg_distance = neg_distances[l]
if chosen_neg_distance > pos_distance:
break
loss_np += np.maximum(
0.0, margin - chosen_neg_distance + pos_distance)
loss_np /= num_positives
# Compute the loss in TF.
loss_tf = metric_loss_ops.triplet_semihard_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embedding),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class LiftedStructLossTest(test.TestCase):
def testLiftedStruct(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
num_classes = 4
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
pdist_matrix = pairwise_distance_np(embedding)
loss_np = 0.0
num_constraints = 0.0
for i in range(num_data):
for j in range(num_data):
if adjacency[i][j] > 0.0 and i != j:
d_pos = pdist_matrix[i][j]
negs = []
for k in range(num_data):
if not adjacency[i][k]:
negs.append(margin - pdist_matrix[i][k])
for l in range(num_data):
if not adjacency[j][l]:
negs.append(margin - pdist_matrix[j][l])
negs = np.array(negs)
max_elem = np.max(negs)
negs -= max_elem
negs = np.exp(negs)
soft_maximum = np.log(np.sum(negs)) + max_elem
num_constraints += 1.0
this_loss = max(soft_maximum + d_pos, 0)
loss_np += this_loss * this_loss
loss_np = loss_np / num_constraints / 2.0
# Compute the loss in TF
loss_tf = metric_loss_ops.lifted_struct_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embedding),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def convert_to_list_of_sparse_tensor(np_matrix):
list_of_sparse_tensors = []
nrows, ncols = np_matrix.shape
for i in range(nrows):
sp_indices = []
for j in range(ncols):
if np_matrix[i][j] == 1:
sp_indices.append([j])
num_non_zeros = len(sp_indices)
list_of_sparse_tensors.append(sparse_tensor.SparseTensor(
indices=np.array(sp_indices),
values=np.ones((num_non_zeros,)),
dense_shape=np.array([ncols,])))
return list_of_sparse_tensors
class NpairsLossTest(test.TestCase):
def testNpairs(self):
with self.test_session():
num_data = 15
feat_dim = 6
num_classes = 5
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP
reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
reg_term *= 0.25 * reg_lambda
similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
labels_remapped = np.equal(
labels_reshaped, labels_reshaped.T).astype(np.float32)
labels_remapped /= np.sum(labels_remapped, axis=1, keepdims=True)
xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
logits=ops.convert_to_tensor(similarity_matrix),
labels=ops.convert_to_tensor(labels_remapped))).eval()
loss_np = xent_loss + reg_term
# Compute the loss in TF
loss_tf = metric_loss_ops.npairs_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class NpairsLossMultiLabelTest(test.TestCase):
def testNpairsMultiLabelLossWithSingleLabelEqualsNpairsLoss(self):
with self.test_session():
num_data = 15
feat_dim = 6
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.arange(num_data)
labels = np.reshape(labels, -1)
# Compute vanila npairs loss.
loss_npairs = metric_loss_ops.npairs_loss(
labels=ops.convert_to_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda).eval()
# Compute npairs multilabel loss.
labels_one_hot = np.identity(num_data)
loss_npairs_multilabel = metric_loss_ops.npairs_loss_multilabel(
sparse_labels=convert_to_list_of_sparse_tensor(labels_one_hot),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda).eval()
self.assertAllClose(loss_npairs, loss_npairs_multilabel)
def testNpairsMultiLabel(self):
with self.test_session():
num_data = 15
feat_dim = 6
num_classes = 10
reg_lambda = 0.02
embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
embeddings_positive = np.random.rand(num_data, feat_dim).astype(
np.float32)
labels = np.random.randint(0, 2, (num_data, num_classes))
# set entire column to one so that each row has at least one bit set.
labels[:, -1] = 1
# Compute the loss in NP
reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
reg_term *= 0.25 * reg_lambda
similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
labels_remapped = np.dot(labels, labels.T).astype(np.float)
labels_remapped /= np.sum(labels_remapped, 1, keepdims=True)
xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
logits=ops.convert_to_tensor(similarity_matrix),
labels=ops.convert_to_tensor(labels_remapped))).eval()
loss_np = xent_loss + reg_term
# Compute the loss in TF
loss_tf = metric_loss_ops.npairs_loss_multilabel(
sparse_labels=convert_to_list_of_sparse_tensor(labels),
embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
embeddings_positive=ops.convert_to_tensor(embeddings_positive),
reg_lambda=reg_lambda)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def compute_ground_truth_cluster_score(feat, y):
y_unique = np.unique(y)
score_gt_np = 0.0
for c in y_unique:
feat_subset = feat[y == c, :]
pdist_subset = pairwise_distance_np(feat_subset)
score_gt_np += -1.0 * np.min(np.sum(pdist_subset, axis=0))
score_gt_np = score_gt_np.astype(np.float32)
return score_gt_np
def compute_cluster_loss_numpy(feat,
y,
margin_multiplier=1.0,
enable_pam_finetuning=True):
if enable_pam_finetuning:
facility = ForwardGreedyFacility(
n_clusters=np.unique(y).size).pam_augmented_fit(feat, y,
margin_multiplier)
else:
facility = ForwardGreedyFacility(
n_clusters=np.unique(y).size).loss_augmented_fit(feat, y,
margin_multiplier)
score_augmented = facility.score_aug_
score_gt = compute_ground_truth_cluster_score(feat, y)
return np.maximum(np.float32(0.0), score_augmented - score_gt)
class ForwardGreedyFacility(object):
def __init__(self, n_clusters=8):
self.n_clusters = n_clusters
self.center_ics_ = None
def _check_init_args(self):
# Check n_clusters.
if (self.n_clusters is None or self.n_clusters <= 0 or
not isinstance(self.n_clusters, int)):
raise ValueError('n_clusters has to be nonnegative integer.')
def loss_augmented_fit(self, feat, y, loss_mult):
"""Fit K-Medoids to the provided data."""
self._check_init_args()
# Check that the array is good and attempt to convert it to
# Numpy array if possible.
feat = self._check_array(feat)
# Apply distance metric to get the distance matrix.
pdists = pairwise_distance_np(feat)
num_data = feat.shape[0]
candidate_ids = list(range(num_data))
candidate_scores = np.zeros(num_data,)
subset = []
k = 0
while k < self.n_clusters:
candidate_scores = []
for i in candidate_ids:
# push i to subset.
subset.append(i)
marginal_cost = -1.0 * np.sum(np.min(pdists[:, subset], axis=1))
loss = 1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset))
candidate_scores.append(marginal_cost + loss_mult * loss)
# remove i from subset.
subset.pop()
# push i_star to subset.
i_star = candidate_ids[np.argmax(candidate_scores)]
subset.append(i_star)
# remove i_star from candidate indices.
candidate_ids.remove(i_star)
k += 1
# Expose labels_ which are the assignments of
# the training data to clusters.
self.labels_ = self._get_cluster_ics(pdists, subset)
# Expose cluster centers, i.e. medoids.
self.cluster_centers_ = feat.take(subset, axis=0)
# Expose indices of chosen cluster centers.
self.center_ics_ = subset
# Expose the score = -\sum_{i \in V} min_{j \in S} || x_i - x_j ||
self.score_ = np.float32(-1.0) * self._get_facility_distance(pdists, subset)
self.score_aug_ = self.score_ + loss_mult * (
1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset)))
self.score_aug_ = self.score_aug_.astype(np.float32)
# Expose the chosen cluster indices.
self.subset_ = subset
return self
def _augmented_update_medoid_ics_in_place(self, pdists, y_gt, cluster_ics,
medoid_ics, loss_mult):
for cluster_idx in range(self.n_clusters):
# y_pred = self._get_cluster_ics(D, medoid_ics)
# Don't prematurely do the assignment step.
# Do this after we've updated all cluster medoids.
y_pred = cluster_ics
if sum(y_pred == cluster_idx) == 0:
# Cluster is empty.
continue
curr_score = (
-1.0 * np.sum(
pdists[medoid_ics[cluster_idx], y_pred == cluster_idx]) +
loss_mult * (1.0 - metrics.normalized_mutual_info_score(
y_gt, y_pred)))
pdist_in = pdists[y_pred == cluster_idx, :]
pdist_in = pdist_in[:, y_pred == cluster_idx]
all_scores_fac = np.sum(-1.0 * pdist_in, axis=1)
all_scores_loss = []
for i in range(y_pred.size):
if y_pred[i] != cluster_idx:
continue
# remove this cluster's current centroid
medoid_ics_i = medoid_ics[:cluster_idx] + medoid_ics[cluster_idx + 1:]
# add this new candidate to the centroid list
medoid_ics_i += [i]
y_pred_i = self._get_cluster_ics(pdists, medoid_ics_i)
all_scores_loss.append(loss_mult * (
1.0 - metrics.normalized_mutual_info_score(y_gt, y_pred_i)))
all_scores = all_scores_fac + all_scores_loss
max_score_idx = np.argmax(all_scores)
max_score = all_scores[max_score_idx]
if max_score > curr_score:
medoid_ics[cluster_idx] = np.where(
y_pred == cluster_idx)[0][max_score_idx]
def pam_augmented_fit(self, feat, y, loss_mult):
pam_max_iter = 5
self._check_init_args()
feat = self._check_array(feat)
pdists = pairwise_distance_np(feat)
self.loss_augmented_fit(feat, y, loss_mult)
print('PAM -1 (before PAM): score: %f, score_aug: %f' % (
self.score_, self.score_aug_))
# Initialize from loss augmented facility location
subset = self.center_ics_
for iter_ in range(pam_max_iter):
# update the cluster assignment
cluster_ics = self._get_cluster_ics(pdists, subset)
# update the medoid for each clusters
self._augmented_update_medoid_ics_in_place(pdists, y, cluster_ics, subset,
loss_mult)
self.score_ = np.float32(-1.0) * self._get_facility_distance(
pdists, subset)
self.score_aug_ = self.score_ + loss_mult * (
1.0 - metrics.normalized_mutual_info_score(
y, self._get_cluster_ics(pdists, subset)))
self.score_aug_ = self.score_aug_.astype(np.float32)
print('PAM iter: %d, score: %f, score_aug: %f' % (iter_, self.score_,
self.score_aug_))
self.center_ics_ = subset
self.labels_ = cluster_ics
return self
def _check_array(self, feat):
# Check that the number of clusters is less than or equal to
# the number of samples
if self.n_clusters > feat.shape[0]:
raise ValueError('The number of medoids ' + '({}) '.format(
self.n_clusters) + 'must not be larger than the number ' +
'of samples ({})'.format(feat.shape[0]))
return feat
def _get_cluster_ics(self, pdists, subset):
"""Returns cluster indices for pdist and current medoid indices."""
# Assign data points to clusters based on
# which cluster assignment yields
# the smallest distance.
cluster_ics = np.argmin(pdists[subset, :], axis=0)
return cluster_ics
def _get_facility_distance(self, pdists, subset):
return np.sum(np.min(pdists[subset, :], axis=0))
class ClusterLossTest(test.TestCase):
def _genClusters(self, n_samples, n_clusters):
blobs = datasets.make_blobs(
n_samples=n_samples, centers=n_clusters)
embedding, labels = blobs
embedding = (embedding - embedding.mean(axis=0)) / embedding.std(axis=0)
embedding = embedding.astype(np.float32)
return embedding, labels
def testClusteringLossPAMOff(self):
if not HAS_SKLEARN:
return
with self.test_session():
margin_multiplier = 10.0
embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
loss_np = compute_cluster_loss_numpy(
embeddings, labels, margin_multiplier, enable_pam_finetuning=False)
loss_tf = metric_loss_ops.cluster_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embeddings),
margin_multiplier=margin_multiplier,
enable_pam_finetuning=False)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
def testClusteringLossPAMOn(self):
if not HAS_SKLEARN:
return
with self.test_session():
margin_multiplier = 10.0
embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
loss_np = compute_cluster_loss_numpy(
embeddings, labels, margin_multiplier, enable_pam_finetuning=True)
loss_tf = metric_loss_ops.cluster_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embeddings),
margin_multiplier=margin_multiplier,
enable_pam_finetuning=True)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
if __name__ == '__main__':
test.main()
| apache-2.0 |
kaichogami/scikit-learn | sklearn/utils/multiclass.py | 40 | 12966 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
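# Illustrative sketch (added here; ``clf`` stands for any estimator that
# implements the partial_fit API): the calling pattern the helper above is
# designed for.
def _partial_fit_calling_pattern(clf, X, y, classes=None):
    if _check_partial_fit_first_call(clf, classes):
        # First call: clf.classes_ has just been set from `classes`,
        # so per-class state can be allocated here.
        pass
    # Later calls fall through and reuse the stored clf.classes_.
    return clf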
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
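# Illustrative usage sketch (added for this writeup) on a tiny dense
# two-output target matrix.
def _class_distribution_example():
    y = np.array([[1, 0],
                  [2, 0],
                  [1, 3]])
    classes, n_classes, class_prior = class_distribution(y)
    # First output: classes [1, 2] with priors [2/3, 1/3];
    # second output: classes [0, 3] with priors [2/3, 1/3].
    return classes, n_classes, class_prior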
| bsd-3-clause |
alalbiol/trading-with-python | lib/qtpandas.py | 77 | 7937 | '''
Easy integration of DataFrame into pyqt framework
Copyright: Jev Kuznetsov
Licence: BSD
'''
from PyQt4.QtCore import (QAbstractTableModel,Qt,QVariant,QModelIndex,SIGNAL)
from PyQt4.QtGui import (QApplication,QDialog,QVBoxLayout, QHBoxLayout, QTableView, QPushButton,
QWidget,QTableWidget, QHeaderView, QFont,QMenu,QAbstractItemView)
from pandas import DataFrame, Index
import numpy as np  # used below in DataFrameModel.setData and testDf
class DataFrameModel(QAbstractTableModel):
''' data model for a DataFrame class '''
def __init__(self,parent=None):
super(DataFrameModel,self).__init__(parent)
self.df = DataFrame()
self.columnFormat = {} # format columns
def setFormat(self,fmt):
"""
set string formatting for the output
example : format = {'close':"%.2f"}
"""
self.columnFormat = fmt
def setDataFrame(self,dataFrame):
self.df = dataFrame
self.signalUpdate()
def signalUpdate(self):
''' tell viewers to update their data (this is full update, not efficient)'''
self.layoutChanged.emit()
def __repr__(self):
return str(self.df)
def setData(self,index,value, role=Qt.EditRole):
if index.isValid():
row,column = index.row(), index.column()
dtype = self.df.dtypes.tolist()[column] # get column dtype
if np.issubdtype(dtype,np.float):
val,ok = value.toFloat()
elif np.issubdtype(dtype,np.int):
val,ok = value.toInt()
else:
val = value.toString()
ok = True
if ok:
self.df.iloc[row,column] = val
return True
return False
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(
QAbstractTableModel.flags(self, index)|
Qt.ItemIsEditable)
def appendRow(self, index, data=0):
self.df.loc[index,:] = data
self.signalUpdate()
def deleteRow(self, index):
idx = self.df.index[index]
#self.beginRemoveRows(QModelIndex(), index,index)
#self.df = self.df.drop(idx,axis=0)
#self.endRemoveRows()
#self.signalUpdate()
#------------- table display functions -----------------
def headerData(self,section,orientation,role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
try:
return self.df.columns.tolist()[section]
except (IndexError, ):
return QVariant()
elif orientation == Qt.Vertical:
try:
#return self.df.index.tolist()
return str(self.df.index.tolist()[section])
except (IndexError, ):
return QVariant()
def data(self, index, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if not index.isValid():
return QVariant()
col = self.df.ix[:,index.column()] # get a column slice first to get the right data type
elm = col[index.row()]
#elm = self.df.ix[index.row(),index.column()]
if self.df.columns[index.column()] in self.columnFormat.keys():
return QVariant(self.columnFormat[self.df.columns[index.column()]] % elm )
else:
return QVariant(str(elm))
def sort(self,nCol,order):
self.layoutAboutToBeChanged.emit()
if order == Qt.AscendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=True)
elif order == Qt.DescendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=False)
self.layoutChanged.emit()
def rowCount(self, index=QModelIndex()):
return self.df.shape[0]
def columnCount(self, index=QModelIndex()):
return self.df.shape[1]
class TableView(QTableView):
""" extended table view """
def __init__(self,name='TableView1', parent=None):
super(TableView,self).__init__(parent)
self.name = name
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def contextMenuEvent(self, event):
menu = QMenu(self)
Action = menu.addAction("delete row")
Action.triggered.connect(self.deleteRow)
menu.exec_(event.globalPos())
def deleteRow(self):
print "Action triggered from " + self.name
print 'Selected rows:'
for idx in self.selectionModel().selectedRows():
print idx.row()
# self.model.deleteRow(idx.row())
class DataFrameWidget(QWidget):
''' a simple widget for using DataFrames in a gui '''
def __init__(self,name='DataFrameTable1', parent=None):
super(DataFrameWidget,self).__init__(parent)
self.name = name
self.dataModel = DataFrameModel()
self.dataModel.setDataFrame(DataFrame())
self.dataTable = QTableView()
#self.dataTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.dataTable.setSortingEnabled(True)
self.dataTable.setModel(self.dataModel)
self.dataModel.signalUpdate()
#self.dataTable.setFont(QFont("Courier New", 8))
layout = QVBoxLayout()
layout.addWidget(self.dataTable)
self.setLayout(layout)
def setFormat(self,fmt):
""" set non-default string formatting for a column """
for colName, f in fmt.iteritems():
self.dataModel.columnFormat[colName]=f
def fitColumns(self):
self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
def setDataFrame(self,df):
self.dataModel.setDataFrame(df)
def resizeColumnsToContents(self):
self.dataTable.resizeColumnsToContents()
def insertRow(self,index, data=None):
self.dataModel.appendRow(index,data)
#-----------------stand alone test code
def testDf():
''' creates test dataframe '''
data = {'int':[1,2,3],'float':[1./3,2.5,3.5],'string':['a','b','c'],'nan':[np.nan,np.nan,np.nan]}
return DataFrame(data, index=Index(['AAA','BBB','CCC']))[['int','float','string','nan']]
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
df = testDf() # make up some data
self.table = DataFrameWidget(parent=self)
self.table.setDataFrame(df)
#self.table.resizeColumnsToContents()
self.table.fitColumns()
self.table.setFormat({'float': '%.2f'})
#buttons
#but_add = QPushButton('Add')
but_test = QPushButton('Test')
but_test.clicked.connect(self.testFcn)
hbox = QHBoxLayout()
#hbox.addself.table(but_add)
hbox.addWidget(but_test)
layout = QVBoxLayout()
layout.addWidget(self.table)
layout.addLayout(hbox)
self.setLayout(layout)
def testFcn(self):
print 'test function'
self.table.insertRow('foo')
if __name__=='__main__':
import sys
import numpy as np
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
| bsd-3-clause |
trungnt13/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
| bsd-3-clause |
nealbob/nealbob.github.io | _site/code/multicore_storage_sim.py | 2 | 2177 | import numpy as np
from matplotlib import pyplot as plt
import time
import errno
from multiprocessing import Process
from multiprocessing.queues import Queue
def retry_on_eintr(function, *args, **kw):
while True:
try:
return function(*args, **kw)
except IOError, e:
if e.errno == errno.EINTR:
continue
else:
raise
class RetryQueue(Queue):
"""Queue which will retry if interrupted with EINTR."""
def get(self, block=True, timeout=None):
return retry_on_eintr(Queue.get, self, block, timeout)
def simulate(K, mu, sig, Sbar, T, multi=False, que=0, jobno=0):
np.random.seed(jobno)
S = np.zeros(T+1)
W = np.zeros(T+1)
I = np.zeros(T+1)
S[0] = K
for t in range(T):
W[t] = min(S[t], Sbar)
I[t+1] = max(np.random.normal(mu, sig), 0)
S[t+1] = min(S[t] - W[t] + I[t+1], K)
if multi:
que.put(S)
else:
return S
def multi_sim(CORES=2, T=100):
results = []
ques = [Queue() for i in range(CORES)]
args = [(100, 70, 70, 70, int(T/CORES), True, ques[i], i) for i in range(CORES)]
jobs = [Process(target=simulate, args=(a)) for a in args]
for j in jobs: j.start()
for q in ques: results.append(q.get())
for j in jobs: j.join()
S = np.hstack(results)
return S
"""
### Sample size
T = 1000000
# Single core run ==================================
tic = time.time()
S = simulate(100, 70, 70, 70, T)
toc = time.time()
print 'Single core run time: ' + str(round(toc - tic,3))
plt.plot(S[0:100])
plt.show()
# Multi core run ==================================
tic = time.time()
CORES = 2
results = []
ques = [Queue() for i in range(CORES)]
args = [(100, 70, 70, 70, int(T/CORES), True, ques[i], i) for i in range(CORES)]
jobs = [Process(target=simulate, args=(a)) for a in args]
for j in jobs: j.start()
for q in ques: results.append(q.get())
for j in jobs: j.join()
S = np.hstack(results)
toc = time.time()
print 'Multi-core run time: ' + str(toc - tic)
plt.plot(S[0:100])
plt.show()
print S.shape
plt.scatter(results[0], results[1])
plt.show()
"""
| mit |
samzhang111/wikipedia-jargon | all-subjects/make_tf_differences.py | 1 | 2815 | from __future__ import print_function
import msgpack
import sys
import os
from collections import defaultdict
from helpers import text_dict_to_term_dict
from WikiExtractor import clean, compact
import pandas as pd
def remove_wikipedia_markup(text):
return compact(clean(text.decode('utf8')))
def print_help_and_exit(msg=''):
if msg:
print('Error: {}\n'.format(msg))
print('Usage: python make_tf_differences.py [n-grams] [path to directory]')
print('The directory should contain files output by grab_texts.py')
sys.exit(1)
if len(sys.argv) <= 2:
print_help_and_exit()
##############################################################
# Read in msgpack files, separating them from simple and en Wikipedia
##############################################################
ngrams = int(sys.argv[1])
text_dir = sys.argv[2]
only = sys.argv[3:]
print('Only calculating for: ', only)
try:
files = os.listdir(text_dir)
except OSError:
print_help_and_exit()
##############################################################
# Organize the text files by subject, then wiki (en or simple)
##############################################################
file_dict = defaultdict(dict)
for f in files:
try:
subject, wiki, _ = f.split('_')
if only and subject not in only:
continue
file_dict[subject][wiki] = f
except ValueError:
print_help_and_exit('Text directory does not contain valid filenames')
for subject in file_dict:
print('Importing ', subject)
with open(os.path.join(text_dir, file_dict[subject]['en'])) as f:
en_text = msgpack.load(f)
en_text = {k: remove_wikipedia_markup(v) for k,v in en_text.items()}
with open(os.path.join(text_dir, file_dict[subject]['simple'])) as f:
sm_text = msgpack.load(f)
sm_text = {k: remove_wikipedia_markup(v) for k,v in sm_text.items()}
print('Calculating term differences')
en_tf, en_counts = text_dict_to_term_dict(en_text, ngrams)
sm_tf, sm_counts = text_dict_to_term_dict(sm_text, ngrams)
sm_terms = set(sm_tf)
en_terms = set(en_tf)
term_differences = {}
for t in sm_terms.union(en_terms):
term_differences[t] = en_tf[t] - sm_tf[t]
sorted_term_difference = sorted(term_differences.items(),
key=lambda x: x[1])
print('Outputting term differences')
td_df = pd.DataFrame(sorted_term_difference, columns=['term',
'term_difference'])
td_df['en_tf'] = td_df.term.apply(lambda x: en_tf[x])
td_df['sm_tf'] = td_df.term.apply(lambda x: sm_tf[x])
try:
os.mkdir('data/term-diffs/ngrams-{}'.format(ngrams))
except OSError:
pass
td_df.to_csv('data/term-diffs/ngrams-{}/{}_td.csv'.format(ngrams, subject),
index=False, encoding='utf8')
| gpl-3.0 |
akloster/bokeh | bokeh/properties.py | 20 | 42601 | """ Properties are objects that can be assigned as class level
attributes on Bokeh models, to provide automatic serialization
and validation.
For example, the following defines a model that has integer,
string, and list[float] properties::
class Model(HasProps):
foo = Int
bar = String
baz = List(Float)
The properties of this class can be initialized by specifying
keyword arguments to the initializer::
m = Model(foo=10, bar="a str", baz=[1,2,3,4])
But also by setting the attributes on an instance::
m.foo = 20
Attempts to set a property to a value of the wrong type will
result in a ``ValueError`` exception::
>>> m.foo = 2.3
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 585, in __setattr__
super(HasProps, self).__setattr__(name, value)
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 159, in __set__
raise e
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 152, in __set__
self.validate(value)
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 707, in validate
(nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))
ValueError: expected a value of type int8, int16, int32, int64 or int, got 2.3 of type float
Additionally, properties know how to serialize themselves,
to be understood by BokehJS.
"""
from __future__ import absolute_import, print_function
import re
import types
import difflib
import datetime
import dateutil.parser
import collections
from importlib import import_module
from copy import copy
from warnings import warn
import inspect
import logging
logger = logging.getLogger(__name__)
from six import integer_types, string_types, add_metaclass, iteritems
import numpy as np
from . import enums
from .util.string import nice_join
def field(name):
    ''' Convenience function to explicitly mark a field specification for
a Bokeh model property.
Args:
name (str) : name of a data source field to reference for a property.
Returns:
dict : `{"field": name}`
Note:
This function is included for completeness. String values for
property specifications are by default interpreted as field names.
'''
return dict(field=name)
def value(val):
    ''' Convenience function to explicitly mark a value specification for
a Bokeh model property.
Args:
val (any) : a fixed value to specify for a property.
Returns:
        dict : `{"value": val}`
Note:
String values for property specifications are by default interpreted
as field names. This function is especially useful when you want to
specify a fixed value with text properties.
Example:
.. code-block:: python
# The following will take text values to render from a data source
# column "text_column", but use a fixed value "12pt" for font size
p.text("x", "y", text="text_column",
text_font_size=value("12pt"), source=source)
'''
return dict(value=val)
bokeh_integer_types = (np.int8, np.int16, np.int32, np.int64) + integer_types
# used to indicate properties that are not set (vs null, None, etc)
class _NotSet(object):
pass
class DeserializationError(Exception):
pass
class Property(object):
""" Base class for all type properties. """
def __init__(self, default=None, help=None):
""" This is how the descriptor is created in the class declaration """
if isinstance(default, types.FunctionType): # aka. lazy value
self.validate(default())
else:
self.validate(default)
self._default = default
self.__doc__ = help
self.alternatives = []
# This gets set by the class decorator at class creation time
self.name = "unnamed"
def __str__(self):
return self.__class__.__name__
@property
def _name(self):
return "_" + self.name
@property
def default(self):
if not isinstance(self._default, types.FunctionType):
return copy(self._default)
else:
value = self._default()
self.validate(value)
return value
@classmethod
def autocreate(cls, name=None):
""" Called by the metaclass to create a
new instance of this descriptor
if the user just assigned it to a property without trailing
parentheses.
"""
return cls()
def matches(self, new, old):
# XXX: originally this code warned about not being able to compare values, but that
# doesn't make sense, because most comparisons involving numpy arrays will fail with
        # a ValueError exception, so such warnings were inevitable.
try:
if new is None or old is None:
return new is old # XXX: silence FutureWarning from NumPy
else:
return new == old
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
logger.debug("could not compare %s and %s for property %s (Reason: %s)", new, old, self.name, e)
return False
def from_json(self, json, models=None):
return json
def transform(self, value):
return value
def validate(self, value):
pass
def is_valid(self, value):
try:
self.validate(value)
except ValueError:
return False
else:
return True
def _get(self, obj):
if not hasattr(obj, self._name):
setattr(obj, self._name, self.default)
return getattr(obj, self._name)
def __get__(self, obj, owner=None):
if obj is not None:
return self._get(obj)
elif owner is not None:
return self
else:
raise ValueError("both 'obj' and 'owner' are None, don't know what to do")
def __set__(self, obj, value):
try:
self.validate(value)
except ValueError as e:
for tp, converter in self.alternatives:
if tp.is_valid(value):
value = converter(value)
break
else:
raise e
else:
value = self.transform(value)
old = self.__get__(obj)
obj._changed_vars.add(self.name)
if self._name in obj.__dict__ and self.matches(value, old):
return
setattr(obj, self._name, value)
obj._dirty = True
if hasattr(obj, '_trigger'):
if hasattr(obj, '_block_callbacks') and obj._block_callbacks:
obj._callback_queue.append((self.name, old, value))
else:
obj._trigger(self.name, old, value)
def __delete__(self, obj):
if hasattr(obj, self._name):
delattr(obj, self._name)
@property
def has_ref(self):
return False
def accepts(self, tp, converter):
tp = ParameterizedProperty._validate_type_param(tp)
self.alternatives.append((tp, converter))
return self
def __or__(self, other):
return Either(self, other)
class Include(object):
""" Include other properties from mixin Models, with a given prefix. """
def __init__(self, delegate, help="", use_prefix=True):
if not (isinstance(delegate, type) and issubclass(delegate, HasProps)):
raise ValueError("expected a subclass of HasProps, got %r" % delegate)
self.delegate = delegate
self.help = help
self.use_prefix = use_prefix
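# Usage sketch (hypothetical mixin and model names, for illustration only):
# when a HasProps subclass assigns an Include to an attribute ending in
# "_props", the metaclass copies the delegate's properties in, prefixed with
# the attribute name minus that suffix.
#
#     class LineProps(HasProps):
#         line_color = Color()
#         line_width = Float(1.0)
#
#     class Border(HasProps):
#         # this adds `border_line_color` and `border_line_width` properties
#         border_props = Include(LineProps, use_prefix=True)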
class MetaHasProps(type):
def __new__(cls, class_name, bases, class_dict):
names = set()
names_with_refs = set()
container_names = set()
# First pre-process to handle all the Includes
includes = {}
removes = set()
for name, prop in class_dict.items():
if not isinstance(prop, Include):
continue
delegate = prop.delegate
if prop.use_prefix:
prefix = re.sub("_props$", "", name) + "_"
else:
prefix = ""
for subpropname in delegate.class_properties(withbases=False):
fullpropname = prefix + subpropname
subprop = delegate.lookup(subpropname)
if isinstance(subprop, Property):
# If it's an actual instance, then we need to make a copy
# so two properties don't write to the same hidden variable
# inside the instance.
subprop = copy(subprop)
if "%s" in prop.help:
doc = prop.help % subpropname.replace('_', ' ')
else:
doc = prop.help
try:
includes[fullpropname] = subprop(help=doc)
except TypeError:
includes[fullpropname] = subprop
subprop.__doc__ = doc
# Remove the name of the Include attribute itself
removes.add(name)
# Update the class dictionary, taking care not to overwrite values
# from the delegates that the subclass may have explicitly defined
for key, val in includes.items():
if key not in class_dict:
class_dict[key] = val
for tmp in removes:
del class_dict[tmp]
dataspecs = {}
units_to_add = {}
for name, prop in class_dict.items():
if isinstance(prop, Property):
prop.name = name
if prop.has_ref:
names_with_refs.add(name)
elif isinstance(prop, ContainerProperty):
container_names.add(name)
names.add(name)
if isinstance(prop, DataSpec):
dataspecs[name] = prop
if hasattr(prop, '_units_type'):
units_to_add[name+"_units"] = prop._units_type
elif isinstance(prop, type) and issubclass(prop, Property):
# Support the user adding a property without using parens,
# i.e. using just the Property subclass instead of an
# instance of the subclass
newprop = prop.autocreate(name=name)
class_dict[name] = newprop
newprop.name = name
names.add(name)
# Process dataspecs
if issubclass(prop, DataSpec):
dataspecs[name] = newprop
for name, prop in units_to_add.items():
prop.name = name
names.add(name)
class_dict[name] = prop
class_dict["__properties__"] = names
class_dict["__properties_with_refs__"] = names_with_refs
class_dict["__container_props__"] = container_names
if dataspecs:
class_dict["_dataspecs"] = dataspecs
return type.__new__(cls, class_name, bases, class_dict)
def accumulate_from_subclasses(cls, propname):
s = set()
for c in inspect.getmro(cls):
if issubclass(c, HasProps):
s.update(getattr(c, propname))
return s
@add_metaclass(MetaHasProps)
class HasProps(object):
def __init__(self, **properties):
super(HasProps, self).__init__()
self._changed_vars = set()
for name, value in properties.items():
setattr(self, name, value)
def __setattr__(self, name, value):
props = sorted(self.properties())
if name.startswith("_") or name in props:
super(HasProps, self).__setattr__(name, value)
else:
matches, text = difflib.get_close_matches(name.lower(), props), "similar"
if not matches:
matches, text = props, "possible"
raise AttributeError("unexpected attribute '%s' to %s, %s attributes are %s" %
(name, self.__class__.__name__, text, nice_join(matches)))
def clone(self):
""" Returns a duplicate of this object with all its properties
set appropriately. Values which are containers are shallow-copied.
"""
return self.__class__(**self.changed_properties_with_values())
@classmethod
def lookup(cls, name):
return getattr(cls, name)
@classmethod
def properties_with_refs(cls):
""" Returns a set of the names of this object's properties that
have references. We traverse the class hierarchy and
pull together the full list of properties.
"""
if not hasattr(cls, "__cached_allprops_with_refs"):
s = accumulate_from_subclasses(cls, "__properties_with_refs__")
cls.__cached_allprops_with_refs = s
return cls.__cached_allprops_with_refs
@classmethod
def properties_containers(cls):
""" Returns a list of properties that are containers
"""
if not hasattr(cls, "__cached_allprops_containers"):
s = accumulate_from_subclasses(cls, "__container_props__")
cls.__cached_allprops_containers = s
return cls.__cached_allprops_containers
@classmethod
def properties(cls):
""" Returns a set of the names of this object's properties. We
traverse the class hierarchy and pull together the full
list of properties.
"""
if not hasattr(cls, "__cached_allprops"):
s = cls.class_properties()
cls.__cached_allprops = s
return cls.__cached_allprops
@classmethod
def dataspecs(cls):
""" Returns a set of the names of this object's dataspecs (and
dataspec subclasses). Traverses the class hierarchy.
"""
if not hasattr(cls, "__cached_dataspecs"):
dataspecs = set()
for c in reversed(inspect.getmro(cls)):
if hasattr(c, "_dataspecs"):
dataspecs.update(c._dataspecs.keys())
cls.__cached_dataspecs = dataspecs
return cls.__cached_dataspecs
@classmethod
def dataspecs_with_refs(cls):
dataspecs = {}
for c in reversed(inspect.getmro(cls)):
if hasattr(c, "_dataspecs"):
dataspecs.update(c._dataspecs)
return dataspecs
def changed_vars(self):
""" Returns which variables changed since the creation of the object,
or the last called to reset_changed_vars().
"""
return set.union(self._changed_vars, self.properties_with_refs(),
self.properties_containers())
def reset_changed_vars(self):
self._changed_vars = set()
def properties_with_values(self):
return dict([ (attr, getattr(self, attr)) for attr in self.properties() ])
def changed_properties(self):
return self.changed_vars()
def changed_properties_with_values(self):
return dict([ (attr, getattr(self, attr)) for attr in self.changed_properties() ])
@classmethod
def class_properties(cls, withbases=True):
if withbases:
return accumulate_from_subclasses(cls, "__properties__")
else:
return set(cls.__properties__)
def set(self, **kwargs):
""" Sets a number of properties at once """
for kw in kwargs:
setattr(self, kw, kwargs[kw])
def pprint_props(self, indent=0):
""" Prints the properties of this object, nicely formatted """
for key, value in self.properties_with_values().items():
print("%s%s: %r" % (" "*indent, key, value))
class PrimitiveProperty(Property):
""" A base class for simple property types. Subclasses should
define a class attribute ``_underlying_type`` that is a tuple
of acceptable type values for the property.
"""
_underlying_type = None
def validate(self, value):
super(PrimitiveProperty, self).validate(value)
if not (value is None or isinstance(value, self._underlying_type)):
raise ValueError("expected a value of type %s, got %s of type %s" %
(nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))
def from_json(self, json, models=None):
if json is None or isinstance(json, self._underlying_type):
return json
else:
expected = nice_join([ cls.__name__ for cls in self._underlying_type ])
raise DeserializationError("%s expected %s, got %s" % (self, expected, json))
class Bool(PrimitiveProperty):
""" Boolean type property. """
_underlying_type = (bool,)
class Int(PrimitiveProperty):
""" Signed integer type property. """
_underlying_type = bokeh_integer_types
class Float(PrimitiveProperty):
""" Floating point type property. """
_underlying_type = (float, ) + bokeh_integer_types
class Complex(PrimitiveProperty):
""" Complex floating point type property. """
_underlying_type = (complex, float) + bokeh_integer_types
class String(PrimitiveProperty):
""" String type property. """
_underlying_type = string_types
class Regex(String):
""" Regex type property validates that text values match the
given regular expression.
"""
def __init__(self, regex, default=None, help=None):
self.regex = re.compile(regex)
super(Regex, self).__init__(default=default, help=help)
def validate(self, value):
super(Regex, self).validate(value)
if not (value is None or self.regex.match(value) is not None):
raise ValueError("expected a string matching %r pattern, got %r" % (self.regex.pattern, value))
def __str__(self):
return "%s(%r)" % (self.__class__.__name__, self.regex.pattern)
class JSON(String):
""" JSON type property validates that text values are valid JSON.
.. note::
The string is transmitted and received by BokehJS as a *string*
containing JSON content. i.e., you must use ``JSON.parse`` to unpack
the value into a JavaScript hash.
"""
def validate(self, value):
super(JSON, self).validate(value)
if value is None: return
try:
import json
json.loads(value)
except ValueError:
raise ValueError("expected JSON text, got %r" % value)
class ParameterizedProperty(Property):
""" Base class for Properties that have type parameters, e.g.
``List(String)``.
"""
@staticmethod
def _validate_type_param(type_param):
if isinstance(type_param, type):
if issubclass(type_param, Property):
return type_param()
else:
type_param = type_param.__name__
elif isinstance(type_param, Property):
return type_param
raise ValueError("expected a property as type parameter, got %s" % type_param)
@property
def type_params(self):
raise NotImplementedError("abstract method")
@property
def has_ref(self):
return any(type_param.has_ref for type_param in self.type_params)
class ContainerProperty(ParameterizedProperty):
""" Base class for Container-like type properties. """
pass
class Seq(ContainerProperty):
""" Sequence (list, tuple) type property.
"""
def _is_seq(self, value):
return isinstance(value, collections.Container) and not isinstance(value, collections.Mapping)
def _new_instance(self, value):
return value
def __init__(self, item_type, default=None, help=None):
self.item_type = self._validate_type_param(item_type)
super(Seq, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.item_type]
def validate(self, value):
super(Seq, self).validate(value)
if value is not None:
if not (self._is_seq(value) and all(self.item_type.is_valid(item) for item in value)):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.item_type)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, list):
return self._new_instance([ self.item_type.from_json(item, models) for item in json ])
else:
raise DeserializationError("%s expected a list or None, got %s" % (self, json))
class List(Seq):
""" Python list type property.
"""
def __init__(self, item_type, default=[], help=None):
# todo: refactor to not use mutable objects as default values.
# Left in place for now because we want to allow None to express
        # optional values. Also in Dict.
super(List, self).__init__(item_type, default=default, help=help)
def _is_seq(self, value):
return isinstance(value, list)
class Array(Seq):
""" NumPy array type property.
"""
def _is_seq(self, value):
import numpy as np
return isinstance(value, np.ndarray)
def _new_instance(self, value):
return np.array(value)
class Dict(ContainerProperty):
""" Python dict type property.
If a default value is passed in, then a shallow copy of it will be
used for each new use of this property.
"""
def __init__(self, keys_type, values_type, default={}, help=None):
self.keys_type = self._validate_type_param(keys_type)
self.values_type = self._validate_type_param(values_type)
super(Dict, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.keys_type, self.values_type]
def validate(self, value):
super(Dict, self).validate(value)
if value is not None:
if not (isinstance(value, dict) and \
all(self.keys_type.is_valid(key) and self.values_type.is_valid(val) for key, val in iteritems(value))):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s, %s)" % (self.__class__.__name__, self.keys_type, self.values_type)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, dict):
return { self.keys_type.from_json(key, models): self.values_type.from_json(value, models) for key, value in iteritems(json) }
else:
raise DeserializationError("%s expected a dict or None, got %s" % (self, json))
class Tuple(ContainerProperty):
""" Tuple type property. """
def __init__(self, tp1, tp2, *type_params, **kwargs):
self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
super(Tuple, self).__init__(default=kwargs.get("default"), help=kwargs.get("help"))
@property
def type_params(self):
return self._type_params
def validate(self, value):
super(Tuple, self).validate(value)
if value is not None:
if not (isinstance(value, (tuple, list)) and len(self.type_params) == len(value) and \
all(type_param.is_valid(item) for type_param, item in zip(self.type_params, value))):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, list):
return tuple(type_param.from_json(item, models) for type_param, item in zip(self.type_params, json))
else:
raise DeserializationError("%s expected a list or None, got %s" % (self, json))
class Instance(Property):
""" Instance type property, for references to other Models in the object
graph.
"""
def __init__(self, instance_type, default=None, help=None):
if not isinstance(instance_type, (type,) + string_types):
raise ValueError("expected a type or string, got %s" % instance_type)
if isinstance(instance_type, type) and not issubclass(instance_type, HasProps):
raise ValueError("expected a subclass of HasProps, got %s" % instance_type)
self._instance_type = instance_type
super(Instance, self).__init__(default=default, help=help)
@property
def instance_type(self):
if isinstance(self._instance_type, str):
module, name = self._instance_type.rsplit(".", 1)
self._instance_type = getattr(import_module(module, "bokeh"), name)
return self._instance_type
@property
def has_ref(self):
return True
def validate(self, value):
super(Instance, self).validate(value)
if value is not None:
if not isinstance(value, self.instance_type):
raise ValueError("expected an instance of type %s, got %s of type %s" %
(self.instance_type.__name__, value, type(value).__name__))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.instance_type.__name__)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, dict):
from .plot_object import PlotObject
if issubclass(self.instance_type, PlotObject):
if models is None:
raise DeserializationError("%s can't deserialize without models" % self)
else:
model = models.get(json["id"])
if model is not None:
return model
else:
raise DeserializationError("%s failed to deserilize reference to %s" % (self, json))
else:
attrs = {}
for name, value in iteritems(json):
prop = self.instance_type.lookup(name)
attrs[name] = prop.from_json(value, models)
# XXX: this doesn't work when Instance(Superclass) := Subclass()
# Serialization dict must carry type information to resolve this.
return self.instance_type(**attrs)
else:
raise DeserializationError("%s expected a dict or None, got %s" % (self, json))
class This(Property):
""" A reference to an instance of the class being defined. """
pass
# Fake types, ABCs
class Any(Property):
""" Any type property accepts any values. """
pass
class Function(Property):
""" Function type property. """
pass
class Event(Property):
""" Event type property. """
pass
class Interval(ParameterizedProperty):
''' Range type property ensures values are contained inside a given interval. '''
def __init__(self, interval_type, start, end, default=None, help=None):
self.interval_type = self._validate_type_param(interval_type)
self.interval_type.validate(start)
self.interval_type.validate(end)
self.start = start
self.end = end
super(Interval, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.interval_type]
def validate(self, value):
super(Interval, self).validate(value)
if not (value is None or self.interval_type.is_valid(value) and value >= self.start and value <= self.end):
raise ValueError("expected a value of type %s in range [%s, %s], got %r" % (self.interval_type, self.start, self.end, value))
def __str__(self):
return "%s(%s, %r, %r)" % (self.__class__.__name__, self.interval_type, self.start, self.end)
class Byte(Interval):
''' Byte type property. '''
def __init__(self, default=0, help=None):
super(Byte, self).__init__(Int, 0, 255, default=default, help=help)
class Either(ParameterizedProperty):
""" Takes a list of valid properties and validates against them in succession. """
def __init__(self, tp1, tp2, *type_params, **kwargs):
self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
default = kwargs.get("default", self._type_params[0].default)
help = kwargs.get("help")
super(Either, self).__init__(default=default, help=help)
@property
def type_params(self):
return self._type_params
def validate(self, value):
super(Either, self).validate(value)
if not (value is None or any(param.is_valid(value) for param in self.type_params)):
raise ValueError("expected an element of either %s, got %r" % (nice_join(self.type_params), value))
def transform(self, value):
for param in self.type_params:
try:
return param.transform(value)
except ValueError:
pass
raise ValueError("Could not transform %r" % value)
def from_json(self, json, models=None):
for tp in self.type_params:
try:
return tp.from_json(json, models)
except DeserializationError:
pass
else:
raise DeserializationError("%s couldn't deserialize %s" % (self, json))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
def __or__(self, other):
        return self.__class__(*(self.type_params + [other]), default=self._default, help=self.__doc__)
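# Usage sketch (hypothetical model name): a property declared with Either
# accepts a value matching any of its type parameters and rejects the rest.
#
#     class Shape(HasProps):
#         size = Either(Float, Tuple(Float, Float))
#
#     Shape(size=2.5)           # a bare number is accepted
#     Shape(size=(3.0, 4.0))    # so is a (width, height) pair
#     Shape(size="big")         # raises ValueError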
class Enum(Property):
""" An Enum with a list of allowed values. The first value in the list is
the default value, unless a default is provided with the "default" keyword
argument.
"""
def __init__(self, enum, *values, **kwargs):
if not (not values and isinstance(enum, enums.Enumeration)):
enum = enums.enumeration(enum, *values)
self.allowed_values = enum._values
default = kwargs.get("default", enum._default)
help = kwargs.get("help")
super(Enum, self).__init__(default=default, help=help)
def validate(self, value):
super(Enum, self).validate(value)
if not (value is None or value in self.allowed_values):
raise ValueError("invalid value for %s: %r; allowed values are %s" % (self.name, value, nice_join(self.allowed_values)))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, self.allowed_values)))
class Auto(Enum):
def __init__(self):
super(Auto, self).__init__("auto")
def __str__(self):
return self.__class__.__name__
# Properties useful for defining visual attributes
class Color(Either):
""" Accepts color definition in a variety of ways, and produces an
appropriate serialization of its value for whatever backend.
For colors, because we support named colors and hex values prefaced
with a "#", when we are handed a string value, there is a little
interpretation: if the value is one of the 147 SVG named colors or
it starts with a "#", then it is interpreted as a value.
If a 3-tuple is provided, then it is treated as an RGB (0..255).
If a 4-tuple is provided, then it is treated as an RGBa (0..255), with
alpha as a float between 0 and 1. (This follows the HTML5 Canvas API.)
"""
def __init__(self, default=None, help=None):
types = (Enum(enums.NamedColor),
Regex("^#[0-9a-fA-F]{6}$"),
Tuple(Byte, Byte, Byte),
Tuple(Byte, Byte, Byte, Percent))
super(Color, self).__init__(*types, default=default, help=help)
def __str__(self):
return self.__class__.__name__
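# Illustrative assignments a Color property accepts (`m.fill_color` is a
# hypothetical property name used only for this sketch):
#
#     m.fill_color = "firebrick"           # one of the named SVG colors
#     m.fill_color = "#00ff7f"             # six-digit hex string
#     m.fill_color = (255, 127, 0)         # RGB tuple of bytes
#     m.fill_color = (255, 127, 0, 0.5)    # RGBa tuple, alpha in [0, 1]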
class Align(Property):
pass
class DashPattern(Either):
""" Dash type property.
Express patterns that describe line dashes. ``DashPattern`` values
can be specified in a variety of ways:
* An enum: "solid", "dashed", "dotted", "dotdash", "dashdot"
* a tuple or list of integers in the `HTML5 Canvas dash specification style`_.
Note that if the list of integers has an odd number of elements, then
it is duplicated, and that duplicated list becomes the new dash list.
To indicate that dashing is turned off (solid lines), specify the empty
list [].
.. _HTML5 Canvas dash specification style: http://www.w3.org/html/wg/drafts/2dcontext/html5_canvas/#dash-list
"""
_dash_patterns = {
"solid": [],
"dashed": [6],
"dotted": [2,4],
"dotdash": [2,4,6,4],
"dashdot": [6,4,2,4],
}
def __init__(self, default=[], help=None):
types = Enum(enums.DashPattern), Regex(r"^(\d+(\s+\d+)*)?$"), Seq(Int)
super(DashPattern, self).__init__(*types, default=default, help=help)
def transform(self, value):
value = super(DashPattern, self).transform(value)
if isinstance(value, string_types):
try:
return self._dash_patterns[value]
except KeyError:
return [int(x) for x in value.split()]
else:
return value
def __str__(self):
return self.__class__.__name__
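# Illustrative assignments (`glyph.line_dash` is a hypothetical property used
# only for this sketch); see `transform` above for the conversions applied:
#
#     glyph.line_dash = "dashed"         # named pattern, becomes [6]
#     glyph.line_dash = "2 4"            # string of integers, becomes [2, 4]
#     glyph.line_dash = [2, 4, 6, 4]     # explicit dash list, used as-is
#     glyph.line_dash = []               # empty list means a solid line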
class Size(Float):
""" Size type property.
.. note::
``Size`` is equivalent to an unsigned int.
"""
def validate(self, value):
super(Size, self).validate(value)
if not (value is None or 0.0 <= value):
raise ValueError("expected a non-negative number, got %r" % value)
class Percent(Float):
""" Percentage type property.
Percents are useful for specifying alphas and coverage and extents; more
semantically meaningful than Float(0..1).
"""
def validate(self, value):
super(Percent, self).validate(value)
if not (value is None or 0.0 <= value <= 1.0):
raise ValueError("expected a value in range [0, 1], got %r" % value)
class Angle(Float):
""" Angle type property. """
pass
class Date(Property):
""" Date (not datetime) type property.
"""
def __init__(self, default=datetime.date.today(), help=None):
super(Date, self).__init__(default=default, help=help)
def validate(self, value):
super(Date, self).validate(value)
if not (value is None or isinstance(value, (datetime.date,) + string_types + (float,) + bokeh_integer_types)):
raise ValueError("expected a date, string or timestamp, got %r" % value)
def transform(self, value):
value = super(Date, self).transform(value)
if isinstance(value, (float,) + bokeh_integer_types):
try:
value = datetime.date.fromtimestamp(value)
except ValueError:
value = datetime.date.fromtimestamp(value/1000)
elif isinstance(value, string_types):
value = dateutil.parser.parse(value).date()
return value
class Datetime(Property):
""" Datetime type property.
"""
def __init__(self, default=datetime.date.today(), help=None):
super(Datetime, self).__init__(default=default, help=help)
def validate(self, value):
super(Datetime, self).validate(value)
if (isinstance(value, (datetime.datetime, datetime.date, np.datetime64))):
return
try:
import pandas
if isinstance(value, (pandas.Timestamp)):
return
except ImportError:
pass
raise ValueError("Expected a datetime instance, got %r" % value)
def transform(self, value):
value = super(Datetime, self).transform(value)
return value
# Handled by serialization in protocol.py for now
class RelativeDelta(Dict):
""" RelativeDelta type property for time deltas.
"""
def __init__(self, default={}, help=None):
keys = Enum("years", "months", "days", "hours", "minutes", "seconds", "microseconds")
values = Int
super(RelativeDelta, self).__init__(keys, values, default=default, help=help)
def __str__(self):
return self.__class__.__name__
class DataSpec(Either):
def __init__(self, typ, default, help=None):
super(DataSpec, self).__init__(String, Dict(String, Either(String, typ)), typ, default=default, help=help)
self._type = self._validate_type_param(typ)
def to_dict(self, obj):
val = getattr(obj, self._name, self.default)
# Check for None value
if val is None:
return dict(value=None)
# Check for spec type value
try:
self._type.validate(val)
return dict(value=val)
except ValueError:
pass
# Check for data source field name
if isinstance(val, string_types):
return dict(field=val)
# Must be dict, return as-is
return val
def __str__(self):
val = getattr(self, self._name, self.default)
return "%s(%r)" % (self.__class__.__name__, val)
class NumberSpec(DataSpec):
def __init__(self, default, help=None):
super(NumberSpec, self).__init__(Float, default=default, help=help)
class StringSpec(DataSpec):
def __init__(self, default, help=None):
super(StringSpec, self).__init__(List(String), default=default, help=help)
def __set__(self, obj, value):
if isinstance(value, list):
if len(value) != 1:
raise TypeError("StringSpec convenience list values must have length 1")
value = dict(value=value[0])
super(StringSpec, self).__set__(obj, value)
class FontSizeSpec(DataSpec):
def __init__(self, default, help=None):
super(FontSizeSpec, self).__init__(List(String), default=default, help=help)
def __set__(self, obj, value):
if isinstance(value, string_types):
warn('Setting a fixed font size value as a string %r is deprecated, '
'set with value(%r) or [%r] instead' % (value, value, value),
DeprecationWarning, stacklevel=2)
if len(value) > 0 and value[0].isdigit():
value = dict(value=value)
super(FontSizeSpec, self).__set__(obj, value)
class UnitsSpec(NumberSpec):
def __init__(self, default, units_type, units_default, help=None):
super(UnitsSpec, self).__init__(default=default, help=help)
self._units_type = self._validate_type_param(units_type)
self._units_type.validate(units_default)
self._units_type._default = units_default
def to_dict(self, obj):
d = super(UnitsSpec, self).to_dict(obj)
d["units"] = getattr(obj, self.name+"_units")
return d
def __set__(self, obj, value):
if isinstance(value, dict):
units = value.pop("units", None)
if units: setattr(obj, self.name+"_units", units)
super(UnitsSpec, self).__set__(obj, value)
def __str__(self):
val = getattr(self, self._name, self.default)
return "%s(%r, units_default=%r)" % (self.__class__.__name__, val, self._units_type._default)
class AngleSpec(UnitsSpec):
def __init__(self, default, units_default="rad", help=None):
super(AngleSpec, self).__init__(default=default, units_type=Enum(enums.AngleUnits), units_default=units_default, help=help)
class DistanceSpec(UnitsSpec):
def __init__(self, default, units_default="data", help=None):
super(DistanceSpec, self).__init__(default=default, units_type=Enum(enums.SpatialUnits), units_default=units_default, help=help)
def __set__(self, obj, value):
try:
if value < 0:
raise ValueError("Distances must be non-negative")
except TypeError:
pass
super(DistanceSpec, self).__set__(obj, value)
class ScreenDistanceSpec(NumberSpec):
def to_dict(self, obj):
d = super(ScreenDistanceSpec, self).to_dict(obj)
d["units"] = "screen"
return d
def __set__(self, obj, value):
try:
if value < 0:
raise ValueError("Distances must be non-negative")
except TypeError:
pass
super(ScreenDistanceSpec, self).__set__(obj, value)
class DataDistanceSpec(NumberSpec):
def to_dict(self, obj):
        d = super(DataDistanceSpec, self).to_dict(obj)
d["units"] = "data"
return d
def __set__(self, obj, value):
try:
if value < 0:
raise ValueError("Distances must be non-negative")
except TypeError:
pass
super(DataDistanceSpec, self).__set__(obj, value)
class ColorSpec(DataSpec):
def __init__(self, default, help=None):
super(ColorSpec, self).__init__(Color, default=default, help=help)
@classmethod
def isconst(cls, arg):
""" Returns True if the argument is a literal color. Check for a
well-formed hexadecimal color value.
"""
return isinstance(arg, string_types) and \
((len(arg) == 7 and arg[0] == "#") or arg in enums.NamedColor._values)
@classmethod
def is_color_tuple(cls, val):
return isinstance(val, tuple) and len(val) in (3, 4)
@classmethod
def format_tuple(cls, colortuple):
if len(colortuple) == 3:
return "rgb%r" % (colortuple,)
else:
return "rgba%r" % (colortuple,)
def to_dict(self, obj):
val = getattr(obj, self._name, self.default)
if val is None:
return dict(value=None)
# Check for hexadecimal or named color
if self.isconst(val):
return dict(value=val)
# Check for RGB or RGBa tuple
if isinstance(val, tuple):
return dict(value=self.format_tuple(val))
# Check for data source field name
if isinstance(val, string_types):
return dict(field=val)
# Must be dict, return as-is
return val
def validate(self, value):
try:
return super(ColorSpec, self).validate(value)
except ValueError as e:
# Check for tuple input if not yet a valid input type
if self.is_color_tuple(value):
return True
else:
raise e
def transform(self, value):
# Make sure that any tuple has either three integers, or three integers and one float
if isinstance(value, tuple):
value = tuple(int(v) if i < 3 else v for i, v in enumerate(value))
return value
| bsd-3-clause |
tsherwen/AC_tools | Scripts/2D_GEOSChem_slice_subregion_plotter_example.py | 1 | 2934 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Plotter for 2D slices of GEOS-Chem output NetCDF files.
NOTES
---
 - This is set up for Cly, but many other options (plot/species) are available
   by simply updating the variables passed and the plotting function called.
"""
import AC_tools as AC
import numpy as np
import matplotlib.pyplot as plt
def main():
"""
Basic plotter of NetCDF files using AC_tools
"""
# --- Local settings hardwired here...
fam = 'Cly' # Family to plot
# print species in family for reference...
print((AC.GC_var(fam)))
# --- Get working directory etc from command line (as a dictionary object)
    # (1st argument is the file directory/folder, 2nd is the filename)
Var_rc = AC.get_default_variable_dict()
# Get details on extracted data (inc. resolution)
Data_rc = AC.get_shared_data_as_dict(Var_rc=Var_rc)
# --- extract data and units of data for family/species...
arr, units = AC.fam_data_extractor(wd=Var_rc['wd'], fam=fam,
res=Data_rc['res'], rtn_units=True, annual_mean=False)
# --- Process data (add and extra processing of data here... )
# take average over time
print((arr.shape))
arr = arr.mean(axis=-1)
# Select surface values
print((arr.shape))
arr = arr[..., 0]
# convert to pptv
arr = arr*1E12
units = 'pptv'
# --- Plot up data...
print((arr.shape))
# - Plot a (very) simple plot ...
# AC.map_plot( arr.T, res=Data_rc['res'] )
# - plot a slightly better plot...
# (loads of options here - just type help(AC.plot_spatial_figure) in ipython)
# set range for data...
fixcb = np.array([0., 100.])
# number of ticks on colorbar (make sure the fixcb range divides by this)
nticks = 6
interval = (1/3.) # number of lat/lon labels... (x*15 degrees... )
# set limits of plot
lat_min = 5.
lat_max = 75.
lon_min = -30.
lon_max = 60.
left_cb_pos = 0.85 # set X (fractional) position
axis_titles = True # add labels for lat and lon
# title for plot
title = "Plot of annual average {}".format(fam)
# save as pdf (just set to True) or show?
# figsize = (7,5) # figsize to use? (e.g. square or rectangular plot)
# call plotter...
AC.plot_spatial_figure(arr, res=Data_rc['res'], units=units, fixcb=fixcb,
lat_min=lat_min, lat_max=lat_max, lon_min=lon_min, lon_max=lon_max,
axis_titles=axis_titles, left_cb_pos=left_cb_pos,
nticks=nticks, interval=interval, title=title, show=False)
    # are the spacings right? - if not, just update the subplot margins below
bottom = 0.1
top = 0.9
left = 0.1
right = 0.9
fig = plt.gcf()
fig.subplots_adjust(bottom=bottom, top=top, left=left, right=right)
# show and save as PDF?
plt.savefig('pete_plot.png')
AC.show_plot()
if __name__ == "__main__":
main()
| mit |
mmottahedi/neuralnilm_prototype | scripts/e249.py | 2 | 3897 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
from copy import deepcopy
from math import sqrt
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e233
based on e131c but with:
* lag=32
* pool
e234
* init final layer and conv layer
235
no lag
236
should be exactly as 131c: no pool, no lag, no init for final and conv layer
237
putting the pool back
238
seems pooling hurts us! disable pooling.
enable lag = 32
239
BLSTM
lag = 20
240
LSTM not BLSTM
various lags
241
output is prediction
ideas for next TODO:
* 3 LSTM layers with smaller conv between them
* why does pooling hurt us?
"""
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.0,
n_seq_per_batch=50,
# subsample_target=5,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=True
#lag=0
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
layers_config=[
{
'type': LSTMLayer,
'num_units': 10,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1.)
}
]
)
def exp_x(name, learning_rate):
global source
try:
a = source
except NameError:
source = RealApplianceSource(**source_dict)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
updates=partial(nesterov_momentum, learning_rate=learning_rate)
))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid,
'W': Normal(std=(1/sqrt(50)))
}
)
net = Net(**net_dict_copy)
return net
def main():
for experiment, learning_rate in [('a', 1.0), ('b', 0.1), ('c', 0.01),
('d', 0.001), ('e', 0.0001)]:
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
print("***********************************")
print("Preparing", full_exp_name, "...")
try:
net = exp_x(full_exp_name, learning_rate)
run_experiment(net, path, epochs=1000)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
print("EXCEPTION:", exception)
if __name__ == "__main__":
main()
| mit |
PyQuake/earthquakemodels | code/runExperiments/histogramMagnitude.py | 1 | 1982 | import matplotlib.pyplot as plt
import models.model as model
import earthquake.catalog as catalog
from collections import OrderedDict
def histogramMagnitude(catalog_, region):
"""
    Creates the histogram of magnitudes for a given region.
    Saves one histogram per year to the following path:
    ../Zona2/histograms/<region>/Magnitude Histogram of <year> <region>.png
    where region is supplied by the caller and year runs from 2000 to 2011.
"""
definition = model.loadModelDefinition('../params/' + region + '.txt')
catalogFiltred = catalog.filter(catalog_, definition)
year = 2000
while(year < 2012):
data = dict()
for i in range(len(catalogFiltred)):
if catalogFiltred[i]['year'] == year and catalogFiltred[i]['lat'] > 34.8 and catalogFiltred[i][
'lat'] < 37.05 and catalogFiltred[i]['lon'] > 138.8 and catalogFiltred[i]['lon'] < 141.05:
data[catalogFiltred[i]['mag']] = data.get(catalogFiltred[i]['mag'], 0) + 1
b = OrderedDict(sorted(data.items()))
plt.title('Histogram of ' + str(year) + " " + region)
plt.bar(range(len(data)), b.values(), align='center')
plt.xticks(range(len(data)), b.keys(), rotation=25)
# print(b)
axes = plt.gca()
plt.savefig(
'../Zona2/histograms/'+region+'/Magnitude Histogram of ' +
str(year) +
" " +
region +
            '.png')
        plt.clf()  # clear the figure so bars do not accumulate across years
        del data
year += 1
def main():
"""
    Calls the plotting function for a histogram of magnitudes for each region, based on the JMA catalog.
"""
catalog_ = catalog.readFromFile('../data/jmacat_2000_2013.dat')
region = "Kanto"
histogramMagnitude(catalog_, region)
region = "Kansai"
histogramMagnitude(catalog_, region)
region = "Tohoku"
histogramMagnitude(catalog_, region)
region = "EastJapan"
histogramMagnitude(catalog_, region)
if __name__ == "__main__":
main()
| bsd-3-clause |
jereze/scikit-learn | benchmarks/bench_rcv1_logreg_convergence.py | 149 | 7173 | # Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
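# For reference, the quantity computed above is the averaged L2-regularised
# logistic loss,
#     mean_i log(1 + exp(-y_i * (x_i . w + b))) + ||w||^2 / (2 * C * n_samples),
# which is (up to scaling conventions) the objective the solvers benchmarked
# below minimise.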
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
# compute the same step_size than in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
| bsd-3-clause |
JaviMerino/lisa | libs/utils/analysis/frequency_analysis.py | 1 | 24894 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Frequency Analysis Module """
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import operator
from trappy.utils import listify
from devlib.utils.misc import memoized
from collections import namedtuple
from analysis_module import AnalysisModule
# Configure logging
import logging
NON_IDLE_STATE = 4294967295
ResidencyTime = namedtuple('ResidencyTime', ['total', 'active'])
ResidencyData = namedtuple('ResidencyData', ['label', 'residency'])
class FrequencyAnalysis(AnalysisModule):
"""
Support for plotting Frequency Analysis data
:param trace: input Trace object
:type trace: :mod:`libs.utils.Trace`
"""
def __init__(self, trace):
super(FrequencyAnalysis, self).__init__(trace)
###############################################################################
# DataFrame Getter Methods
###############################################################################
def _dfg_cpu_frequency_residency(self, cpu, total=True):
"""
Get per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency.
:param cpu: CPU ID
:type cpu: int
:param total: if true returns the "total" time, otherwise the "active"
time is returned
:type total: bool
:returns: :mod:`pandas.DataFrame` - "total" or "active" time residency
at each frequency.
"""
residency = self._getCPUFrequencyResidency(cpu)
if not residency:
return None
if total:
return residency.total
return residency.active
def _dfg_cluster_frequency_residency(self, cluster, total=True):
"""
Get per-Cluster frequency residency, i.e. amount of time CLUSTER
`cluster` spent at each frequency.
:param cluster: this can be either a single CPU ID or a list of CPU IDs
belonging to a cluster or the cluster name as specified in the
platform description
:type cluster: str or int or list(int)
:param total: if true returns the "total" time, otherwise the "active"
time is returned
:type total: bool
:returns: :mod:`pandas.DataFrame` - "total" or "active" time residency
at each frequency.
"""
residency = self._getClusterFrequencyResidency(cluster)
if not residency:
return None
if total:
return residency.total
return residency.active
###############################################################################
# Plotting Methods
###############################################################################
def plotClusterFrequencies(self, title='Clusters Frequencies'):
"""
Plot frequency trend for all clusters. If sched_overutilized events are
available, the plots will also show the intervals of time where the
cluster was overutilized.
:param title: user-defined plot title
:type title: str
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
df = self._dfg_trace_event('cpu_frequency')
pd.options.mode.chained_assignment = None
# Extract LITTLE and big clusters frequencies
# and scale them to [MHz]
if len(self._platform['clusters']['little']):
lfreq = df[df.cpu == self._platform['clusters']['little'][-1]]
lfreq['frequency'] = lfreq['frequency']/1e3
else:
lfreq = []
if len(self._platform['clusters']['big']):
bfreq = df[df.cpu == self._platform['clusters']['big'][-1]]
bfreq['frequency'] = bfreq['frequency']/1e3
else:
bfreq = []
# Compute AVG frequency for LITTLE cluster
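        # The average below is time-weighted: each frequency sample is weighted
        # by the time until the next sample (the 'delta' column) and the
        # weighted sum is divided by the covered time span.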
avg_lfreq = 0
if len(lfreq) > 0:
lfreq['timestamp'] = lfreq.index
lfreq['delta'] = (lfreq['timestamp'] -lfreq['timestamp'].shift()).fillna(0).shift(-1)
lfreq['cfreq'] = (lfreq['frequency'] * lfreq['delta']).fillna(0)
timespan = lfreq.iloc[-1].timestamp - lfreq.iloc[0].timestamp
avg_lfreq = lfreq['cfreq'].sum()/timespan
# Compute AVG frequency for big cluster
avg_bfreq = 0
if len(bfreq) > 0:
bfreq['timestamp'] = bfreq.index
bfreq['delta'] = (bfreq['timestamp'] - bfreq['timestamp'].shift()).fillna(0).shift(-1)
bfreq['cfreq'] = (bfreq['frequency'] * bfreq['delta']).fillna(0)
timespan = bfreq.iloc[-1].timestamp - bfreq.iloc[0].timestamp
avg_bfreq = bfreq['cfreq'].sum()/timespan
pd.options.mode.chained_assignment = 'warn'
# Setup a dual cluster plot
fig, pltaxes = plt.subplots(2, 1, figsize=(16, 8))
plt.suptitle(title, y=.97, fontsize=16, horizontalalignment='center')
# Plot Cluster frequencies
axes = pltaxes[0]
axes.set_title('big Cluster')
if avg_bfreq > 0:
axes.axhline(avg_bfreq, color='r', linestyle='--', linewidth=2)
axes.set_ylim(
(self._platform['freqs']['big'][0] - 100000)/1e3,
(self._platform['freqs']['big'][-1] + 100000)/1e3
)
if len(bfreq) > 0:
bfreq['frequency'].plot(style=['r-'], ax=axes,
drawstyle='steps-post', alpha=0.4)
else:
logging.warn('NO big CPUs frequency events to plot')
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.set_ylabel('MHz')
axes.grid(True)
axes.set_xticklabels([])
axes.set_xlabel('')
self._trace.analysis.status.plotOverutilized(axes)
axes = pltaxes[1]
axes.set_title('LITTLE Cluster')
if avg_lfreq > 0:
axes.axhline(avg_lfreq, color='b', linestyle='--', linewidth=2)
axes.set_ylim(
(self._platform['freqs']['little'][0] - 100000)/1e3,
(self._platform['freqs']['little'][-1] + 100000)/1e3
)
if len(lfreq) > 0:
lfreq['frequency'].plot(style=['b-'], ax=axes,
drawstyle='steps-post', alpha=0.4)
else:
logging.warn('NO LITTLE CPUs frequency events to plot')
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.set_ylabel('MHz')
axes.grid(True)
self._trace.analysis.status.plotOverutilized(axes)
# Save generated plots into datadir
figname = '{}/{}cluster_freqs.png'\
.format(self._trace.plots_dir, self._trace.plots_prefix)
pl.savefig(figname, bbox_inches='tight')
logging.info('LITTLE cluster average frequency: %.3f GHz',
avg_lfreq/1e3)
logging.info('big cluster average frequency: %.3f GHz',
avg_bfreq/1e3)
return (avg_lfreq/1e3, avg_bfreq/1e3)
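    # Usage sketch (the `trace` name is illustrative -- any parsed Trace object
    # exposing this analysis module works the same way):
    #   avg_little_ghz, avg_big_ghz = \
    #       trace.analysis.frequency.plotClusterFrequencies()
    # The call saves the figure into the trace plots directory and returns the
    # average frequency of each cluster in GHz.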
def plotCPUFrequencyResidency(self, cpus=None, pct=False, active=False):
"""
Plot per-CPU frequency residency. big CPUs are plotted first and then
LITTLEs.
Requires the following trace events:
- cpu_frequency
- cpu_idle
:param cpus: List of cpus. By default plot all CPUs
:type cpus: list(str)
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, plot DISABLED!')
return
if cpus is None:
# Generate plots only for available CPUs
cpufreq_data = self._dfg_trace_event('cpu_frequency')
_cpus = range(cpufreq_data.cpu.max()+1)
else:
_cpus = listify(cpus)
# Split between big and LITTLE CPUs ordered from higher to lower ID
_cpus.reverse()
big_cpus = [c for c in _cpus if c in self._platform['clusters']['big']]
little_cpus = [c for c in _cpus if c in
self._platform['clusters']['little']]
_cpus = big_cpus + little_cpus
# Precompute active and total time for each CPU
residencies = []
xmax = 0.0
for cpu in _cpus:
res = self._getCPUFrequencyResidency(cpu)
residencies.append(ResidencyData('CPU{}'.format(cpu), res))
max_time = res.total.max().values[0]
if xmax < max_time:
xmax = max_time
self._plotFrequencyResidency(residencies, 'cpu', xmax, pct, active)
def plotClusterFrequencyResidency(self, clusters=None,
pct=False, active=False):
"""
Plot the frequency residency in a given cluster, i.e. the amount of
time cluster `cluster` spent at frequency `f_i`. By default, both 'big'
and 'LITTLE' clusters data are plotted.
Requires the following trace events:
- cpu_frequency
- cpu_idle
:param clusters: name of the clusters to be plotted (all of them by
default)
        :type clusters: str or list(str)
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, plot DISABLED!')
return
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU
if not self._trace.freq_coherency:
logging.warn('Cluster frequency is not coherent, plot DISABLED!')
return
# Sanitize clusters
if clusters is None:
_clusters = self._platform['clusters'].keys()
else:
_clusters = listify(clusters)
# Precompute active and total time for each cluster
residencies = []
xmax = 0.0
for cluster in _clusters:
res = self._getClusterFrequencyResidency(
self._platform['clusters'][cluster.lower()])
residencies.append(ResidencyData('{} Cluster'.format(cluster),
res))
max_time = res.total.max().values[0]
if xmax < max_time:
xmax = max_time
self._plotFrequencyResidency(residencies, 'cluster', xmax, pct, active)
###############################################################################
# Utility Methods
###############################################################################
@memoized
def _getCPUActiveSignal(self, cpu):
"""
Build a square wave representing the active (i.e. non-idle) CPU time,
i.e.:
            cpu_active[t] == 1 if CPU `cpu` is reported to be non-idle
                                 at time t
cpu_active[t] == 0 otherwise
:param cpu: CPU ID
:type cpu: int
"""
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, '
'cannot compute CPU active signal!')
return None
idle_df = self._dfg_trace_event('cpu_idle')
cpu_df = idle_df[idle_df.cpu_id == cpu]
cpu_active = cpu_df.state.apply(
lambda s: 1 if s == NON_IDLE_STATE else 0
)
start_time = 0.0
if not self._trace.ftrace.normalized_time:
start_time = self._trace.ftrace.basetime
if cpu_active.index[0] != start_time:
entry_0 = pd.Series(cpu_active.iloc[0] ^ 1, index=[start_time])
cpu_active = pd.concat([entry_0, cpu_active])
return cpu_active
@memoized
def _getClusterActiveSignal(self, cluster):
"""
Build a square wave representing the active (i.e. non-idle) cluster
time, i.e.:
cluster_active[t] == 1 if at least one CPU is reported to be
non-idle by CPUFreq at time t
cluster_active[t] == 0 otherwise
:param cluster: list of CPU IDs belonging to a cluster
:type cluster: list(int)
"""
cpu_active = {}
for cpu in cluster:
cpu_active[cpu] = self._getCPUActiveSignal(cpu)
active = pd.DataFrame(cpu_active)
active.fillna(method='ffill', inplace=True)
# Cluster active is the OR between the actives on each CPU
# belonging to that specific cluster
cluster_active = reduce(
operator.or_,
[cpu_active.astype(int) for _, cpu_active in
active.iteritems()]
)
return cluster_active
@memoized
def _getClusterFrequencyResidency(self, cluster):
"""
Get a DataFrame with per cluster frequency residency, i.e. amount of
time spent at a given frequency in each cluster.
:param cluster: this can be either a single CPU ID or a list of CPU IDs
belonging to a cluster or the cluster name as specified in the
platform description
:type cluster: str or int or list(int)
:returns: namedtuple(ResidencyTime) - tuple of total and active time
dataframes
:raises: KeyError
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, '
'frequency residency computation not possible!')
return None
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, '
'frequency residency computation not possible!')
return None
if isinstance(cluster, str):
try:
_cluster = self._platform['clusters'][cluster.lower()]
except KeyError:
logging.warn('%s cluster not found!', cluster)
return None
else:
_cluster = listify(cluster)
freq_df = self._dfg_trace_event('cpu_frequency')
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU. This assumption is verified
# by the Trace module when parsing the trace.
if len(_cluster) > 1 and not self._trace.freq_coherency:
            logging.warn('Cluster frequency is NOT coherent, '
                         'cannot compute residency!')
return None
cluster_freqs = freq_df[freq_df.cpu == _cluster[0]]
# Compute TOTAL Time
time_intervals = cluster_freqs.index[1:] - cluster_freqs.index[:-1]
total_time = pd.DataFrame({
'time': time_intervals,
'frequency': [f/1000.0 for f in cluster_freqs.iloc[:-1].frequency]
})
total_time = total_time.groupby(['frequency']).sum()
# Compute ACTIVE Time
cluster_active = self._getClusterActiveSignal(_cluster)
# In order to compute the active time spent at each frequency we
# multiply 2 square waves:
# - cluster_active, a square wave of the form:
# cluster_active[t] == 1 if at least one CPU is reported to be
# non-idle by CPUFreq at time t
# cluster_active[t] == 0 otherwise
# - freq_active, square wave of the form:
# freq_active[t] == 1 if at time t the frequency is f
# freq_active[t] == 0 otherwise
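        # Worked example (illustrative numbers): if the cluster is active over
        # [0s, 2s] and [5s, 6s] while the frequency equals f over [1s, 6s], the
        # product of the two waves is 1 over [1s, 2s] and [5s, 6s], so the
        # active residency at f integrates to 2 seconds.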
available_freqs = sorted(cluster_freqs.frequency.unique())
new_idx = sorted(cluster_freqs.index.tolist() +
cluster_active.index.tolist())
cluster_freqs = cluster_freqs.reindex(new_idx, method='ffill')
cluster_active = cluster_active.reindex(new_idx, method='ffill')
nonidle_time = []
for f in available_freqs:
freq_active = cluster_freqs.frequency.apply(
lambda x: 1 if x == f else 0
)
active_t = cluster_active * freq_active
# Compute total time by integrating the square wave
nonidle_time.append(self._trace.integrate_square_wave(active_t))
active_time = pd.DataFrame({'time': nonidle_time},
index=[f/1000.0 for f in available_freqs])
active_time.index.name = 'frequency'
return ResidencyTime(total_time, active_time)
def _getCPUFrequencyResidency(self, cpu):
"""
Get a DataFrame with per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency. Both total and active times
will be computed.
:param cpu: CPU ID
:type cpu: int
:returns: namedtuple(ResidencyTime) - tuple of total and active time
dataframes
"""
return self._getClusterFrequencyResidency(cpu)
def _plotFrequencyResidencyAbs(self, axes, residency, n_plots,
is_first, is_last, xmax, title=''):
"""
Private method to generate frequency residency plots.
:param axes: axes over which to generate the plot
:type axes: matplotlib.axes.Axes
:param residency: tuple of total and active time dataframes
:type residency: namedtuple(ResidencyTime)
:param n_plots: total number of plots
:type n_plots: int
:param is_first: if True this is the first plot
:type is_first: bool
:param is_last: if True this is the last plot
:type is_last: bool
:param xmax: x-axes higher bound
        :type xmax: double
:param title: title of this subplot
:type title: str
"""
yrange = 0.4 * max(6, len(residency.total)) * n_plots
residency.total.plot.barh(ax=axes, color='g',
legend=False, figsize=(16, yrange))
residency.active.plot.barh(ax=axes, color='r',
legend=False, figsize=(16, yrange))
axes.set_xlim(0, 1.05*xmax)
axes.set_ylabel('Frequency [MHz]')
axes.set_title(title)
axes.grid(True)
if is_last:
axes.set_xlabel('Time [s]')
else:
axes.set_xticklabels([])
if is_first:
# Put title on top of the figure. As of now there is no clean way
# to make the title appear always in the same position in the
# figure because figure heights may vary between different
# platforms (different number of OPPs). Hence, we use annotation
legend_y = axes.get_ylim()[1]
axes.annotate('OPP Residency Time', xy=(0, legend_y),
xytext=(-50, 45), textcoords='offset points',
fontsize=18)
axes.annotate('GREEN: Total', xy=(0, legend_y),
xytext=(-50, 25), textcoords='offset points',
color='g', fontsize=14)
axes.annotate('RED: Active', xy=(0, legend_y),
xytext=(50, 25), textcoords='offset points',
color='r', fontsize=14)
def _plotFrequencyResidencyPct(self, axes, residency_df, label,
n_plots, is_first, is_last, res_type):
"""
Private method to generate PERCENTAGE frequency residency plots.
:param axes: axes over which to generate the plot
:type axes: matplotlib.axes.Axes
:param residency_df: residency time dataframe
:type residency_df: :mod:`pandas.DataFrame`
:param label: label to be used for percentage residency dataframe
:type label: str
:param n_plots: total number of plots
:type n_plots: int
:param is_first: if True this is the first plot
:type is_first: bool
        :param is_last: if True this is the last plot
        :type is_last: bool
:param res_type: type of residency, either TOTAL or ACTIVE
        :type res_type: str
"""
# Compute sum of the time intervals
duration = residency_df.time.sum()
residency_pct = pd.DataFrame(
{label: residency_df.time.apply(lambda x: x*100/duration)},
index=residency_df.index
)
yrange = 3 * n_plots
residency_pct.T.plot.barh(ax=axes, stacked=True, figsize=(16, yrange))
axes.legend(loc='lower center', ncol=7)
axes.set_xlim(0, 100)
axes.grid(True)
if is_last:
axes.set_xlabel('Residency [%]')
else:
axes.set_xticklabels([])
if is_first:
legend_y = axes.get_ylim()[1]
axes.annotate('OPP {} Residency Time'.format(res_type),
xy=(0, legend_y), xytext=(-50, 35),
textcoords='offset points', fontsize=18)
def _plotFrequencyResidency(self, residencies, entity_name, xmax,
pct, active):
"""
Generate Frequency residency plots for the given entities.
:param residencies:
:type residencies: namedtuple(ResidencyData) - tuple containing:
1) as first element, a label to be used as subplot title
2) as second element, a namedtuple(ResidencyTime)
:param entity_name: name of the entity ('cpu' or 'cluster') used in the
figure name
:type entity_name: str
:param xmax: upper bound of x-axes
:type xmax: double
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
n_plots = len(residencies)
gs = gridspec.GridSpec(n_plots, 1)
fig = plt.figure()
figtype = ""
for idx, data in enumerate(residencies):
if data.residency is None:
plt.close(fig)
return
axes = fig.add_subplot(gs[idx])
is_first = idx == 0
is_last = idx+1 == n_plots
if pct and active:
self._plotFrequencyResidencyPct(axes, data.residency.active,
data.label, n_plots,
is_first, is_last,
'ACTIVE')
figtype = "_pct_active"
continue
if pct:
self._plotFrequencyResidencyPct(axes, data.residency.total,
data.label, n_plots,
is_first, is_last,
'TOTAL')
figtype = "_pct_total"
continue
self._plotFrequencyResidencyAbs(axes, data.residency,
n_plots, is_first,
is_last, xmax,
title=data.label)
figname = '{}/{}{}_freq_residency{}.png'\
.format(self._trace.plots_dir,
self._trace.plots_prefix,
entity_name, figtype)
pl.savefig(figname, bbox_inches='tight')
# vim :set tabstop=4 shiftwidth=4 expandtab
| apache-2.0 |
jseabold/scikit-learn | sklearn/feature_selection/rfe.py | 4 | 15662 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Vincent Michel <vincent.michel@inria.fr>
# Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..model_selection import check_cv
from ..model_selection._validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 truly informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
            # for the sparse case, ranks is a matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
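    For example, with 10 features and ``step=1`` it holds
    ceil((10 - 1) / 1) + 1 = 10 scores, one per candidate subset size from
    1 to 10.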
Examples
--------
    The following example shows how to retrieve the 5 informative features,
    which are not known a priori, in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
# Initialization
cv = check_cv(self.cv, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv.split(X, y)):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select, step=self.step)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to get_n_splits(X, y) - 1
# here, the scores are normalized by get_n_splits(X, y)
self.grid_scores_ = scores / cv.get_n_splits(X, y)
return self
| bsd-3-clause |
SamProtas/PALiquor | geocode_fixes.py | 1 | 2733 | import os
import pandas as pd
import numpy as np
import sqlite3
import requests
import time
def fix_location(lid, new_address):
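    # Looks up the stored coordinates for licensee `lid`; if none are recorded
    # yet, geocodes `new_address` through the Google Maps Geocoding API and
    # writes the address, latitude and longitude back to the licensees table.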
pd.set_option('display.mpl_style', 'default')
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
DATABASE1 = os.path.join(PROJECT_ROOT, 'dbs', 'licensees.db')
conn1 = sqlite3.connect(DATABASE1)
c = conn1.cursor()
c.execute('SELECT address, latitude, longitude FROM licensees WHERE lid = ?',[lid])
old_info = c.fetchone()
old_latitude = old_info[1]
old_longitude = old_info[2]
if old_latitude or old_longitude:
return 'No need to fix. Aborting geocode call.'
api_key = 'NOT MY REAL KEY!!!!!'
baseurl = 'https://maps.googleapis.com/maps/api/geocode/json?key='+api_key+'&address='
fullurl = baseurl + new_address
page = requests.get(fullurl)
latitude = page.json()['results'][0]['geometry']['location']['lat']
longitude = page.json()['results'][0]['geometry']['location']['lng']
c.execute('UPDATE licensees SET address = ?, latitude = ?, longitude = ? WHERE lid = ?',[new_address, latitude, longitude, lid])
conn1.commit()
c.close()
return 'Good Fix'
# Manually fixed addresses
fix_location(233,'US Customs House Chestnut Street Philadelphia PA')
time.sleep(.2)
fix_location(43444, '431 South Streeet Philadelphia PA')
time.sleep(.2)
fix_location(45162, '2457 Grant Ave Philadelphia PA 19114')
time.sleep(.2)
fix_location(69585, '2400 Strawberry Mansion Drive Philadelphia, PA 19132')
time.sleep(.2)
fix_location(44218, 'Chickie and Petes Roosevelt Boulevard, Philadelphia, PA 19116')
time.sleep(.2)
fix_location(48788, 'Diamond Club at Mitten Hall 1913 North Broad Street Philadelphia, PA 19122')
time.sleep(.2)
fix_location(64349, '51 North 12th Street Philadelphia, PA 19107')
time.sleep(.2)
fix_location(64754, '1420 Locust Street Philadelphia PA 19102')
time.sleep(.2)
fix_location(50302, '39 Snyder Ave Philadelphia PA 19148')
time.sleep(.2)
fix_location(61215, '9910 Frankford Ave Philadelphia PA 19114')
time.sleep(.2)
fix_location(65590, '11000 E Roosevelt BLVD Philadelphia PA')
time.sleep(.2)
fix_location(26715, 'Knights Road Shopping Center 4018 Woodhaven Road Philadelphia, PA 19154')
time.sleep(.2)
fix_location(66741, '9183 Roosevelt BLVD Philadelphia PA 19114')
time.sleep(.2)
fix_location(65221, '129 S 30th St Philadelphia PA 19104')
time.sleep(.2)
fix_location(23775, 'The Bellevue Philadelphia PA 19103')
time.sleep(.2)
fix_location(55796, '5765 Wister St Philadelphia PA 19138')
time.sleep(.2)
fix_location(25469, 'Market East Philadelphia PA 19107')
time.sleep(.2)
fix_location(1140, 'torresdale and decatour, philadelphia pa')
| gpl-2.0 |
ThomasSweijen/TPF | examples/adaptiveintegrator/simple-scene-plot-NewtonIntegrator.py | 6 | 2027 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('TkAgg')
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()]
),
NewtonIntegrator(damping=0.0,gravity=(0,0,-9.81)),
###
### NOTE this extra engine:
###
	### You want a snapshot to be taken every 1 sec (realTimeLim) or every 50 iterations (iterLim),
	### whichever comes sooner. virtTimeLim attribute is unset, hence virtual time period is not taken into account.
PyRunner(iterPeriod=20,command='myAddPlotData()')
]
O.bodies.append(box(center=[0,0,0],extents=[.5,.5,.5],fixed=True,color=[1,0,0]))
O.bodies.append(sphere([0,0,2],1,color=[0,1,0]))
O.dt=.002*PWaveTimeStep()
############################################
##### now the part pertaining to plots #####
############################################
from yade import plot
## we will have 2 plots:
## 1. t as function of i (joke test function)
## 2. i as function of t on left y-axis ('|||' makes the separation) and z_sph, v_sph (as green circles connected with line) and z_sph_half again as function of t
plot.plots={'i':('t'),'t':('z_sph',None,('v_sph','go-'),'z_sph_half')}
## this function is called by plotDataCollector
## it should add data with the labels that we will plot
## if a datum is not specified (but exists), it will be NaN and will not be plotted
def myAddPlotData():
sph=O.bodies[1]
## store some numbers under some labels
plot.addData(t=O.time,i=O.iter,z_sph=sph.state.pos[2],z_sph_half=.5*sph.state.pos[2],v_sph=sph.state.vel.norm())
print "Now calling plot.plot() to show the figures. The timestep is artificially low so that you can watch graphs being updated live."
plot.liveInterval=.2
plot.plot(subPlots=False)
O.run(int(5./O.dt));
#plot.saveGnuplot('/tmp/a')
## you can also access the data in plot.data['i'], plot.data['t'] etc, under the labels they were saved.
| gpl-2.0 |
dhhagan/PAM | Python/PAM.py | 1 | 5037 | #PAM.py
import re
import glob, os, time
from numpy import *
from pylab import *
def analyzeFile(fileName,delim):
cols = {}
indexToName = {}
lineNum = 0
goodLines = 0
shortLines = 0
FILE = open(fileName,'r')
for line in FILE:
line = line.strip()
if lineNum < 1:
lineNum += 1
continue
elif lineNum == 1:
headings = line.split(delim)
i = 0
for heading in headings:
heading = heading.strip()
cols[heading] = []
indexToName[i] = heading
i += 1
lineNum += 1
lineLength = len(cols)
else:
data = line.split(delim)
if len(data) == lineLength:
goodLines += 1
i = 0
for point in data:
point = point.strip()
cols[indexToName[i]] += [point]
i += 1
lineNum += 1
else:
shortLines += 1
lineNum += 1
continue
	FILE.close()
return cols, indexToName, lineNum, shortLines
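# Usage sketch (file name, delimiter and column heading below are hypothetical):
#   cols, indexToName, lineNum, shortLines = analyzeFile('run_01.csv', ',')
#   temperatures = cols['Temperature']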
def numericalSort(value):
numbers = re.compile(r'(\d+)')
parts = numbers.split(value)
parts[1::2] = map(int, parts[1::2])
return parts
def popDate(fileName):
run = fileName.split('.')[0]
runNo = run.split('_')[-1]
return runNo
def getFile(date,regex):#Works
files = []
files = sorted((glob.glob('*'+regex+'*')),key=numericalSort,reverse=False)
if date.lower() == 'last':
files = files.pop()
else:
files = [item for item in files if re.search(date,item)]
return files
def plotConc(data,ozone,times):
# This function plots data versus time
import datetime as dt
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
#time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
x = date2num(time)
legend1 = []
legend2 = []
fig = plt.figure('Gas Concentration Readings for East St.Louis')
ax1 = fig.add_subplot(111)
ax2 = twinx()
for key,value in data.items():
ax1.plot_date(x,data[key],'-',xdate=True)
legend1.append(key)
for key, value in ozone.items():
ax2.plot_date(x,ozone[key],'-.',xdate=True)
legend2.append(key)
title('Gas Concentrations for East St. Louis', fontsize = 12)
ax1.set_ylabel(r'$Concentration(ppb)$', fontsize = 12)
ax2.set_ylabel(r'$Concentration(ppb)$', fontsize = 12)
xlabel(r"$Time \, Stamp$", fontsize = 12)
ax1.legend(legend1,loc='upper right')
ax2.legend(legend2,loc='lower right')
grid(True)
return
def plotBankRelays(data,relays,times):
# This function plots data versus time
import datetime as dt
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
time = [dt.datetime.strptime(time,"%m/%d/%Y %I:%M:%S %p") for time in times]
x = date2num(time)
#x1 = [date.strftime("%m-%d %H:%M:%S") for date in time]
legend1 = []
legend2 = []
#plt.locator_params(axis='x', nbins=4)
fig = plt.figure('VAPS Thermocouple Readings: Chart 2')
ax1 = fig.add_subplot(111)
ax2 = twinx()
for key,value in data.items():
ax1.plot_date(x,data[key],'-',xdate=True)
legend1.append(key)
for key,value in relays.items():
ax2.plot_date(x,relays[key],'--',xdate=True)
legend2.append(key)
title('VAPS Temperatures: Chart 2', fontsize = 12)
ax1.set_ylabel(r'$Temperature(^oC)$', fontsize = 12)
ax2.set_ylabel(r'$Relay \, States$', fontsize = 12)
ax1.set_xlabel(r"$Time \, Stamp$", fontsize = 12)
#print [num2date(item) for item in ax1.get_xticks()]
#ax1.set_xticks(x)
#ax1.set_xticklabels([date.strftime("%m-%d %H:%M %p") for date in time])
#ax1.legend(bbox_to_anchor=(0.,1.02,1.,.102),loc=3,ncol=2,mode="expand",borderaxespad=0.)
ax1.legend(legend1,loc='upper right')
ax2.legend(legend2,loc='lower right')
#ax1.xaxis.set_major_formatter(FormatStrFormatter(date.strftime("%m-%d %H:%M:%S")))
plt.subplots_adjust(bottom=0.15)
grid(True)
return
def goodFiles(files,goodHeaders,delim): # Good
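	# A file counts as "good" when every heading on its sixth line (lineNo == 5)
	# appears in goodHeaders; otherwise it is tallied as irregular.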
irregFiles = 0
goodFiles = []
for file in files:
lineNo = 0
falseCount = 0
FILE = open(file,'r')
for line in FILE:
line = line.strip()
if lineNo == 5:
# Check all the headings to make sure the file is good
head = line.split(delim)
for item in head:
if item in goodHeaders:
continue
else:
falseCount += 1
if falseCount == 0:
goodFiles.append(file)
else:
irregFiles += 1
lineNo += 1
else:
lineNo += 1
continue
	FILE.close()
return goodFiles, irregFiles
| mit |
mlee92/Programming | Econ/supply_demand_elasticity/demand_elasticity.py | 2 | 1413 | # Elasticity of demand is a measure of how strongly consumers respond to a change in the price of a good
# Formally, % change in demand / % change in price
# Problem: Graph the histogram of average-elasticity for a linear-demand good with random coefficients (a, b)
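# Worked example (illustrative numbers, not part of the assignment): if price
# falls from 10 to 9 (-10%) while quantity demanded rises from 100 to 110
# (+10%), elasticity = |0.10 / -0.10| = 1.0, i.e. unit-elastic demand.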
import random
import matplotlib.pyplot as plt
import numpy as np
SIM = 1000;
UNIT_RANGE = range(1, 50)
AVGS = list()
COEF = [0, 0]
def generate_coefficients():
global COEF
a = random.randint(1, 25)
b = random.randint(a*50, 25*50)
COEF = [a, b]
def price(unit):
return COEF[1] - COEF[0]*unit
def graph_price():
x = np.linspace(1,50,50)
y = price(x)
plt.plot(x, y)
plt.show()
def elasticity(d1, d2):
cPrice = price(d2) - price(d1)
cDemand = d2 - d1
pPrice = cPrice / price(d1)
pDemand = cDemand / d1
return abs(pDemand / pPrice)
def simulate():
global AVGS, COEF, UNIT_RANGE
generate_coefficients()
elast_list = list()
for i in UNIT_RANGE:
for j in UNIT_RANGE:
if(i != j):
elast_list.append(elasticity(i, j))
mu = np.mean(elast_list)
print(COEF, mu)
AVGS.append(mu)
def init():
for i in range(0, SIM):
simulate()
init()
print(SIM)
plt.hist(AVGS)
plt.show()
| gpl-2.0 |
ahye/FYS2140-Resources | examples/animation/func_animate_sin.py | 1 | 1284 | #!/usr/bin/env python
"""
Created on Mon 2 Dec 2013
Example script showing how a sine wave can be animated using
function animation (FuncAnimation).
@author Benedicte Emilie Braekken
"""
from numpy import *
from matplotlib.pyplot import *
from matplotlib import animation
def wave( x, t ):
    '''
    Describes a sine wave at time t and position x.
    '''
    omega = 1   # angular frequency
    k = 1       # wave number
    return sin( k * x - omega * t )
T = 10
dt = 0.01
nx = 1e3
nt = int( T / dt ) # Number of time steps
t = 0
all_waves = [] # Empty list for storing the wave states
x = linspace( -pi, pi, nx )
while t < T:
    # Append a new wave state on every pass through the loop
all_waves.append( wave( x, t ) )
t += dt
# Draw the initial state
fig = figure() # Keep a reference to the figure
line, = plot( x, all_waves[0] )
draw()
# Constants for the animation
FPS = 60 # Frames per second
inter = 1. / FPS # Time between frames
def init():
    '''
    Initialises the animation with an empty line.
    '''
    line.set_data( [], [] )
    return line,
def get_frame( frame ):
    '''
    Returns the line updated to wave state number `frame`.
    '''
    line.set_data( x, all_waves[ frame ] )
    return line,
anim = animation.FuncAnimation( fig, get_frame, init_func=init,
frames=nt, interval=inter, blit=True )
show()
| mit |
zfrenchee/pandas | pandas/tests/indexes/datetimes/test_arithmetic.py | 1 | 21153 | # -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# add with TimdeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# iadd with TimdeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
# -------------------------------------------------------------
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '3D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(['now', pd.Timestamp.max])
dtimin = pd.to_datetime(['now', pd.Timestamp.min])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants:
with pytest.raises(OverflowError):
dtimax - variant
expected = pd.Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = pd.Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError):
dtimin - variant
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_add_offset_array(self, tz, box):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_index_equal(res2, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_sub_offset_array(self, tz, box):
# GH#18824
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti - other
expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_dti_with_offset_series(self, tz, names):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
other = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
name=names[1])
expected_add = Series([dti[n] + other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
tm.assert_series_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_series_equal(res2, expected_add)
expected_sub = Series([dti[n] - other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res3 = dti - other
tm.assert_series_equal(res3, expected_sub)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex],
[tm.assert_series_equal,
tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
result = s + pd.DateOffset(years=1)
result2 = pd.DateOffset(years=1) + s
exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
assert_func(result, exp)
assert_func(result2, exp)
result = s - pd.DateOffset(years=1)
exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
assert_func(result, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
Timestamp('2000-02-16', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
# array of offsets - valid for Series only
if klass is Series:
with tm.assert_produces_warning(PerformanceWarning):
s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
result = s + Series([pd.offsets.DateOffset(years=1),
pd.offsets.MonthEnd()])
exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')
])
assert_func(result, exp)
# same offset
result = s + Series([pd.offsets.DateOffset(years=1),
pd.offsets.DateOffset(years=1)])
exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')])
assert_func(result, exp)
s = klass([Timestamp('2000-01-05 00:15:00'),
Timestamp('2000-01-31 00:23:00'),
Timestamp('2000-01-01'),
Timestamp('2000-03-31'),
Timestamp('2000-02-29'),
Timestamp('2000-12-31'),
Timestamp('2000-05-15'),
Timestamp('2001-06-15')])
# DateOffset relativedelta fastpath
relative_kwargs = [('years', 2), ('months', 5), ('days', 3),
('hours', 5), ('minutes', 10), ('seconds', 2),
('microseconds', 5)]
for i, kwd in enumerate(relative_kwargs):
op = pd.DateOffset(**dict([kwd]))
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
op = pd.DateOffset(**dict(relative_kwargs[:i + 1]))
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
# assert these are equal on a piecewise basis
offsets = ['YearBegin', ('YearBegin', {'month': 5}),
'YearEnd', ('YearEnd', {'month': 5}),
'MonthBegin', 'MonthEnd',
'SemiMonthEnd', 'SemiMonthBegin',
'Week', ('Week', {'weekday': 3}),
'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin',
'CustomBusinessDay', 'CDay', 'CBMonthEnd',
'CBMonthBegin', 'BMonthBegin', 'BMonthEnd',
'BusinessHour', 'BYearBegin', 'BYearEnd',
'BQuarterBegin', ('LastWeekOfMonth', {'weekday': 2}),
('FY5253Quarter', {'qtr_with_extra_week': 1,
'startingMonth': 1,
'weekday': 2,
'variation': 'nearest'}),
('FY5253', {'weekday': 0,
'startingMonth': 2,
'variation':
'nearest'}),
('WeekOfMonth', {'weekday': 2,
'week': 2}),
'Easter', ('DateOffset', {'day': 4}),
('DateOffset', {'month': 5})]
with warnings.catch_warnings(record=True):
for normalize in (True, False):
for do in offsets:
if isinstance(do, tuple):
do, kwargs = do
                else:
                    kwargs = {}
for n in [0, 5]:
if (do in ['WeekOfMonth', 'LastWeekOfMonth',
'FY5253Quarter', 'FY5253'] and n == 0):
continue
op = getattr(pd.offsets, do)(n,
normalize=normalize,
**kwargs)
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
assert_func(klass([op + x for x in s]), op + s)
| bsd-3-clause |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py | 18 | 26105 | """
Experimental support for curvilinear grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from itertools import chain
from .grid_finder import GridFinder
from .axislines import AxisArtistHelper, GridHelperBase
from .axis_artist import AxisArtist
from matplotlib.transforms import Affine2D, IdentityTransform
import numpy as np
from matplotlib.path import Path
class FixedAxisArtistHelper(AxisArtistHelper.Fixed):
"""
Helper class for a fixed axis.
"""
def __init__(self, grid_helper, side, nth_coord_ticks=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FixedAxisArtistHelper, self).__init__( \
loc=side,
)
self.grid_helper = grid_helper
if nth_coord_ticks is None:
nth_coord_ticks = self.nth_coord
self.nth_coord_ticks = nth_coord_ticks
self.side = side
self._limits_inverted = False
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
if self.nth_coord == 0:
xy1, xy2 = axes.get_ylim()
else:
xy1, xy2 = axes.get_xlim()
if xy1 > xy2:
self._limits_inverted = True
else:
self._limits_inverted = False
def change_tick_coord(self, coord_number=None):
if coord_number is None:
self.nth_coord_ticks = 1 - self.nth_coord_ticks
elif coord_number in [0, 1]:
self.nth_coord_ticks = coord_number
else:
raise Exception("wrong coord number")
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
g = self.grid_helper
if self._limits_inverted:
side = {"left":"right","right":"left",
"top":"bottom", "bottom":"top"}[self.side]
else:
side = self.side
ti1 = g.get_tick_iterator(self.nth_coord_ticks, side)
ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, side, minor=True)
#ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, self.side, minor=True)
return chain(ti1, ti2), iter([])
class FloatingAxisArtistHelper(AxisArtistHelper.Floating):
def __init__(self, grid_helper, nth_coord, value, axis_direction=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FloatingAxisArtistHelper, self).__init__(nth_coord,
value,
)
self.value = value
self.grid_helper = grid_helper
self._extremes = None, None
self._get_line_path = None # a method that returns a Path.
self._line_num_points = 100 # number of points to create a line
def set_extremes(self, e1, e2):
self._extremes = e1, e2
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
grid_finder = self.grid_helper.grid_finder
extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
x1, y1, x2, y2)
extremes = list(extremes)
e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
if e1 is not None:
extremes[2] = max(e1, extremes[2])
if e2 is not None:
extremes[3] = min(e2, extremes[3])
elif self.nth_coord == 1:
if e1 is not None:
extremes[0] = max(e1, extremes[0])
if e2 is not None:
extremes[1] = min(e2, extremes[1])
grid_info = dict()
lon_min, lon_max, lat_min, lat_max = extremes
lon_levs, lon_n, lon_factor = \
grid_finder.grid_locator1(lon_min, lon_max)
lat_levs, lat_n, lat_factor = \
grid_finder.grid_locator2(lat_min, lat_max)
grid_info["extremes"] = extremes
grid_info["lon_info"] = lon_levs, lon_n, lon_factor
grid_info["lat_info"] = lat_levs, lat_n, lat_factor
grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
lon_factor,
lon_levs)
grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
lat_factor,
lat_levs)
grid_finder = self.grid_helper.grid_finder
#e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
xx0 = np.linspace(self.value, self.value, self._line_num_points)
yy0 = np.linspace(extremes[2], extremes[3], self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
elif self.nth_coord == 1:
xx0 = np.linspace(extremes[0], extremes[1], self._line_num_points)
yy0 = np.linspace(self.value, self.value, self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
grid_info["line_xy"] = xx, yy
self.grid_info = grid_info
def get_axislabel_transform(self, axes):
return Affine2D() #axes.transData
def get_axislabel_pos_angle(self, axes):
extremes = self.grid_info["extremes"]
if self.nth_coord == 0:
xx0 = self.value
yy0 = (extremes[2]+extremes[3])/2.
dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
elif self.nth_coord == 1:
xx0 = (extremes[0]+extremes[1])/2.
yy0 = self.value
dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.
grid_finder = self.grid_helper.grid_finder
xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])
if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
else:
return None, None
def get_tick_transform(self, axes):
return IdentityTransform() #axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label, (optionally) tick_label"""
grid_finder = self.grid_helper.grid_finder
lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
lat_levs = np.asarray(lat_levs)
if lat_factor is not None:
yy0 = lat_levs / lat_factor
dy = 0.01 / lat_factor
else:
yy0 = lat_levs
dy = 0.01
lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
lon_levs = np.asarray(lon_levs)
if lon_factor is not None:
xx0 = lon_levs / lon_factor
dx = 0.01 / lon_factor
else:
xx0 = lon_levs
dx = 0.01
if None in self._extremes:
e0, e1 = self._extremes
else:
e0, e1 = sorted(self._extremes)
if e0 is None:
e0 = -np.inf
if e1 is None:
e1 = np.inf
if self.nth_coord == 0:
mask = (e0 <= yy0) & (yy0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
yy0 = yy0[mask]
elif self.nth_coord == 1:
mask = (e0 <= xx0) & (xx0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
xx0 = xx0[mask]
def transform_xy(x, y):
x1, y1 = grid_finder.transform_xy(x, y)
x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
x2, y2 = x2y2.transpose()
return x2, y2
# find angles
if self.nth_coord == 0:
xx0 = np.empty_like(yy0)
xx0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx1a, yy1a = transform_xy(xx00, yy0)
xx1b, yy1b = transform_xy(xx00+dx, yy0)
xx2a, yy2a = transform_xy(xx0, yy0)
xx2b, yy2b = transform_xy(xx0, yy0+dy)
labels = self.grid_info["lat_labels"]
labels = [l for l, m in zip(labels, mask) if m]
elif self.nth_coord == 1:
yy0 = np.empty_like(xx0)
yy0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx1a, yy1a = transform_xy(xx0, yy0)
xx1b, yy1b = transform_xy(xx0, yy0+dy)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx2a, yy2a = transform_xy(xx00, yy0)
xx2b, yy2b = transform_xy(xx00+dx, yy0)
labels = self.grid_info["lon_labels"]
labels = [l for l, m in zip(labels, mask) if m]
def f1():
dd = np.arctan2(yy1b-yy1a, xx1b-xx1a) # angle normal
dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a) # angle tangent
mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.) # mask where dd1 is not defined
dd[mm] = dd2[mm]+3.14159/2.
#dd = np.arctan2(yy2-yy1, xx2-xx1) # angle normal
#dd2 = np.arctan2(yy3-yy1, xx3-xx1) # angle tangent
#mm = ((yy2-yy1)==0.) & ((xx2-xx1)==0.) # mask where dd1 is not defined
#dd[mm] = dd2[mm]+3.14159/2.
#dd += 3.14159
#dd = np.arctan2(xx2-xx1, angle_tangent-yy1)
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
c2 = tr2ax.transform_point((x, y))
delta=0.00001
if (0. -delta<= c2[0] <= 1.+delta) and \
(0. -delta<= c2[1] <= 1.+delta):
d1 = d/3.14159*180.
d2 = d2/3.14159*180.
yield [x, y], d1, d2, lab
return f1(), iter([])
def get_line_transform(self, axes):
return axes.transData
def get_line(self, axes):
self.update_lim(axes)
x, y = self.grid_info["line_xy"]
if self._get_line_path is None:
return Path(list(zip(x, y)))
else:
return self._get_line_path(axes, x, y)
class GridHelperCurveLinear(GridHelperBase):
def __init__(self, aux_trans,
extreme_finder=None,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
"""
aux_trans : a transform from the source (curved) coordinate to
target (rectilinear) coordinate. An instance of MPL's Transform
(inverse transform should be defined) or a tuple of two callable
objects which defines the transform and its inverse. The callables
need take two arguments of array of source coordinates and
should return two target coordinates:
e.g., x2, y2 = trans(x1, y1)
"""
super(GridHelperCurveLinear, self).__init__()
self.grid_info = None
self._old_values = None
#self._grid_params = dict()
self._aux_trans = aux_trans
self.grid_finder = GridFinder(aux_trans,
extreme_finder,
grid_locator1,
grid_locator2,
tick_formatter1,
tick_formatter2)
def update_grid_finder(self, aux_trans=None, **kw):
if aux_trans is not None:
self.grid_finder.update_transform(aux_trans)
self.grid_finder.update(**kw)
self.invalidate()
def _update(self, x1, x2, y1, y2):
"bbox in 0-based image coordinates"
# update wcsgrid
if self.valid() and self._old_values == (x1, x2, y1, y2):
return
self._update_grid(x1, y1, x2, y2)
self._old_values = (x1, x2, y1, y2)
self._force_update = False
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None):
if axes is None:
axes = self.axes
if axis_direction is None:
axis_direction = loc
_helper = FixedAxisArtistHelper(self, loc,
#nth_coord,
nth_coord_ticks=nth_coord,
)
axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
return axisline
def new_floating_axis(self, nth_coord,
value,
axes=None,
axis_direction="bottom"
):
if axes is None:
axes = self.axes
_helper = FloatingAxisArtistHelper( \
self, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
#_helper = FloatingAxisArtistHelper(self, nth_coord,
# value,
# label_direction=label_direction,
# )
#axisline = AxisArtistFloating(axes, _helper,
# axis_direction=axis_direction)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
#axisline.major_ticklabels.set_visible(True)
#axisline.minor_ticklabels.set_visible(False)
#axisline.major_ticklabels.set_rotate_along_line(True)
#axisline.set_rotate_label_along_line(True)
return axisline
def _update_grid(self, x1, y1, x2, y2):
self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
def get_gridlines(self, which="major", axis="both"):
grid_lines = []
if axis in ["both", "x"]:
for gl in self.grid_info["lon"]["lines"]:
grid_lines.extend(gl)
if axis in ["both", "y"]:
for gl in self.grid_info["lat"]["lines"]:
grid_lines.extend(gl)
return grid_lines
def get_tick_iterator(self, nth_coord, axis_side, minor=False):
#axisnr = dict(left=0, bottom=1, right=2, top=3)[axis_side]
angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side]
#angle = [0, 90, 180, 270][axisnr]
lon_or_lat = ["lon", "lat"][nth_coord]
if not minor: # major ticks
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, l
else:
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, ""
#for xy, a, l in self.grid_info[lon_or_lat]["ticks"][axis_side]:
# yield xy, a, ""
return f()
def test3():
import numpy as np
from matplotlib.transforms import Transform
from matplotlib.path import Path
class MyTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Aitoff transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Aitoff space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y-x), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MyTransformInv(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class MyTransformInv(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y+x), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MyTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
tr = MyTransform(1)
grid_helper = GridHelperCurveLinear(tr)
    from mpl_toolkits.axes_grid1.parasite_axes import \
        host_subplot_class_factory, ParasiteAxesAuxTrans
from .axislines import Axes
SubplotHost = host_subplot_class_factory(Axes)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
fig.add_subplot(ax1)
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
ax1.parasites.append(ax2)
ax2.plot([3, 6], [5.0, 10.])
ax1.set_aspect(1.)
ax1.set_xlim(0, 10)
ax1.set_ylim(0, 10)
ax1.grid(True)
plt.draw()
def curvelinear_test2(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost, \
ParasiteAxesAuxTrans
import matplotlib.cbook as cbook
    # PolarAxes.PolarTransform takes radians. However, we want our coordinate
    # system in degrees.
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
    # The polar projection, which involves a cycle and also has limits in
    # its coordinates, needs a special method to find the extremes
    # (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(5)
    # Find grid values appropriate for the coordinate (degree,
    # minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also use an appropriate formatter. Note that the
    # acceptable Locator and Formatter classes are a bit different from
    # mpl's, and you cannot directly use mpl's Locator and
    # Formatter here (but this may become possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)
    # let the right axis show ticklabels for the 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
    # let the bottom axis show ticklabels for the 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat"] = axis = grid_helper.new_floating_axis(0, 60, axes=ax1)
axis.label.set_text("Test")
axis.label.set_visible(True)
#axis._extremes = 2, 10
#axis.label.set_text("Test")
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.get_helper()._extremes=2, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 6, axes=ax1)
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.label.set_text("Test 2")
axis.get_helper()._extremes=-180, 90
# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
    # Anything you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)
intp = cbook.simple_linear_interpolation
ax2.plot(intp(np.array([0, 30]), 50),
intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
def curvelinear_test3(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1, axis
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
    # PolarAxes.PolarTransform takes radians. However, we want our coordinate
    # system in degrees.
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
    # The polar projection, which involves a cycle and also has limits in
    # its coordinates, needs a special method to find the extremes
    # (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
    # Find grid values appropriate for the coordinate (degree,
    # minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also use an appropriate formatter. Note that the
    # acceptable Locator and Formatter classes are a bit different from
    # mpl's, and you cannot directly use mpl's Locator and
    # Formatter here (but this may become possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
for axis in list(six.itervalues(ax1.axis)):
axis.set_visible(False)
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat1"] = axis = grid_helper.new_floating_axis(0, 130,
axes=ax1,
axis_direction="left"
)
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
grid_helper = ax1.get_grid_helper()
ax1.axis["lat2"] = axis = grid_helper.new_floating_axis(0, 50, axes=ax1,
axis_direction="right")
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 10,
axes=ax1,
axis_direction="bottom")
axis.label.set_text("Test 2")
axis.get_helper()._extremes= 50, 130
axis.major_ticklabels.set_axis_direction("top")
axis.label.set_axis_direction("top")
grid_helper.grid_finder.grid_locator1.den = 5
grid_helper.grid_finder.grid_locator2._nbins = 5
# # A parasite axes with given transform
# ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# # note that ax2.transData == tr + ax1.transData
    # # Anything you draw in ax2 will match the ticks and grids of ax1.
# ax1.parasites.append(ax2)
# intp = cbook.simple_linear_interpolation
# ax2.plot(intp(np.array([0, 30]), 50),
# intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, figsize=(5, 5))
fig.clf()
#test3()
#curvelinear_test2(fig)
curvelinear_test3(fig)
#plt.draw()
plt.show()
| gpl-2.0 |
MobleyLab/SAMPL6 | host_guest/Analysis/Scripts/analyze_sampling.py | 1 | 116143 | #!/usr/bin/env python
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import collections
import copy
import itertools
import json
import math
import os
import numpy as np
import pandas as pd
import scipy as sp
import seaborn as sns
from matplotlib import pyplot as plt
from pkganalysis.stats import mean_confidence_interval
from pkganalysis.sampling import (SamplingSubmission, YankSamplingAnalysis,
YANK_N_ITERATIONS, DG_KEY, DDG_KEY, export_dictionary)
from pkganalysis.submission import (load_submissions)
# =============================================================================
# CONSTANTS
# =============================================================================
YANK_METHOD_PAPER_NAME = 'OpenMM/HREX'
# Paths to input data.
SAMPLING_SUBMISSIONS_DIR_PATH = '../SubmissionsDoNotUpload/975/'
YANK_ANALYSIS_DIR_PATH = 'YankAnalysis/Sampling/'
SAMPLING_ANALYSIS_DIR_PATH = '../SAMPLing/'
SAMPLING_DATA_DIR_PATH = os.path.join(SAMPLING_ANALYSIS_DIR_PATH, 'Data')
SAMPLING_PLOT_DIR_PATH = os.path.join(SAMPLING_ANALYSIS_DIR_PATH, 'Plots')
SAMPLING_PAPER_DIR_PATH = os.path.join(SAMPLING_ANALYSIS_DIR_PATH, 'PaperImages')
# All system ids.
SYSTEM_IDS = [
'CB8-G3-0', 'CB8-G3-1', 'CB8-G3-2', 'CB8-G3-3', 'CB8-G3-4',
'OA-G3-0', 'OA-G3-1', 'OA-G3-2', 'OA-G3-3', 'OA-G3-4',
'OA-G6-0', 'OA-G6-1', 'OA-G6-2', 'OA-G6-3', 'OA-G6-4'
]
# Kelly's colors for maximum contrast.
# "gray95", "gray13", "gold2", "plum4", "darkorange1", "lightskyblue2", "firebrick", "burlywood3", "gray51", "springgreen4", "lightpink2", "deepskyblue4", "lightsalmon2", "mediumpurple4", "orange", "maroon", "yellow3", "brown4", "yellow4", "sienna4", "chocolate", "gray19"
KELLY_COLORS = ['#F2F3F4', '#222222', '#F3C300', '#875692', '#F38400', '#A1CAF1', '#BE0032', '#C2B280', '#848482', '#008856', '#E68FAC', '#0067A5', '#F99379', '#604E97', '#F6A600', '#B3446C', '#DCD300', '#882D17', '#8DB600', '#654522', '#E25822', '#2B3D26']
TAB10_COLORS = sns.color_palette('tab10')
# Index of Kelly's colors associated to each submission.
SUBMISSION_COLORS = {
'AMBER/APR': 'dodgerblue',#KELLY_COLORS[11],
'OpenMM/REVO': 'gold', #KELLY_COLORS[7],
'OpenMM/SOMD': KELLY_COLORS[4],
'GROMACS/EE': 'darkviolet', #KELLY_COLORS[3],
'GROMACS/EE-fullequil': 'hotpink', #KELLY_COLORS[10],
YANK_METHOD_PAPER_NAME: '#4ECC41', #'limegreen', #KELLY_COLORS[9],
'GROMACS/NS-DS/SB-long': KELLY_COLORS[6],
'GROMACS/NS-DS/SB': KELLY_COLORS[1],
'GROMACS/NS-Jarz-F': TAB10_COLORS[0],
'GROMACS/NS-Jarz-R': TAB10_COLORS[1],
'GROMACS/NS-Gauss-F': TAB10_COLORS[2],
'GROMACS/NS-Gauss-R': TAB10_COLORS[4],
'NAMD/BAR': 'saddlebrown'
}
SUBMISSION_LINE_STYLES = {
'AMBER/APR': '--',
'OpenMM/REVO': '-',
'OpenMM/SOMD': '-',
'GROMACS/EE': '-',
'GROMACS/EE-fullequil': '-',
YANK_METHOD_PAPER_NAME: '-',
'GROMACS/NS-DS/SB-long': '-',
'GROMACS/NS-DS/SB': '-',
'GROMACS/NS-Jarz-F': '-',
'GROMACS/NS-Jarz-R': '-',
'GROMACS/NS-Gauss-F': '-',
'GROMACS/NS-Gauss-R': '-',
'NAMD/BAR': '--',
}
N_ENERGY_EVALUATIONS_SCALE = 1e6
# =============================================================================
# UTILITY FUNCTIONS
# =============================================================================
def reduce_to_first_significant_digit(quantity, uncertainty):
"""Truncate a quantity to the first significant digit of its uncertainty."""
first_significant_digit = math.floor(math.log10(abs(uncertainty)))
quantity = round(quantity, -first_significant_digit)
uncertainty = round(uncertainty, -first_significant_digit)
return quantity, uncertainty
def load_yank_analysis():
"""Load the YANK analysis in a single dataframe."""
yank_free_energies = {}
for system_id in SYSTEM_IDS:
file_path = os.path.join(YANK_ANALYSIS_DIR_PATH, 'yank-{}.json'.format(system_id))
with open(file_path, 'r') as f:
yank_free_energies[system_id] = json.load(f)
return yank_free_energies
def fit_efficiency(mean_data, find_best_fit=True):
"""Compute the efficiency by fitting the model and using only the asymptotic data.
We fit using the simulation percentage as the independent value
because it is less prone to overflowing during fitting. We then
return the efficiency in units of (kcal/mol)**2/n_energy_evaluations.
"""
from scipy.optimize import curve_fit
def model(x, log_efficiency):
return np.exp(log_efficiency) / x
vars = mean_data['std'].values**2
cost = mean_data['Simulation percentage'].values
# cost = mean_data['N energy evaluations'].values / 1e7
if find_best_fit:
        # Find the fit with the best error, discarding up to 50% of the calculation.
max_discarded = math.floor(0.5*len(cost))
else:
# Use all the data.
max_discarded = 1
# Fit.
fits = []
for n_discarded in range(max_discarded):
cost_fit = cost[n_discarded:]
vars_fit = vars[n_discarded:]
fit = curve_fit(model, cost_fit, vars_fit, p0=[0.0])
fits.append((np.exp(fit[0]), fit[1]))
# Find the fit with the minimum error.
n_discarded = fits.index(min(fits, key=lambda x: x[1]))
# Convert efficiency / simulation_percentage to efficiency / n_energy_evaluations
efficiency = fits[n_discarded][0][0] / 100 * mean_data['N energy evaluations'].values[-1]
# efficiency = fits[n_discarded][0][0] * 1e7
return efficiency, n_discarded
def export_submissions(submissions, reference_free_energies):
"""Export the submission data to CSV and JSON format."""
for submission in submissions:
exported_data = {}
# Export data of the 5 independent replicates.
for system_id in sorted(submission.data['System ID'].unique()):
system_id_data = submission.data[submission.data['System ID'] == system_id]
exported_data[system_id] = collections.OrderedDict([
('DG', system_id_data[DG_KEY].values.tolist()),
('dDG', system_id_data[DDG_KEY].values.tolist()),
('cpu_times', system_id_data['CPU time [s]'].values.tolist()),
('n_energy_evaluations', system_id_data['N energy evaluations'].values.tolist()),
])
# Export data of mean trajectory and confidence intervals.
mean_free_energies = submission.mean_free_energies()
for system_name in mean_free_energies['System name'].unique():
system_name_data = mean_free_energies[mean_free_energies['System name'] == system_name]
# Obtain free energies and bias.
free_energies = system_name_data[DG_KEY].values
free_energies_ci = system_name_data['$\Delta$G CI'].values
reference_diff = free_energies - reference_free_energies.loc[system_name, '$\Delta$G [kcal/mol]']
exported_data[system_name + '-mean'] = collections.OrderedDict([
('DG', free_energies.tolist()),
('DG_CI', free_energies_ci.tolist()),
('reference_difference', reference_diff.tolist()),
('n_energy_evaluations', system_name_data['N energy evaluations'].values.tolist()),
])
# Export.
file_base_path = os.path.join(SAMPLING_DATA_DIR_PATH, submission.receipt_id)
export_dictionary(exported_data, file_base_path)
# =============================================================================
# PLOTTING FUNCTIONS
# =============================================================================
def plot_mean_free_energy(mean_data, ax, x='Simulation percentage',
color_mean=None, color_ci=None, zorder=None,
start=None, stride=1, scale_n_energy_evaluations=True,
plot_ci=True, **plot_kwargs):
"""Plot mean trajectory with confidence intervals."""
ci_key = '$\Delta$G CI'
if start is None:
        # Discard the initial datapoints which are 0.0 (i.e. no estimate).
start = np.nonzero(mean_data[DG_KEY].values)[0][0]
if x == 'N energy evaluations' and scale_n_energy_evaluations:
# Plot in millions of energy evaluations.
scale = N_ENERGY_EVALUATIONS_SCALE
else:
scale = 1
x = mean_data[x].values[start::stride] / scale
mean_dg = mean_data[DG_KEY].values[start::stride]
sem_dg = mean_data[ci_key].values[start::stride]
# Plot mean trajectory confidence intervals.
if plot_ci:
ax.fill_between(x, mean_dg + sem_dg, mean_dg - sem_dg, alpha=0.15, color=color_ci, zorder=zorder)
# Plot the mean free energy trajectory.
if zorder is not None:
# Push the CI shaded area in the background so that the trajectories are always visible.
zorder += 20
ax.plot(x, mean_dg, color=color_mean, alpha=1.0, zorder=zorder, **plot_kwargs)
return ax
def plot_mean_data(mean_data, axes, color=None, ls=None, label=None, x='N energy evaluations',
zorder=None, plot_std=True, plot_bias=True, plot_ci=True):
"""Plot free energy, variance and bias as a function of the cost in three different axes."""
# Do not plot the part of data without index.
first_nonzero_idx = np.nonzero(mean_data[DG_KEY].values)[0][0]
# If the x-axis is the number of energy/force evaluations, plot it in units of millions.
if x == 'N energy evaluations':
scale = N_ENERGY_EVALUATIONS_SCALE
else:
scale = 1
# Plot the submission mean trajectory with CI.
plot_mean_free_energy(mean_data, x=x, ax=axes[0],
color_mean=color, color_ci=color, ls=ls, zorder=zorder,
start=first_nonzero_idx, label=label, plot_ci=plot_ci)
# Plot standard deviation of the trajectories.
if plot_std:
axes[1].plot(mean_data[x].values[first_nonzero_idx:] / scale,
mean_data['std'].values[first_nonzero_idx:], color=color, alpha=0.8,
ls=ls, zorder=zorder, label=label)
if plot_bias:
axes[2].plot(mean_data[x].values[first_nonzero_idx:] / scale,
mean_data['bias'].values[first_nonzero_idx:], color=color, alpha=0.8,
ls=ls, zorder=zorder, label=label)
def align_yaxis(ax1, v1, ax2, v2):
"""Adjust ax2 ylimit so that v2 in in the twin ax2 is aligned to v1 in ax1.
From https://stackoverflow.com/questions/10481990/matplotlib-axis-with-two-scales-shared-origin .
"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
inv = ax2.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
miny, maxy = ax2.get_ylim()
ax2.set_ylim(miny+dy, maxy+dy)
# =============================================================================
# FIGURE 1 - SAMPLING CHALLENGE OVERVIEW
# =============================================================================
def plot_example_bias_variance(yank_analysis, type='mixed', cost='generic',
max_n_eval_percentage=1.0,
mixed_proportion=0.5,
model_free_energy=None,
plot_experimental_value=False):
"""Free energy trajectories used to visualize bias and variance on the plots.
    This is used to illustrate what is meant by bias and uncertainty in the paper.
Parameters
----------
type : str, optional
Can be 'single' (plot only CB8-G3-1), 'all' (plot all system IDs of CB8-G3),
        'mean' (plot mean trajectory and uncertainties), and 'mixed' (first part is
all system IDs and second part is mean trajectory and uncertainties).
cost : str, optional
Can be 'generic' (no label on x-axis) or 'neval' (x-axis in number of
energy evaluations).
mixed_proportion : float, optional
The proportion of all System IDs and mean trajectories in mixed-type plots.
"""
# sns.set_context('paper', font_scale=1.6)
sns.set_style('white')
sns.set_context('paper', font_scale=1.0)
# Load the data
n_iterations = 40000
cb8_data = yank_analysis.get_free_energies_from_iteration(n_iterations, system_name='CB8-G3', mean_trajectory=False)
cb8_data_mean = yank_analysis.get_free_energies_from_iteration(n_iterations, system_name='CB8-G3', mean_trajectory=True)
max_n_eval = max(cb8_data_mean['N energy evaluations'])
max_n_eval_scaled = int(max_n_eval / N_ENERGY_EVALUATIONS_SCALE)
max_displayed_n_eval = next(x for x in cb8_data_mean['N energy evaluations'] if x >= max_n_eval * max_n_eval_percentage)
max_displayed_n_eval_scaled = int(max_displayed_n_eval / N_ENERGY_EVALUATIONS_SCALE)
# Determine the asymptotic free energy if not given.
if model_free_energy is None:
model_free_energy = cb8_data_mean[DG_KEY].values[-1]
# Scale the number of energy evaluations.
cb8_data.loc[:,'N energy evaluations'] /= N_ENERGY_EVALUATIONS_SCALE
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(2.5, 1.8))
if type == 'single':
# Plot only CB8-G3-1.
cb8_data_1 = cb8_data[cb8_data['System ID'] == 'CB8-G3-1']
sns.lineplot(data=cb8_data_1, x='N energy evaluations', y=DG_KEY,
hue='System ID', palette='bright', ax=ax, alpha=0.6)
elif type == 'all':
# Plot the 5 replicates individual trajectories.
sns.lineplot(data=cb8_data, x='N energy evaluations', y=DG_KEY,
hue='System ID', palette='bright', ax=ax, alpha=0.6)
elif type == 'mean':
# Plot the submission mean trajectory with CI.
plot_mean_free_energy(cb8_data_mean, x='N energy evaluations', ax=ax,
color_mean='black', plot_ci=True,
color_ci='black',
scale_n_energy_evaluations=True)
elif type == 'mixed':
# Plot all System IDs for the first half and mean/uncertainty in second half.
half_n_eval = max_displayed_n_eval_scaled * mixed_proportion
cb8_data_first_half = cb8_data[cb8_data['N energy evaluations'] <= half_n_eval + max_n_eval_scaled / 100]
sns.lineplot(data=cb8_data_first_half, x='N energy evaluations', y=DG_KEY,
hue='System ID', palette='bright', ax=ax, alpha=0.6)
cb8_data_second_half = cb8_data_mean[cb8_data_mean['N energy evaluations'] >= half_n_eval * N_ENERGY_EVALUATIONS_SCALE]
plot_mean_free_energy(cb8_data_second_half, x='N energy evaluations', ax=ax,
color_mean='black', plot_ci=True,
color_ci=(0.3, 0.3, 0.3), scale_n_energy_evaluations=True,
ls='--')
try:
ax.get_legend().remove()
except AttributeError:
pass
# Set limits
x_lim = (0, max_displayed_n_eval_scaled)
ax.set_xlim(x_lim)
y_lim = (-12.5, -10.5)
ax.set_ylim(y_lim)
    # Plot model and experiment indication. Neither value is real data; they are just examples.
model_free_energy = -10.75
final_prediction = cb8_data_mean[cb8_data_mean['N energy evaluations'] == max_displayed_n_eval][DG_KEY].values[0]
ax.plot(x_lim, [model_free_energy]*2, color='gray', ls='--')
ax.text(x_lim[-1]+(max_n_eval_scaled*max_n_eval_percentage)/100, model_free_energy, r'$\Delta$G$_{\theta}$')
ax.text(x_lim[-1]+(max_n_eval_scaled*max_n_eval_percentage)/100, final_prediction - 0.13, r'$\overline{\Delta G}$')
# Plot experimental value horizontal line only for generic plot.
if plot_experimental_value:
experiment_dg = -11.75
plt.plot(x_lim, [experiment_dg]*2, color='black')
if cost == 'neval':
ax.set_xlabel('N force/energy evaluations')
else:
ax.set_xlabel('Computational cost', labelpad=-5)
ax.set_ylabel('$\Delta$G', labelpad=-5)
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.tight_layout(pad=0.1, rect=[0.0, 0.0, 0.90, 1.0])
# Save file.
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure 1 - host-guest')
os.makedirs(figure_dir_path, exist_ok=True)
output_base_path = os.path.join(figure_dir_path, 'example_trajectories')
plt.savefig(output_base_path + '.pdf')
# =============================================================================
# FIGURE 2 - MEAN ERROR AND RELATIVE EFFICIENCY CARTOON
# =============================================================================
def plot_mean_error_cartoon():
"""Plot the cartoon used to explain mean error and relative efficiency.
    This is used as an example to clarify some gotchas with the definition
of efficiency.
"""
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
sns.set_context('paper')
sns.set_style('white')
def err_decay_func_square(decay_coeff, c):
return decay_coeff / np.sqrt(c)
def mean_error_square(decay_coeff, c_min, c_max):
return 2 * decay_coeff * (np.sqrt(c_max) - np.sqrt(c_min)) / (c_max - c_min)
def err_decay_func_B(decay_coeff, c):
return decay_coeff / c**(5/6)
def mean_error_B(decay_coeff, c_min, c_max):
return 6 * decay_coeff * (c_max**(1/6) - c_min**(1/6)) / (c_max - c_min)
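    # The two closed forms above are simply the cost-averaged error,
    #     E[err] = 1/(c_max - c_min) * integral_{c_min}^{c_max} err(c) dc,
    # evaluated analytically for err(c) = k/sqrt(c) and err(c) = k * c**(-5/6).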
decay_coeffs = {
'A': 1.0,
'B': 2.5,
'Z': 1.5,
}
c_ranges = collections.OrderedDict([
("A'", np.arange(1, 4.5, 0.1)),
("A''", np.arange(3, 6, 0.1)),
("B", np.arange(2, 6.5, 0.1)),
("Z", np.arange(1, 6.5, 0.1)),
])
# Determine colors colors.
colors = {m: 'C'+str(i) for i, m in enumerate(sorted(c_ranges))}
# Plot the error trajectories.
fig, ax = plt.subplots(figsize=(3.5, 2.6))
# method_names = ["B", "Z", "A'", "A''"]
method_names = ["Z", "A'", "A''"]
for method_name in method_names:
color = colors[method_name]
c_range = c_ranges[method_name]
decay_coeff = decay_coeffs[method_name[0]]
if method_name == 'B':
err_decay_func = err_decay_func_B
else:
err_decay_func = err_decay_func_square
err = err_decay_func(decay_coeff, c_range)
# Plot error area.
ax.plot(c_range, err, color=color, label=method_name, zorder=1)
ax.fill_between(c_range, err, 0, color=color, alpha=0.5, zorder=0)
# Add method label.
c_method_label_idx = int(len(c_range) / 8)
ax.text(c_range[c_method_label_idx], err[c_method_label_idx]+0.01, method_name, fontsize=12)
if method_name[0] == 'A':
# Plot mean error.
c_min, c_max = min(c_range), max(c_range)
mean_err = mean_error_square(decay_coeff, c_min, c_max)
# Start mean error horizontal line from the error curve.
c_mean = (decay_coeff / mean_err)**2
ax.plot([0, c_mean], [mean_err, mean_err], color='black', ls='--', alpha=0.8, zorder=1)
# Add label mean error.
# ax.text(1.05, mean_err+0.025, '$\mathbb{E}[RMSE_{' + method_name + '}]$', fontsize=9)
ax.text(-0.3, mean_err+0.025, '$\mathbb{E}[RMSE_{' + method_name + '}]$', fontsize=9)
# Add c_min/max labels.
ax.text(c_min-0.4, -0.1, 'c$_{min,' + method_name + '}$', fontsize=9)
ax.text(c_max-0.4, -0.1, 'c$_{max,' + method_name + '}$', fontsize=9)
# Configure axes.
ax.set_xlim(1, 6.4)
ax.set_ylim(0, 2)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_ylabel('$RMSE(\Delta G)$')
ax.set_xlabel('computational cost')
# Pull axes labels closest to axes.
ax.tick_params(axis='x', which='major', pad=2.0)
ax.yaxis.set_label_coords(0.0, 0.65)
# Plot the relative efficiencies in an inset plot.
ax_ins = inset_axes(ax, width='100%', height='100%', bbox_to_anchor=[145, 115, 90, 50])
# Compute relative efficiencies with respect to Z.
relative_efficiencies = collections.OrderedDict()
for method_name in [name for name in method_names if name != 'Z']:
c_min, c_max = min(c_ranges[method_name]), max(c_ranges[method_name])
if method_name == 'B':
mean_error_func = mean_error_B
else:
mean_error_func = mean_error_square
mean_err_method = mean_error_func(decay_coeffs[method_name[0]], c_min, c_max)
mean_err_Z = mean_error_square(decay_coeffs['Z'], c_min, c_max)
relative_efficiencies[method_name] = -np.log(mean_err_method/mean_err_Z)
# Plot horizontal bar plot with all efficiencies.
labels, rel_effs = zip(*relative_efficiencies.items())
bar_colors = [colors[m] for m in labels]
labels = [l + '/Z' for l in labels]
# labels = ['$e_{err,' + str(l) + '/Z}$' for l in labels]
ax_ins.barh(y=labels, width=rel_effs, color=bar_colors, alpha=0.85)
ax_ins.set_title('relative efficiency', pad=2.5)
# plt.tight_layout(rect=[0.0, 0.0, 1.0, 1.0])
plt.tight_layout(rect=[0.1, 0.0, 1.0, 1.0])
# Pull axes labels closest to axes.
ax_ins.set_xticks([0.0])
ax_ins.grid(axis='x')
ax_ins.tick_params(axis='x', which='major', pad=0.0)
ax_ins.tick_params(axis='y', which='major', pad=0.0)
output_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure2-efficiency_cartoon')
os.makedirs(output_dir_path, exist_ok=True)
plt.savefig(os.path.join(output_dir_path, 'error_trajectories.pdf'))
# =============================================================================
# FIGURE 3 - FREE ENERGY TRAJECTORIES
# =============================================================================
def plot_submissions_trajectory(submissions, yank_analysis, axes, y_limits=None,
plot_std=True, plot_bias=True, plot_bias_to_reference=False,
system_names=None):
"""Plot free energy trajectories, std, and bias of the given submissions."""
if system_names is None:
system_names = ['CB8-G3', 'OA-G3', 'OA-G6']
n_systems = len(system_names)
max_n_energy_evaluations = {system_name: 0 for system_name in system_names}
min_n_energy_evaluations = {system_name: np.inf for system_name in system_names}
# Handle default arguments.
if y_limits is None:
# 3 by 3 matrix of y limits for the plots.
y_limits = [[None for _ in range(n_systems)] for _ in range(n_systems)]
# We need a 2D array of axes for the code to work even if we're not plotting std or bias.
try:
axes_shape = len(axes.shape)
except AttributeError:
axes = np.array([[axes]])
else:
if axes_shape == 1:
axes = np.array([axes])
# Build a dictionary mapping submissions and system names to their mean data.
all_mean_data = {}
for submission in submissions:
# We always want to print in order
all_mean_data[submission.paper_name] = {}
mean_free_energies = submission.mean_free_energies()
for system_name in system_names:
# CB8-G3 calculations for GROMACS/EE did not converge.
if submission.name == 'Expanded-ensemble/MBAR' and system_name == 'CB8-G3':
continue
# Add mean free energies for this system.
system_mean_data = mean_free_energies[mean_free_energies['System name'] == system_name]
n_energy_evaluations = system_mean_data['N energy evaluations'].values[-1]
all_mean_data[submission.paper_name][system_name] = system_mean_data
# Keep track of the maximum and minimum number of energy evaluations,
# which will be used to determine how to truncate the plotted reference
# data and determine the zorder of the trajectories respectively.
max_n_energy_evaluations[system_name] = max(max_n_energy_evaluations[system_name],
n_energy_evaluations)
min_n_energy_evaluations[system_name] = min(min_n_energy_evaluations[system_name],
n_energy_evaluations)
# Add also reference YANK calculations if provided.
if yank_analysis is not None:
all_mean_data[YANK_METHOD_PAPER_NAME] = {}
for system_name in system_names:
system_mean_data = yank_analysis.get_free_energies_from_energy_evaluations(
max_n_energy_evaluations[system_name], system_name=system_name, mean_trajectory=True)
all_mean_data[YANK_METHOD_PAPER_NAME][system_name] = system_mean_data
# Create a table mapping submissions and system name to the zorder used
# to plot the free energy trajectory so that smaller shaded areas are on
# top of bigger ones.
# First find the average CI for all methods up to min_n_energy_evaluations.
methods_cis = {name: {} for name in system_names}
for method_name, method_mean_data in all_mean_data.items():
for system_name, system_mean_data in method_mean_data.items():
# Find index of all energy evaluations < min_n_energy_evaluations.
n_energy_evaluations = system_mean_data['N energy evaluations'].values
last_idx = np.searchsorted(n_energy_evaluations, min_n_energy_evaluations[system_name], side='right')
cis = system_mean_data['$\Delta$G CI'].values[:last_idx]
methods_cis[system_name][method_name] = np.mean(cis)
# For each system, order methods from smallest CI (plot on top) to greatest CI (background).
zorders = {name: {} for name in system_names}
for system_name, system_cis in methods_cis.items():
ordered_methods = sorted(system_cis.keys(), key=lambda method_name: system_cis[method_name])
for zorder, method_name in enumerate(ordered_methods):
zorders[system_name][method_name] = zorder
# The columns are in order CB8-G3, OA-G3, and OA-G6.
system_columns = {'CB8-G3': 0, 'OA-G3': 1, 'OA-G6': 2}
    # Plot submissions in alphabetical order to order the legend labels.
for method_name in sorted(all_mean_data.keys()):
submission_mean_data = all_mean_data[method_name]
submission_color = SUBMISSION_COLORS[method_name]
submission_ls = SUBMISSION_LINE_STYLES[method_name]
# Plot free energy trajectories.
for system_name, mean_data in submission_mean_data.items():
ax_idx = system_columns[system_name]
            # The OA predictions of the NS short protocol are the same as in the long-protocol submission file.
if method_name == 'GROMACS/NS-DS/SB-long' and system_name != 'CB8-G3':
# Just add the label.
axes[0][ax_idx].plot([], color=submission_color, ls=submission_ls, label=method_name)
continue
# Update maximum number of energy evaluations.
n_energy_evaluations = mean_data['N energy evaluations'].values[-1]
max_n_energy_evaluations[system_name] = max(max_n_energy_evaluations[system_name],
n_energy_evaluations)
# Determine zorder and plot.
zorder = zorders[system_name][method_name]
plot_mean_data(mean_data, axes[:,ax_idx], color=submission_color,
ls=submission_ls, zorder=zorder, label=method_name,
plot_std=plot_std, plot_bias=plot_bias)
# Fix labels.
axes[0][0].set_ylabel('$\Delta$G [kcal/mol]')
if plot_std:
axes[1][0].set_ylabel('std($\Delta$G) [kcal/mol]')
if plot_bias:
axes[2][0].set_ylabel('bias [kcal/mol]')
central_column_idx = int(len(axes[0])/2)
axes[-1][central_column_idx].set_xlabel('number of energy/force evaluations [10$^6$]')
# Fix axes limits.
for ax_idx, system_name in enumerate(system_names):
for row_idx in range(len(axes)):
ax = axes[row_idx][ax_idx]
# Set the x-axis limits.
ax.set_xlim((0, max_n_energy_evaluations[system_name]/N_ENERGY_EVALUATIONS_SCALE))
# Keep the x-axis label only at the bottom row.
if row_idx != len(axes)-1:
ax.xaxis.set_ticklabels([])
y_lim = y_limits[row_idx][ax_idx]
if y_lim is not None:
ax.set_ylim(y_lim)
# Set the system name in the title.
axes[0][ax_idx].set_title(system_name)
# Create a bias axis AFTER the ylim has been set.
if yank_analysis is not None and plot_bias_to_reference:
for ax_idx, (system_name, ax) in enumerate(zip(system_names, axes[0])):
yank_full_mean_data = yank_analysis.get_system_free_energies(system_name, mean_trajectory=True)
ref_free_energy = yank_full_mean_data[DG_KEY].values[-1]
with sns.axes_style('white'):
ax2 = ax.twinx()
# Plot a vertical line to fix the scale.
vertical_line = np.linspace(*ax.get_ylim()) - ref_free_energy
ax2.plot([50] * len(vertical_line), vertical_line, alpha=0.0001)
ax2.grid(alpha=0.5, linestyle='dashed', zorder=0)
# We add the bias y-label only on the rightmost Axis.
if ax_idx == n_systems - 1:
ax2.set_ylabel('Bias to reference [kcal/mol]')
# Set the 0 of the twin axis to the YANK reference free energy.
align_yaxis(ax, ref_free_energy, ax2, 0.0)
def plot_all_entries_trajectory(submissions, yank_analysis, zoomed=False):
"""Plot free energy trajectories, std, and bias of the challenge entries."""
sns.set_style('whitegrid')
sns.set_context('paper')
# Create a figure with 3 columns (one for each system) and 2 rows.
# The first row contains the free energy trajectory and CI, the second
# a plot of the estimator variance, and the third the bias to the
# asymptotic value.
if zoomed:
figsize = (7.25, 7.0) # Without REVO
else:
figsize = (7.25, 7.0) # With REVO
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=figsize)
# Optionally, remove REVO.
if zoomed:
submissions = [s for s in submissions if s.name not in ['WExploreRateRatio']]
if zoomed:
# Y-axis limits when REVO calculations are excluded.
y_limits = [
[(-15, -10), (-9, -4), (-9, -4)],
[(0, 2), (0, 0.8), (0, 0.8)],
[(-3, 1), (-0.6, 0.6), (-0.6, 0.6)],
]
else:
# Y-axis limits when REVO calculations are included.
y_limits = [
[(-17, -9), (-13, -5), (-13, -5)],
[(0, 2), (0, 1.75), (0, 1.75)],
[(-4, 4), (-0.6, 0.6), (-0.6, 0.6)],
]
plot_submissions_trajectory(submissions, yank_analysis, axes, y_limits=y_limits)
# Show/save figure.
if zoomed:
plt.tight_layout(h_pad=0.2, rect=[0.0, 0.00, 1.0, 0.92], w_pad=0.0) # Without REVO
else:
plt.tight_layout(h_pad=0.2, rect=[0.0, 0.00, 1.0, 0.92]) # With REVO
# Plot legend.
if zoomed:
# bbox_to_anchor = (2.52, 1.55) # Without REVO.
bbox_to_anchor = (2.4, 1.48)
else:
bbox_to_anchor = (2.4, 1.48) # With REVO.
axes[0][1].legend(loc='upper right', bbox_to_anchor=bbox_to_anchor,
fancybox=True, ncol=4)
plt.subplots_adjust(wspace=0.35)
# plt.show()
if zoomed:
file_name = 'Figure3-free_energy_trajectories_zoomed'
else:
file_name = 'Figure3-free_energy_trajectories'
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure3-free_energy_trajectories')
os.makedirs(figure_dir_path, exist_ok=True)
output_base_path = os.path.join(figure_dir_path, file_name)
plt.savefig(output_base_path + '.pdf')
# plt.savefig(output_base_path + '.png', dpi=500)
# =============================================================================
# FIGURE 4 - NONEQUILIBRIUM SWITCHING ESTIMATOR COMPARISON
# =============================================================================
def plot_all_nonequilibrium_switching(submissions):
"""Plot free energy trajectories, std, and bias of the nonequilibrium-switching calculations."""
# Create a figure with 3 columns (one for each system) and 2 rows.
# The first row contains the free energy trajectory and CI, the second
# a plot of the estimator variance, and the third the bias to the
# asymptotic value.
figsize = (7.25, 3.5)
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
# Select nonequilibrium-switching calculations with estimators.
submissions = [s for s in submissions if 'NS' in s.paper_name]
# Y-axis limits.
y_limits = [
[(-20, 5), (-40, 0), (-40, 0)]
]
plot_submissions_trajectory(submissions, yank_analysis=None, axes=axes,
y_limits=y_limits, plot_std=False, plot_bias=False)
# Show/save figure.
plt.tight_layout(pad=0.0, rect=[0.0, 0.00, 1.0, 0.85])
# Plot legend.
legend = axes[0].legend(loc='upper left', bbox_to_anchor=(0.6, 1.3),
fancybox=True, ncol=3)
# Change legend labels to refer to estimator used rather than overall method ID.
legend_labels_map = {
'GROMACS/NS-DS/SB-long': 'BAR-long',
'GROMACS/NS-DS/SB': 'BAR',
'GROMACS/NS-Jarz-F': 'Jarzynski-Forward',
'GROMACS/NS-Jarz-R': 'Jarzynski-Reverse',
'GROMACS/NS-Gauss-F': 'Gaussian-Forward',
'GROMACS/NS-Gauss-R': 'Gaussian-Reverse',
}
for text in legend.get_texts():
text.set_text(legend_labels_map[text.get_text()])
plt.subplots_adjust(wspace=0.35)
# plt.show()
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure4-nonequilibrium_comparison')
os.makedirs(figure_dir_path, exist_ok=True)
output_base_path = os.path.join(figure_dir_path, 'Figure4-nonequilibrium_comparison')
plt.savefig(output_base_path + '.pdf')
# plt.savefig(output_base_path + '.png', dpi=500)
# =============================================================================
# FIGURE 5 - BAROSTAT AND RESTRAINT
# =============================================================================
# Directories containing the volume information of YANK and GROMACS/EE.
BAROSTAT_DATA_DIR_PATH = os.path.join('..', 'SAMPLing', 'Data', 'BarostatData')
YANK_VOLUMES_DIR_PATH = os.path.join(BAROSTAT_DATA_DIR_PATH, 'YankVolumes')
EE_VOLUMES_DIR_PATH = os.path.join(BAROSTAT_DATA_DIR_PATH, 'EEVolumes')
def plot_volume_distributions(axes, plot_predicted=False):
"""Plot the volume distributions obtained with Monte Carlo and Berendsen barostat."""
import scipy.stats
import scipy.integrate
from simtk import unit
# Load data.
mc_volumes = collections.OrderedDict([
(1, np.load(os.path.join(YANK_VOLUMES_DIR_PATH, 'volumes_pressure100.npy'))),
(100, np.load(os.path.join(YANK_VOLUMES_DIR_PATH, 'volumes_pressure10000.npy'))),
])
mc_volumes_hrex = collections.OrderedDict([
(1, np.load(os.path.join(YANK_VOLUMES_DIR_PATH, 'hrex_state_volumes_state0.npy'))),
(58, np.load(os.path.join(YANK_VOLUMES_DIR_PATH, 'hrex_state_volumes_state58.npy'))),
])
b_volumes = collections.OrderedDict([
(1, np.load(os.path.join(EE_VOLUMES_DIR_PATH, '1atm_vanilla.npy'))),
(100, np.load(os.path.join(EE_VOLUMES_DIR_PATH, '100atm_vanilla.npy'))),
])
b_volumes_ee = collections.OrderedDict([
(1, np.load(os.path.join(EE_VOLUMES_DIR_PATH, '1atm_expanded.npy'))),
(100, np.load(os.path.join(EE_VOLUMES_DIR_PATH, '100atm_expanded.npy'))),
])
# Print some statistics for each distribution.
for volume_trajectories, label in [(mc_volumes, 'MC-MD '),
(mc_volumes_hrex, 'MC-HREX'),
(b_volumes, 'BB-MD '),
(b_volumes_ee, 'BB-EE ')]:
for pressure, trajectory in volume_trajectories.items():
n = len(trajectory)
t_stat = 2.326 # 98% CI
mean = np.mean(trajectory)
sem = scipy.stats.sem(trajectory)
mean_ci = t_stat * sem
var = np.var(trajectory, ddof=1)
# Standard error of variance if volume is gaussianly distributed
sev = var * np.sqrt(2 / (n-1))
var_ci = t_stat * sev
skew = scipy.stats.skew(trajectory)
# Standard error of skewness if volume is gaussianly distributed
ses = np.sqrt( 6*n*(n-1) / ((n-2)*(n+1)*(n+3)) )
skew_ci = t_stat * ses
print('{}-{} (n={}): mean={:.3f} +- {:.3f}nm^3\t\tvar={:.3f} +- {:.3f}\tskew={:.3f} +- {:.3f}'.format(
pressure, label, n, mean, mean_ci, var, var_ci, skew, skew_ci))
# Plot the 1atm vs 100atm comparison.
barostats = ['B', 'MC']
for ax, volume_trajectories, barostat in zip(axes, [b_volumes, mc_volumes], barostats):
barostat += ',MD'
barostat = 'MD'
for pressure, trajectory in volume_trajectories.items():
label = '$\\rho_{{\mathrm{{{}}}}}$(V|{}atm)'.format(barostat, pressure)
ax = sns.distplot(trajectory, label=label, hist=False, ax=ax)
if plot_predicted:
# Plot predicted distribution.
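            # Sketch of the reweighting identity applied here: in the NPT
            # ensemble rho(V|p) is proportional to exp(-beta*p*V) times a
            # pressure-independent factor, so
            #     rho(V|p2) \propto rho(V|p1) * exp(-beta*(p2 - p1)*V),
            # which is what predicted_distribution evaluates before
            # renormalization.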
beta = 1.0 / (unit.BOLTZMANN_CONSTANT_kB * 298.15*unit.kelvin)
p1 = 1.0 * unit.atmosphere
p2 = 100.0 * unit.atmosphere
volumes = np.linspace(78.0, 82.0, num=200)
fit = scipy.stats.norm
# Fit the original distribution.
original_pressure, new_pressure = list(volume_trajectories.keys())
original_trajectory = list(volume_trajectories.values())[0]
fit_parameters = fit.fit(original_trajectory)
            # Find the normalizing constant of the predicted distribution.
predicted_distribution = lambda v: np.exp(-beta*(p2 - p1)*v*unit.nanometer**3) * fit.pdf([v], *fit_parameters)
normalizing_factor = scipy.integrate.quad(predicted_distribution, volumes[0], volumes[-1])[0]
predicted = np.array([predicted_distribution(v) / normalizing_factor for v in volumes])
# Set the scale.
label = '$\\rho_{{\mathrm{{{}}}}}$(V|{}atm)$\cdot e^{{\\beta ({}atm - {}atm) V}}$'.format(barostat, original_pressure, new_pressure, original_pressure)
ax.plot(volumes, predicted, ls='--', label=label)
# ax.plot(volumes, [fit.pdf([v], *fit_parameters) for v in volumes], label='original')
# Plot comparison MD vs expanded ensemble and HREX volumes.
for ax_idx, (trajectory, label) in enumerate([
(b_volumes_ee[1], 'B,EE'), (mc_volumes_hrex[1], 'MC,HREX')
]):
label = 'E'
ax = axes[ax_idx]
label = '$\\rho_{{\mathrm{{{}}}}}$(V|1atm)'.format(label)
sns.distplot(trajectory, label=label, hist=False, ax=ax)
# Set titles and configure axes.
axes[0].set_title('Berendsen barostat volume distribution', pad=2.0)
axes[1].set_title('Monte Carlo barostat volume distribution', pad=2.0)
for ax_idx in range(len(axes)):
axes[ax_idx].set_xlim((78.8, 81.2))
axes[ax_idx].set_ylim((0.0, 6.0))
axes[ax_idx].set_ylabel('density')
axes[0].set_xlabel('', labelpad=0.3)
axes[1].set_xlabel('Volume [nm^3]', labelpad=0.3)
# Create single legend for both MC and B barostat axes.
bbox_to_anchor = (-0.1, -0.15)
axes[0].legend(fontsize='xx-small', loc='upper left', bbox_to_anchor=bbox_to_anchor, ncol=4,
fancybox=True, labelspacing=0.7, handletextpad=0.4, columnspacing=1.1,)
# axes[0].get_legend().remove()
axes[1].get_legend().remove()
plt.tight_layout(pad=0, rect=[0.0, 0.0, 1.0, 1.0])
# Directory with the restraint information.
RESTRAINT_DATA_DIR_PATH = os.path.join('YankAnalysis', 'RestraintAnalysis')
# The state index of the discharged state with LJ interactions intact.
DISCHARGED_STATE = {
'CB8-G3': 25,
'OA-G3': 32,
'OA-G6': 29
}
# The final free energy predictions without restraint unbiasing.
BIASED_FREE_ENERGIES = {
'CB8-G3-0': -10.643,
'CB8-G3-1': -10.533,
'CB8-G3-2': -10.463,
'CB8-G3-3': None, # TODO: Run the biased analysis
'CB8-G3-4': -10.324,
'OA-G3-0': -5.476,
'OA-G3-1': -5.588,
'OA-G3-2': -5.486,
'OA-G3-3': -5.510,
'OA-G3-4': -5.497,
'OA-G6-0': -5.669,
'OA-G6-1': -5.665,
'OA-G6-2': -5.767,
'OA-G6-3': -5.737,
'OA-G6-4': -5.788,
}
def plot_restraint_distance_distribution(system_id, ax, kde=True, iteration_set=None):
"""Plot the distribution of restraint distances at bound, discharged, and decoupled states.
Return the 99.99-percentile restraint radius that was used as a cutoff during analysis.
"""
n_iterations = YANK_N_ITERATIONS + 1 # Count also iteration 0.
system_name = system_id[:-2]
discharged_state_idx = DISCHARGED_STATE[system_name]
# Load all distances cached during the analysis.
cache_dir_path = os.path.join('pkganalysis', 'cache', system_id.replace('-', ''))
cached_distances_file_path = os.path.join(cache_dir_path, 'restraint_distances_cache.npz')
distances_kn = np.load(cached_distances_file_path)['arr_0']
# Distances are in nm but we plot in Angstrom.
distances_kn *= 10
n_states = int(len(distances_kn) / n_iterations)
# Use the same colors that are used in the water analysis figures.
color_palette = sns.color_palette('viridis', n_colors=n_states)
color_palette = [color_palette[i] for i in (0, discharged_state_idx, -1)]
# Isolate distances in the bound, discharged (only LJ), and decoupled state.
distances_kn_bound = distances_kn[:n_iterations]
distances_kn_discharged = distances_kn[(discharged_state_idx-1)*n_iterations:discharged_state_idx*n_iterations]
distances_kn_decoupled = distances_kn[(n_states-1)*n_iterations:]
# Filter iterations.
if iteration_set is not None:
distances_kn_bound = distances_kn_bound[iteration_set]
distances_kn_discharged = distances_kn_discharged[iteration_set]
distances_kn_decoupled = distances_kn_decoupled[iteration_set]
assert len(distances_kn_bound) == len(distances_kn_decoupled)
# Plot the distributions.
# sns.distplot(distances_kn, ax=ax, kde=True, label='all states')
sns.distplot(distances_kn_bound, ax=ax, kde=kde, label='bound', color=color_palette[0])
sns.distplot(distances_kn_discharged, ax=ax, kde=kde, label='discharged', color=color_palette[1])
sns.distplot(distances_kn_decoupled, ax=ax, kde=kde, label='decoupled', color=color_palette[2])
# Plot the threshold used for analysis, computed as the
# 99.99-percentile of all distances in the bound state.
distance_cutoff = np.percentile(a=distances_kn_bound, q=99.99)
limits = ax.get_ylim()
ax.plot([distance_cutoff for _ in range(100)],
np.linspace(limits[0], limits[1]/2, num=100), color='black')
return distance_cutoff
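# The following sketch is illustrative only and is not called by the analysis.
# It shows, on made-up data, the percentile-based cutoff computed above with
# np.percentile: essentially all bound-state samples fall below the cutoff.
def _example_percentile_cutoff():
    """Toy example of the 99.99-percentile restraint-distance cutoff."""
    import numpy as np
    rng = np.random.RandomState(0)
    toy_bound_distances = rng.gamma(shape=2.0, scale=1.0, size=10000)  # made-up distances
    cutoff = np.percentile(a=toy_bound_distances, q=99.99)
    assert np.mean(toy_bound_distances <= cutoff) >= 0.9999
    return cutoff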
def plot_restraint_profile(system_id, ax, restraint_cutoff):
"""Plot the free energy as a function of the restraint cutoff."""
# Load the free energy profile for this system.
restraint_profile_file_path = os.path.join(RESTRAINT_DATA_DIR_PATH,
system_id.replace('-', '') + '.json')
with open(restraint_profile_file_path, 'r') as f:
free_energies_profile = json.load(f)
# Reorder the free energies by increasing cutoff and convert str keys to floats.
free_energies_profile = [(float(d), f) for d, f in free_energies_profile.items()]
free_energies_profile = sorted(free_energies_profile, key=lambda x: x[0])
distance_cutoffs, free_energies = list(zip(*free_energies_profile))
f, df = list(zip(*free_energies))
# Convert string to floats.
distance_cutoffs = [float(c) for c in distance_cutoffs]
# Plot profile.
ax.errorbar(x=distance_cutoffs, y=f, yerr=df, label='after reweighting')
# Plot biased free energy
biased_f = BIASED_FREE_ENERGIES[system_id]
x = np.linspace(*ax.get_xlim())
ax.plot(x, [biased_f for _ in x], label='before reweighting')
# Plot restraint distance cutoff.
limits = ax.get_ylim()
x = [restraint_cutoff for _ in range(100)]
y = np.linspace(limits[0], limits[1], num=100)
ax.plot(x, y, color='black')
def plot_restraint_analysis(system_id, axes):
"""Plot distribution of restraint distances and free energy profile on two axes."""
# Histograms of restraint distances/energies.
ax = axes[0]
kde = True
restraint_cutoff = plot_restraint_distance_distribution(system_id, ax, kde=kde)
    # Set restraint distance distribution labels and titles.
ax.set_title('Restrained ligand-receptor distance', pad=2.0)
if kde is False:
ax.set_ylabel('Number of samples')
else:
ax.set_ylabel('density')
ax.legend(loc='upper right', fontsize='x-small')
ax.set_xlabel('Restrained distance [$\mathrm{\AA}$]', labelpad=0.3)
# Free energy as a function of restraint distance.
ax = axes[1]
ax.set_title('$\Delta G$ as a function of restraint radius cutoff', pad=2.0 )
plot_restraint_profile(system_id, ax, restraint_cutoff)
# Labels and legend.
ax.set_xlabel('Restraint radius cutoff [$\mathrm{\AA}$]', labelpad=0.3)
ax.set_ylabel('$\Delta G$ [kcal/mol]')
ax.legend(fontsize='x-small')
def plot_restraint_and_barostat_analysis():
"""Plot the Figure showing info for the restraint and barostat analysis."""
import seaborn as sns
from matplotlib import pyplot as plt
sns.set_style('whitegrid')
sns.set_context('paper', font_scale=1.0)
# Create two columns, each of them share the x-axis.
fig = plt.figure(figsize=(7.25, 4))
    # Barostat volume distribution axes.
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(223, sharex=ax1)
barostat_axes = [ax1, ax2]
    # Restraint distribution axes.
ax3 = fig.add_subplot(222)
ax4 = fig.add_subplot(224, sharex=ax3)
restraint_axes = [ax3, ax4]
# Plot barostat analysis.
plot_volume_distributions(barostat_axes, plot_predicted=True)
# Plot restraint analysis.
system_id = 'OA-G3-0'
plot_restraint_analysis(system_id, restraint_axes)
# Configure axes.
restraint_axes[0].set_xlim((0, 10.045))
restraint_axes[1].set_ylim((-7, -3.9))
for ax in restraint_axes + barostat_axes:
ax.tick_params(axis='x', which='major', pad=0.1)
ax.tick_params(axis='y', which='major', pad=0.1)
plt.tight_layout(pad=0.3)
# plt.show()
output_file_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure5-restraint_barostat',
'restraint_barostat.pdf')
os.makedirs(os.path.dirname(output_file_path), exist_ok=True)
plt.savefig(output_file_path)
# =============================================================================
# FIGURE 6 - HREX INITIAL BIAS
# =============================================================================
def plot_yank_system_bias(system_name, data_dir_paths, axes, shift_to_origin=True, plot_std=True):
"""Plot the YANK free energy trajectoies when discarding initial samples for a single system."""
color_palette = sns.color_palette('viridis', n_colors=len(data_dir_paths)+1)
# Plot trajectories with truncated data.
all_iterations = set()
for data_idx, data_dir_path in enumerate(data_dir_paths):
yank_analysis = YankSamplingAnalysis(data_dir_path)
# In the YankAnalysis folder, each analysis starting from
# iteration N is in the folder "iterN/".
last_dir_name = os.path.basename(os.path.normpath(data_dir_path))
label = last_dir_name[4:]
# First color is for the full data.
color = color_palette[data_idx+1]
# Collect all iterations that we'll plot for the full data.
mean_data = yank_analysis.get_system_free_energies(system_name, mean_trajectory=True)
all_iterations.update(mean_data['HREX iteration'].values.tolist())
        # Shift the trajectory so that it appears to start from the origin.
if shift_to_origin:
mean_data['HREX iteration'] -= mean_data['HREX iteration'].values[0]
plot_mean_data(mean_data, axes, x='HREX iteration', color=color,
label=label, plot_std=plot_std, plot_bias=False, plot_ci=False)
# Plot trajectory with full data.
color = color_palette[0]
# Plot an early iteration and all the iterations analyzed for the bias.
yank_analysis = YankSamplingAnalysis(YANK_ANALYSIS_DIR_PATH)
system_ids = [system_name + '-' + str(i) for i in range(5)]
first_iteration = yank_analysis.get_system_iterations(system_ids[0])[2]
iterations = [first_iteration] + sorted(all_iterations)
mean_data = yank_analysis._get_free_energies_from_iterations(
iterations, system_ids, mean_trajectory=True)
    # Shift the full-data trajectory so that it appears to start from the origin.
if shift_to_origin:
mean_data['HREX iteration'] -= mean_data['HREX iteration'].values[0]
plot_mean_data(mean_data, axes, x='HREX iteration', color=color,
label='0', plot_std=plot_std, plot_bias=False, plot_ci=False)
axes[0].set_title(system_name)
def plot_yank_bias(plot_std=True, figure_dir_path=None):
"""Plot YANK free energy trajectories when discarding initial samples."""
    # Plot the unshifted mean trajectories of CB8-G3, OA-G3, and OA-G6 in the
    # three columns. Entries in what_to_plot with the flag set to True would
    # instead have all their sub-trajectories shifted to the origin.
what_to_plot = [
('CB8-G3', False),
# ('CB8-G3', True),
('OA-G3', False),
# ('OA-G3', False),
('OA-G6', False),
]
if plot_std:
n_rows = 2
else:
n_rows = 1
n_cols = len(what_to_plot)
fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(7.25, 4.0))
# The loops are based on a two dimensional array of axes.
if n_rows == 1:
axes = np.array([axes])
# Sort paths by how many samples they have.
data_dir_paths = ['YankAnalysis/BiasAnalysis/iter{}/'.format(i) for i in [1000, 2000, 4000, 8000, 16000, 24000]]
for column_idx, (system_name, shift_to_origin) in enumerate(what_to_plot):
plot_yank_system_bias(system_name, data_dir_paths, axes[:,column_idx],
shift_to_origin=shift_to_origin, plot_std=plot_std)
title = system_name + ' (shifted)' if shift_to_origin else system_name
axes[0,column_idx].set_title(title)
# Fix axes limits and labels.
ylimits = {
'CB8-G3': (-12.5, -10.5),
'OA-G3': (-8, -6),
'OA-G6': (-8, -6)
}
for column_idx, (system_name, _) in enumerate(what_to_plot):
axes[0][column_idx].set_ylim(ylimits[system_name])
if plot_std:
axes[1][column_idx].set_ylim((0, 0.6))
for row_idx, ax_idx in itertools.product(range(n_rows), range(n_cols)):
# Control the number of ticks for the x axis.
axes[row_idx][ax_idx].locator_params(axis='x', nbins=4)
# Set x limits for number of iterations.
axes[row_idx][ax_idx].set_xlim((0, YANK_N_ITERATIONS))
# Remove ticks labels that are shared with the last row.
for row_idx, ax_idx in itertools.product(range(n_rows-1), range(n_cols)):
axes[row_idx][ax_idx].set_xticklabels([])
# Set axes labels.
axes[0][0].set_ylabel('$\Delta$G [kcal/mol]')
if plot_std:
axes[1][0].set_ylabel('std($\Delta$G) [kcal/mol]')
# If there is an odd number of columns print x label only on the central one.
if n_cols % 2 == 1:
axes[-1][1].set_xlabel('HREX iteration')
else:
for ax in axes[-1]:
ax.set_xlabel('HREX iteration')
plt.tight_layout(h_pad=0.1, rect=[0.0, 0.00, 1.0, 0.91])
handles, labels = axes[0][0].get_legend_handles_labels()
handles = [handles[-1]] + handles[:-1]
labels = [labels[-1]] + labels[:-1]
bbox_to_anchor = (0.4, 1.53)
axes[0][0].legend(handles, labels, loc='upper left', bbox_to_anchor=bbox_to_anchor,
title='number of discarded initial iterations', ncol=len(data_dir_paths)+1,
fancybox=True, labelspacing=0.8, handletextpad=0.5, columnspacing=1.2,
fontsize='small')
# plt.show()
if figure_dir_path is None:
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure6-bias_hrex')
os.makedirs(figure_dir_path, exist_ok=True)
output_file_path = os.path.join(figure_dir_path, 'Figure6-bias_hrex')
plt.savefig(output_file_path + '.pdf')
# plt.savefig(output_file_path + '.png', dpi=600)
# =============================================================================
# SUPPORTING INFORMATION - EXAMPLE OF HREX BIAS
# =============================================================================
def simulate_correlation_samples():
"""Simulation of bias from same initial configuration.
There are 3 states as different harmonic oscillators, but all
or almost all the samples come from the first (bound) state to
simulate what happens when they don't decorrelate fast enough.
The hypothesis is that most is that starting from the bound
state causes the initial free energy to be artificially negative
if the correlation times are long.
The second (discharged) state is just a shifted harmonic oscillator
(same free energy as bound state). The third (unbound) is shifted
and has much higher entropy.
"""
from numpy.random import normal
from pymbar import MBAR
def harmonic_oscillator_free_energy(sigma):
"""Analytical expression for the free energy of a harmonic oscillator."""
#return - np.log(2 * np.pi * sigma**2) * 3.0 / 2.0 # 3D oscillator
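        # 1D derivation: Z = int exp(-x**2 / (2*sigma**2)) dx = sqrt(2*pi)*sigma,
        # so F/kT = -ln(Z) = -ln(sqrt(2*pi)*sigma).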
return - np.log(np.sqrt(2 * np.pi) * sigma)
def harmonic_oscillator_potential(x, loc, std):
"""Compute potential of the given positions given location
and standard deviation of the Gaussian distribution.
Potentials are returned in units of kT.
"""
spring_constant = 1 / std**2
return spring_constant / 2.0 * (x - loc)**2
def print_free_energies(Deltaf_ij, dDeltaf_ij):
mbar_str = ', '.join(['{:.4f} +- {:.4f}'.format(f, df) for f, df in zip(Deltaf_ij[:,0], dDeltaf_ij[:,0])])
print('MBAR :', mbar_str)
analytical_str = ', '.join(['{:.4f} '.format(f) for f in analytical_Deltaf])
print('Analytical:', analytical_str)
def compute_mbar_free_energy(all_samples, shifts, stds, analytical_f):
n_states = len(all_samples)
        # u_kn[k,n] is the reduced potential energy of the n-th sample evaluated at state k.
u_kn = np.empty(shape=(n_states, n_states*n_samples))
# Convert samples to potentials.
for k in range(n_states):
for sampled_k, samples in enumerate(all_samples):
start = sampled_k * n_samples
end = (sampled_k + 1) * n_samples
u_kn[k,start:end] = harmonic_oscillator_potential(samples, loc=shifts[k], std=stds[k])
# Compute MBAR free energy.
N_k = np.array([n_samples] * n_states)
mbar = MBAR(u_kn, N_k=N_k, initial_f_k=analytical_f)
Deltaf_ij, dDeltaf_ij, _ = mbar.getFreeEnergyDifferences()
return Deltaf_ij, dDeltaf_ij
# Determine standard deviation and shift of the harmonic distributions.
n_samples = 5000000
stds = np.array([2.0, 2.0, 5.0])
shifts = np.array([0.0, 2.0, 2.0])
print('\nspring constants:', 1 / stds**2)
# Compute analytical free energy.
analytical_f = np.array([harmonic_oscillator_free_energy(s) for s in stds])
analytical_Deltaf = np.array([analytical_f[0] - analytical_f[i] for i in range(len(stds))])
# FIRST TEST.
# Sample from all states and verify that MBAR free energy is correct.
# -------------------------------------------------------------------
all_samples = [normal(loc=l, scale=s, size=n_samples) for l, s in zip(shifts, stds)]
Deltaf_ij, dDeltaf_ij = compute_mbar_free_energy(all_samples, shifts, stds, analytical_f)
print()
print_free_energies(Deltaf_ij, dDeltaf_ij)
# SECOND TEST.
# Check if the bias is not due to lack of overlap. If we sample only the end states the estimate should be correct.
# -----------------------------------------------------------------------------------------------------------------
for i in range(1, len(all_samples)):
all_samples_bar = [all_samples[0], all_samples[i]]
shifts_bar = [shifts[0], shifts[i]]
stds_bar = [stds[0], stds[i]]
analytical_f_bar = [analytical_f[0], analytical_f[i]]
Deltaf_ij, dDeltaf_ij = compute_mbar_free_energy(all_samples_bar, shifts_bar, stds_bar, analytical_f_bar)
print('\nBAR_{}0'.format(i))
print_free_energies(Deltaf_ij, dDeltaf_ij)
# THIRD TEST.
# Now sample from only the bound state to see how the free energy changes.
# ------------------------------------------------------------------------
all_samples[1:] = [normal(loc=shifts[0], scale=stds[0], size=n_samples) for _ in range(len(stds)-1)]
Deltaf_ij, dDeltaf_ij = compute_mbar_free_energy(all_samples, shifts, stds, analytical_f)
print()
print_free_energies(Deltaf_ij, dDeltaf_ij)
# FOURTH TEST.
# Now let the unbound state decorrelate fast (i.e. sample from its own distribution).
# -----------------------------------------------------------------------------------
all_samples[-1] = normal(loc=shifts[-1], scale=stds[-1], size=n_samples)
Deltaf_ij, dDeltaf_ij = compute_mbar_free_energy(all_samples, shifts, stds, analytical_f)
print()
print_free_energies(Deltaf_ij, dDeltaf_ij)
# RESULT: SUCCESS!!!
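# The following sketch is illustrative only and is not called by the analysis.
# It checks the analytical free energies used above: for 1D harmonic oscillators
# the free energy difference in kT units reduces to ln(sigma_i/sigma_0). The
# sigmas mirror the ones used in simulate_correlation_samples().
def _example_harmonic_deltaf():
    """Toy check of the analytical harmonic-oscillator free energy differences."""
    import numpy as np
    stds = np.array([2.0, 2.0, 5.0])
    # F_i/kT = -ln(sqrt(2*pi)*sigma_i), so F_0 - F_i = ln(sigma_i/sigma_0).
    analytical_f = -np.log(np.sqrt(2 * np.pi) * stds)
    analytical_deltaf = analytical_f[0] - analytical_f
    assert np.allclose(analytical_deltaf, np.log(stds / stds[0]))
    return analytical_deltaf  # [0.0, 0.0, ~0.916] in kT.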
# =============================================================================
# SUPPORTING INFORMATION - COMPLEX/SOLVENT and ENTROPY/ENTHALPY DECOMPOSITION
# =============================================================================
def _mean_data_decomposition(data):
# Convert into a numpy array to take the mean.
# Convert None (not supported by numpy) into nans.
try:
# This may fail if we have computed different iterations for each.
        data = np.array(data, dtype=float)
except ValueError:
data_lengths = [len(x) for x in data]
print('Warning: Truncating data of shape {}'.format(data_lengths))
min_length = min(data_lengths)
data = [x[:min_length] for x in data]
        data = np.array(data, dtype=float)
# Compute std and mean along the trajectory ignoring NaNs.
return np.nanmean(data, axis=0), np.nanstd(data, axis=0)
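# The following sketch is illustrative only and is not called by the analysis.
# It shows, on made-up numbers, how _mean_data_decomposition() treats missing
# values: None entries become NaN when cast to a float array, and
# np.nanmean/np.nanstd then ignore them.
def _example_mean_data_decomposition():
    """Toy example of averaging replicate trajectories that contain None."""
    import numpy as np
    data = [[1.0, 2.0, None],
            [3.0, None, 4.0]]
    data = np.array(data, dtype=float)  # None -> nan
    mean = np.nanmean(data, axis=0)  # [2.0, 2.0, 4.0]
    std = np.nanstd(data, axis=0)  # [1.0, 0.0, 0.0]
    return mean, std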
def _plot_phase_decomposition(ax, phase_free_energies):
# Shortcuts.
data = phase_free_energies
label = '$\Delta$G'
    # Plot each phase on a separate axis to make comparisons across different orders of magnitude easier.
    # Recipe with three axes: https://matplotlib.org/3.1.0/gallery/ticks_and_spines/multiple_yaxis_with_spines.html
phase_axes = {
'complex': ax.twinx(),
'solvent': ax.twinx()
}
phase_colors = {
'complex': 'C1',
'solvent': 'C0',
}
for ax_name in sorted(phase_axes):
phase_axes[ax_name].set_ylabel(label + ' ' + ax_name + ' [kcal/mol]',
color=phase_colors[ax_name])
phase_axes[ax_name].spines["right"].set_position(("axes", 1.2))
# Compute total free energy summing complex and solvent for all replicates.
total_mean = [np.array(data['solvent'][i]) + np.array(data['complex'][i]) for i in range(5)]
total_mean, total_std = _mean_data_decomposition(total_mean)
# Compute and plot the phase free energy.
for phase_name in ['complex', 'solvent']:
color = phase_colors[phase_name]
# Convert into a numpy array to take the mean.
# Convert None (not supported by numpy) into nans.
data[phase_name], std = _mean_data_decomposition(data[phase_name])
# Plot each phase data on a separate axis to make the comparison easier.
phase_axes[phase_name].plot(data[phase_name], ls='-', color=color,
label=label + ' ' + phase_name)
# Plot uncertainties.
phase_axes[phase_name].fill_between(x=list(range(len(std))), y1=data[phase_name]-std,
y2=data[phase_name]+std, color=color, alpha=0.7)
# Plot total free energy.
# total = data['solvent'] + data['complex']
# ax.plot(total, color='black', label=label+' total')
ax.plot(total_mean, color='black', label=label+' total')
ax.fill_between(x=list(range(len(total_std))), y1=total_mean-total_std,
y2=total_mean+total_std, color='black', alpha=0.7)
ax.set_ylabel(label + ' total [kcal/mol]')
ax.set_xlabel('simulation percentage')
# Make the range of all y axes the same.
ax.set_ylim((-21, -18))
phase_axes['complex'].set_ylim((-151.0, -148.0))
phase_axes['solvent'].set_ylim((129.0, 132.0))
def _plot_entropy_enthalpy_decomposition(ax, phase_free_energies, phase_enthalpy):
# Analyze only the complex.
phase_name = 'complex'
    # Plot each quantity on a separate axis to make comparisons across different orders of magnitude easier.
    # Recipe with three axes: https://matplotlib.org/3.1.0/gallery/ticks_and_spines/multiple_yaxis_with_spines.html
axes = {
'$\Delta$G': ax,
'$\Delta$H': ax.twinx(),
'-T$\Delta$S': ax.twinx(),
}
colors = {
'$\Delta$G': 'black',
'$\Delta$H': 'C1',
'-T$\Delta$S': 'C0',
}
for ax_name in sorted(axes):
axes[ax_name].set_ylabel(ax_name + ' ' + phase_name + ' [kcal/mol]', color=colors[ax_name])
axes[ax_name].spines["right"].set_position(("axes", 1.2))
# Variable used to propagate entropy decomposition.
entropy_std = []
# Plot the total average free energy and enthalpy and for each phase.
for data, label in [(phase_free_energies, '$\Delta$G'),
(phase_enthalpy, '$\Delta$H')]:
color = colors[label]
# Convert into a numpy array to take the mean.
# Convert None (not supported by numpy) into nans.
data[phase_name], std = _mean_data_decomposition(data[phase_name])
ns_replica = np.arange(0.0, 40.0, 40/len(std))
# Plot each phase data on a separate axis to make the comparison easier.
axes[label].plot(ns_replica, data[phase_name], ls='-', color=color, label=label+' '+phase_name)
# Plot uncertainties.
axes[label].fill_between(x=ns_replica, y1=data[phase_name]-std,
y2=data[phase_name]+std, color=color, alpha=0.7)
# Propagate uncertainty.
if len(entropy_std) == 0:
entropy_std = std**2
else:
entropy_std += std**2
entropy_std = np.sqrt(entropy_std)
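    # Since -T*dS = dG - dH and the dG and dH uncertainties are treated as
    # independent, the propagated uncertainty is sqrt(std(dG)**2 + std(dH)**2).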
# Plot also entropies.
label = '-T$\Delta$S'
color = colors[label]
entropy = phase_free_energies[phase_name] - phase_enthalpy[phase_name]
axes[label].plot(ns_replica, entropy, ls='-', color=color, label=label+' '+phase_name)
# Plot uncertainties.
axes[label].fill_between(x=ns_replica, y1=entropy-entropy_std,
y2=entropy+entropy_std, color=color, alpha=0.7)
ax.set_xlabel('ns/replica')
def plot_decomposition(system_name, starting_iteration, type, output_file_path):
"""
    Decomposition of the free energy trajectory into complex/solvent phases or entropy/enthalpy contributions.
Parameters
----------
type : str
Can be 'entropy-enthalpy' or 'phase'.
"""
data_file_pattern = 'YankAnalysis/BiasAnalysis/iter{}/fe-decomposition-{}-{{}}.json'.format(
starting_iteration, system_name)
n_replicates = 5
phase_free_energies = {'complex': [[] for _ in range(n_replicates)],
'solvent': [[] for _ in range(n_replicates)]}
phase_enthalpy = copy.deepcopy(phase_free_energies)
for replicate_idx in range(n_replicates):
# Read decomposition data.
decomposition_data_file_path = data_file_pattern.format(replicate_idx)
with open(decomposition_data_file_path, 'r') as f:
decomposition_data = json.load(f)
# Read free energy and enthalpy at each iteration.
sorted_decomposition_data = sorted(decomposition_data, key=lambda x: int(x.split('-')[1]))
for phase_iter in sorted_decomposition_data:
decomposition = decomposition_data[phase_iter]
phase_name, iteration = phase_iter.split('-')
# Correct sign consistent with thermodynamic cycle.
if phase_name == 'complex':
sign = -1
else:
sign = 1
corrected_free_energy = sign * (decomposition['DeltaF'] + decomposition['DeltaF_standard_state_correction'])
phase_free_energies[phase_name][replicate_idx].append(corrected_free_energy)
# Multiplication works only if enthalpy is not None.
if decomposition['DeltaH'] is not None:
decomposition['DeltaH'] *= sign
phase_enthalpy[phase_name][replicate_idx].append(decomposition['DeltaH'])
# Create figure.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(7.25, 4.6))
if type == 'entropy-enthalpy':
_plot_entropy_enthalpy_decomposition(ax, phase_free_energies, phase_enthalpy)
else:
_plot_phase_decomposition(ax, phase_free_energies)
# # Plot total free energy.
# total = data['solvent'] + data['complex']
# ax.plot(total, color=color, label=label)
# totals.append(total)
# Plot also entropies.
# ax.plot(totals[0] - totals[1], color='blue', label='-T$\Delta$S')
# ax.set_ylim((-20, -18))
# phase_axes['complex'].set_ylim((-153, -148))
# phase_axes['solvent'].set_ylim((128, 133))
# ax.set_ylim((-23, -18))
# phase_axes['complex'].set_ylim((30, 45))
# phase_axes['solvent'].set_ylim((-55, -40))
# ax.legend()
plt.tight_layout()
if output_file_path is not None:
os.makedirs(os.path.dirname(output_file_path), exist_ok=True)
plt.savefig(output_file_path)
else:
plt.show()
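# The following sketch is illustrative only and is not called by the analysis.
# It spells out, with made-up numbers, the sign convention used above when
# combining the two phases of the thermodynamic cycle: the complex phase enters
# with a minus sign, the solvent phase with a plus sign, and the binding free
# energy is their sum.
def _example_phase_sign_convention():
    """Toy example of the complex/solvent sign convention in plot_decomposition()."""
    # Hypothetical phase free energies in kcal/mol (the complex value includes
    # its standard state correction).
    delta_f_complex = 148.0
    delta_f_solvent = 129.0
    corrected_complex = -1 * delta_f_complex  # sign = -1 for 'complex'
    corrected_solvent = +1 * delta_f_solvent  # sign = +1 for 'solvent'
    return corrected_complex + corrected_solvent  # -19.0 kcal/mol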
# =============================================================================
# RELATIVE EFFICIENCY ANALYSIS
# =============================================================================
def get_relative_efficiency_input(submission, yank_analysis, system_name):
"""Prepare the data to compute the mean relative efficiencies for this system."""
    # For GROMACS/EE-fullequil we need to account for the extra equilibration
    # cost and shift all energy evaluations to the right.
if submission.paper_name == 'GROMACS/EE-fullequil':
mean_free_energies = submission.mean_free_energies()
mean_data = mean_free_energies[mean_free_energies['System name'] == system_name]
first_shifted = mean_data['N energy evaluations'].values[0]
last_shifted = mean_data['N energy evaluations'].values[-1]
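        # Assuming the 100 reported costs are evenly spaced multiples of a fixed
        # per-iteration cost k shifted by the calibration cost c (first = c + k,
        # last = c + 100*k), the calibration cost is c = (100*first - last)/99.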
calibration_cost = first_shifted*100/99 - last_shifted/99
else:
calibration_cost = 0
# Isolate the data for the system.
data_sub = submission.data[submission.data['System name'] == system_name]
n_energy_evaluations = max(data_sub['N energy evaluations'])
data_ref = yank_analysis.get_free_energies_from_energy_evaluations(
n_energy_evaluations, system_name=system_name, mean_trajectory=False,
start=calibration_cost)
# Obtain the free energies for the submission.
n_replicates = 5
free_energy_sub = np.empty(shape=(n_replicates, 100))
free_energy_ref = np.empty(shape=(n_replicates, 100))
for data, free_energy in [
(data_sub, free_energy_sub),
(data_ref, free_energy_ref),
]:
for i in range(n_replicates):
system_id = system_name + '-' + str(i)
system_id_data = data[data['System ID'] == system_id]
free_energy[i] = system_id_data[DG_KEY].values
# Discard the initial frames of REVO and GROMACS/EE that don't have predictions.
from pkganalysis.efficiency import discard_initial_zeros
free_energy_ref, free_energy_sub = discard_initial_zeros(free_energy_ref, free_energy_sub)
# Determine the actual asymptotic free energy of YANK.
asymptotic_free_energy_ref = yank_analysis.get_reference_free_energies()[system_name]
return free_energy_ref, free_energy_sub, asymptotic_free_energy_ref
def compute_all_relative_efficiencies(
free_energy_A, free_energy_B, ci, n_bootstrap_samples,
asymptotic_free_energy_A=None, asymptotic_free_energy_B=None
):
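    # free_energy_A/B are expected to be (n_replicates, n_points) arrays (see
    # get_relative_efficiency_input above). With ci=None a list with the
    # [std, absolute bias, RMSE] relative efficiencies is returned; otherwise
    # the corresponding bootstrap confidence intervals are returned as well.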
from pkganalysis.efficiency import EfficiencyAnalysis
analysis = EfficiencyAnalysis(free_energy_A, free_energy_B,
asymptotic_free_energy_A,
asymptotic_free_energy_B)
std_rel_eff = analysis.compute_std_relative_efficiency(
confidence_interval=ci, n_bootstrap_samples=n_bootstrap_samples)
abs_bias_rel_eff = analysis.compute_abs_bias_relative_efficiency(
confidence_interval=ci, n_bootstrap_samples=n_bootstrap_samples)
rmse_rel_eff = analysis.compute_rmse_relative_efficiency(
confidence_interval=ci, n_bootstrap_samples=n_bootstrap_samples)
if ci is None:
rel_eff = [std_rel_eff, abs_bias_rel_eff, rmse_rel_eff]
return rel_eff
else:
rel_eff = [std_rel_eff[0], abs_bias_rel_eff[0], rmse_rel_eff[0]]
cis = [std_rel_eff[1], abs_bias_rel_eff[1], rmse_rel_eff[1]]
return rel_eff, cis
def plot_relative_efficiencies(submissions, yank_analysis, ci=0.95, n_bootstrap_samples=1000,
same_plot=False, step_cumulative=2):
sns.set_style('whitegrid')
sns.set_context('paper')
statistic_names = ['std', 'absolute bias', 'RMSE']
# Create output directory.
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'SI_Figure-efficiencies')
os.makedirs(figure_dir_path, exist_ok=True)
# Check if we need all the efficiencies in the same plot or not.
if same_plot:
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(7.25, 8))
# Keep track of data range by statistic.
statistic_ranges = {name: [np.inf, 0] for name in statistic_names}
# Keep track of n_energy_evaluations by column.
max_n_energy_evaluations = [0 for _ in range(3)]
for submission in submissions:
if submission.paper_name in {'OpenMM/REVO'}:
continue
# if submission.paper_name in {'AMBER/APR', 'GROMACS/NS-DS/SB', 'GROMACS/NS-DS/SB-long',
# 'NAMD/BAR', 'GROMACS/EE', 'GROMACS/EE-fullequil', 'OpenMM/SOMD'}:
# continue
print(submission.paper_name)
system_names = submission.data['System name'].unique()
# Create figure.
if not same_plot:
# For GROMACS/EE, there are no submissions for CB8-G3.
if 'GROMACS/EE' in submission.paper_name:
system_names = system_names[~(system_names == 'CB8-G3')]
fig, axes = plt.subplots(nrows=3, ncols=len(system_names),
figsize=(7.25, 8))
statistic_ranges = {name: [np.inf, 0] for name in statistic_names}
for col_idx, system_name in enumerate(system_names):
color = SUBMISSION_COLORS[submission.paper_name]
# For GROMACS/EE, there are no submissions for CB8-G3.
if 'GROMACS/EE' in submission.paper_name and system_name == 'CB8-G3':
continue
# For GROMACS/NS-DS/SB-long there are no new submissions for OAs.
if 'GROMACS/NS-DS/SB-long' in submission.paper_name and system_name != 'CB8-G3':
# Just add the label.
axes[0][col_idx].plot([], color=color, label=submission.paper_name)
continue
# Get input for EfficiencyAnalysis.
free_energy_ref, free_energy_sub, asymptotic_free_energy_ref = get_relative_efficiency_input(
submission, yank_analysis, system_name)
# Get the relative efficiencies.
rel_eff = compute_all_relative_efficiencies(
free_energy_ref, free_energy_sub, ci, n_bootstrap_samples,
asymptotic_free_energy_A=asymptotic_free_energy_ref
)
if ci is not None:
rel_eff, cis = rel_eff # Unpack confidence intervals.
# Use the same asymptotic free energies to compute the absolute bias
# relative efficiency as a function of the simulation length.
asymptotic_free_energy_sub = free_energy_sub.mean(axis=0)[-1]
# # Print relative efficiencies.
# print(system_name, ci)
# if ci is not None:
# for rel_eff, bounds in zip(rel_eff, cis):
# print('\t', rel_eff, bounds.tolist())
# else:
# for rel_eff in rel_eff:
# print('\t', rel_eff)
# Compute mean efficiencies as a function of the length of the simulation.
n_costs = free_energy_ref.shape[1]
n_rel_eff = int(n_costs / step_cumulative)
relative_efficiencies = np.empty(shape=(3, n_rel_eff))
low_bounds = np.empty(shape=(3, n_rel_eff))
high_bounds = np.empty(shape=(3, n_rel_eff))
for i, c in enumerate(range(step_cumulative-1, n_costs, step_cumulative)):
c1 = c + 1
rel_eff = compute_all_relative_efficiencies(
free_energy_ref[:,:c1], free_energy_sub[:,:c1],
ci, n_bootstrap_samples,
asymptotic_free_energy_A=asymptotic_free_energy_ref,
asymptotic_free_energy_B=asymptotic_free_energy_sub
)
if ci is not None:
rel_eff, cis = rel_eff # Unpack confidence intervals.
# Update CI lower and upper bound.
relative_efficiencies[:,i] = rel_eff
if ci is not None:
low_bounds[:,i] = [x[0] for x in cis]
high_bounds[:,i] = [x[1] for x in cis]
# Get number of energy evaluations.
mean_data = submission.mean_free_energies(system_name=system_name)
# Check how many initial iteration have been discarded.
discarded_iterations = 100 - n_costs
n_energy_evaluations = mean_data['N energy evaluations'].values[
discarded_iterations+1::step_cumulative] / 1e6
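            # Note: pairing these x values with the cumulative relative efficiencies
            # computed above appears to rely on the default step_cumulative == 2;
            # for other strides the offset would be discarded_iterations + step_cumulative - 1.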
for row_idx, rel_eff in enumerate(relative_efficiencies):
ax = axes[row_idx][col_idx]
ax.plot(n_energy_evaluations, rel_eff, color=color, label=submission.paper_name)
# Plot back line at 0.
ax.plot(n_energy_evaluations, [0 for _ in n_energy_evaluations], color='black', ls='--')
# Update data range.
statistic_range = statistic_ranges[statistic_names[row_idx]]
# if ci is None:
# min_rel_eff = min(rel_eff)
# max_rel_eff = max(rel_eff)
# else:
# min_rel_eff = min(*rel_eff, *low_bounds[row_idx])
# max_rel_eff = max(*rel_eff, *high_bounds[row_idx])
statistic_range[0] = min(statistic_range[0], min(rel_eff))
statistic_range[1] = max(statistic_range[1], max(rel_eff))
# Update x-axis range.
if same_plot:
max_n_energy_evaluations[col_idx] = max(max_n_energy_evaluations[col_idx],
n_energy_evaluations[-1])
else:
for row_idx in range(len(statistic_names)):
axes[row_idx][col_idx].set_xlim((0, n_energy_evaluations[-1]))
if ci is not None:
# Plot confidence intervals.
for row_idx, (low_bound_c, high_bound_c) in enumerate(zip(low_bounds, high_bounds)):
ax = axes[row_idx][col_idx]
ax.fill_between(n_energy_evaluations, low_bound_c, high_bound_c,
alpha=0.35, color='gray')
# We do this multiple times unnecessarily if same_plot is True, but the code is simpler.
for col_idx, system_name in enumerate(system_names):
axes[0][col_idx].set_title(system_name)
for row_idx, statistic_name in enumerate(statistic_names):
axes[row_idx][0].set_ylabel(statistic_name + ' rel eff')
for col_idx in range(len(system_names)):
if same_plot:
extra_space = 0.1
else:
# Make space for confidence intervals.
extra_space = 1
ylimits = (statistic_ranges[statistic_name][0] - extra_space,
statistic_ranges[statistic_name][1] + extra_space)
axes[row_idx][col_idx].set_ylim(ylimits)
axes[row_idx][col_idx].tick_params(axis='y', which='major', pad=0.1)
axes[-1][1].set_xlabel('Number of force/energy evaluations [10$^6$]')
# Set labels and axes limits.
if not same_plot:
fig.suptitle(submission.paper_name)
output_file_base_name = 'releff-{}-{}'.format(submission.file_name, submission.receipt_id)
output_file_base_path = os.path.join(figure_dir_path, output_file_base_name)
plt.savefig(output_file_base_path + '.pdf')
# plt.savefig(output_file_base_path + '.png', dpi=600)
# plt.show()
if same_plot:
for row_idx in range(len(statistic_names)):
for col_idx in range(len(system_names)):
axes[row_idx][col_idx].set_xlim((0, max_n_energy_evaluations[col_idx]))
axes[0][1].legend(loc='upper right', bbox_to_anchor=(2.0, 1.48),
fancybox=True, ncol=3)
output_file_base_path = os.path.join(figure_dir_path, 'relative-efficiencies')
plt.savefig(output_file_base_path + '.pdf')
# plt.savefig(output_file_base_path + '.png', dpi=600)
# plt.show()
def plot_absolute_efficiencies(submissions, yank_analysis, ci=0.95, n_bootstrap_samples=1000):
sns.set_style('whitegrid')
sns.set_context('paper')
    # Statistics to plot in the three rows.
statistic_names = ['std', 'absolute bias', 'RMSE']
# Keep track of maximum number of energy evaluations
# to determine plotting range for YANK.
system_names = ['CB8-G3', 'OA-G3', 'OA-G6']
max_n_energy_eval = {name: 0 for name in system_names}
# Create figure.
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(7.25, 8))
for submission in submissions + [yank_analysis]:
if 'REVO' in submission.paper_name:
continue
print(submission.paper_name)
# Obtain std, bias, and RMSE of the 5 trajectories.
# If this is a YANK analysis, we get it later specifically for the system.
if not isinstance(submission, YankSamplingAnalysis):
mean_free_energies = submission.mean_free_energies()
color = SUBMISSION_COLORS[submission.paper_name]
for col_idx, system_name in enumerate(system_names):
# GROMACS/EE doesn't have submissions for CB8-G3.
if 'GROMACS/EE' in submission.paper_name and system_name == 'CB8-G3':
continue
# For GROMACS/NS-DS/SB-long there are no new submissions for OAs.
if 'GROMACS/NS-DS/SB-long' in submission.paper_name and 'OA' in system_name:
# Just add the label.
axes[0][col_idx].plot([], color=color, label=submission.paper_name)
continue
# Select the submission data for only this host-guest system.
if isinstance(submission, YankSamplingAnalysis):
line_style = '--'
mean_data = submission.get_free_energies_from_energy_evaluations(
max_n_energy_eval[system_name], system_name=system_name, mean_trajectory=True)
else:
line_style = '-'
mean_data = mean_free_energies[mean_free_energies['System name'] == system_name]
# Update maximum number of energy evaluations.
n_energy_evaluations = mean_data['N energy evaluations'].values
max_n_energy_eval[system_name] = max(max_n_energy_eval[system_name], n_energy_evaluations[-1])
# Discard initial computational costs for which there's no data.
first_nonzero_idx = np.nonzero(mean_data[DG_KEY])[0][0]
n_energy_evaluations = n_energy_evaluations[first_nonzero_idx:]
# Compute cumulative total std, abs_bias, and RMSE.
scale_energy_evaluations = 1e6
norm_factor = (n_energy_evaluations - n_energy_evaluations[0])[1:] / scale_energy_evaluations
avg_std = sp.integrate.cumtrapz(mean_data['std'].values[first_nonzero_idx:]) / norm_factor
avg_abs_bias = sp.integrate.cumtrapz(np.abs(mean_data['bias'].values[first_nonzero_idx:])) / norm_factor
avg_rmse = sp.integrate.cumtrapz(mean_data['RMSE'].values[first_nonzero_idx:]) / norm_factor
# Plot total statistics as a function of the energy evaluations.
# Discard first energy evaluation as cumtrapz doesn't return a result for it.
for row_idx, avg_stats in enumerate([avg_std, avg_abs_bias, avg_rmse]):
ax = axes[row_idx, col_idx]
ax.plot(n_energy_evaluations[1:] / scale_energy_evaluations, avg_stats,
color=color, label=submission.paper_name, ls=line_style)
# Set x axis.
ax.set_xlim((0, n_energy_evaluations[-1] / scale_energy_evaluations))
# Set labels and axes limits.
y_limits = {
'std': (0, 0.4),
'absolute bias': (0, 0.3),
'RMSE': (0, 0.4)
}
for col_idx, system_name in enumerate(system_names):
axes[0][col_idx].set_title(system_name)
# Set y limits (shared for each row).
for row_idx, statistic_name in enumerate(statistic_names):
axes[row_idx][col_idx].set_ylim(y_limits[statistic_name])
axes[row_idx][col_idx].tick_params(axis='y', which='major', pad=0.1)
# # Remove shared ticks.
# for row_idx in range(len(statistic_names)):
# for col_idx in range(len(system_names)):
# if col_idx > 0:
# axes[row_idx][col_idx].set_yticklabels([])
# if row_idx < len(statistic_names)-1:
# axes[row_idx][col_idx].set_xticklabels([])
for row_idx, statistic_name in enumerate(statistic_names):
axes[row_idx][0].set_ylabel('mean ' + statistic_name + ' [kcal/mol]')
axes[-1][1].set_xlabel('N energy evaluations [M]')
axes[0][1].legend(loc='upper right', bbox_to_anchor=(2.0, 1.48),
fancybox=True, ncol=3)
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'SI_Figure-efficiencies')
os.makedirs(figure_dir_path, exist_ok=True)
output_file_base_path = os.path.join(figure_dir_path, 'absolute-efficiencies')
plt.savefig(output_file_base_path + '.pdf')
# plt.savefig(output_file_base_path + '.png', dpi=600)
# plt.show()
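# The following sketch is illustrative only and is not called by the analysis.
# It shows why dividing the cumulative trapezoid integral by the integration
# range (as done above) yields a running average: for a constant signal the
# running average equals that constant. Recent SciPy versions rename cumtrapz
# to cumulative_trapezoid.
def _example_cumulative_average():
    """Toy example of the cumulative-average statistics used above."""
    import numpy as np
    from scipy import integrate
    x = np.linspace(0.0, 10.0, 101)
    y = np.full_like(x, 0.3)
    running_avg = integrate.cumtrapz(y, x) / (x[1:] - x[0])
    assert np.allclose(running_avg, 0.3)
    return running_avg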
def print_relative_efficiency_table(
submissions, yank_analysis, ci=0.95,
n_bootstrap_samples=100,
print_bias_corrected=False
):
"""Create a table with standard deviation, absolute bias, and RMSE relative efficiency."""
methods = []
# Initialize the table to be converted into a Pandas dataframe.
system_names = ['CB8-G3', 'OA-G3', 'OA-G6']
statistic_names = [r'$e_{\mathrm{std}}$', r'$e_{|\mathrm{bias}|}$', r'$e_{\mathrm{RMSD}}$']
column_names = ['\\makecell{$\Delta$ G \\\\ $[$kcal/mol$]$}', '\\makecell{n eval \\\\ $[$M$]$}'] + statistic_names
# Add columns.
efficiency_table = collections.OrderedDict()
for system_name, column_name in itertools.product(system_names, column_names):
efficiency_table[(system_name, column_name)] = []
for submission in submissions:
        # Collect the methods' names in the given order.
methods.append(submission.paper_name)
mean_free_energies = submission.mean_free_energies()
for system_name in system_names:
            # CB8-G3 calculations for GROMACS/EE did not converge yet, and the
            # long NS-DS/SB protocol was run only on CB8-G3.
if ((submission.name == 'Expanded-ensemble/MBAR' and system_name == 'CB8-G3') or
(submission.paper_name == 'GROMACS/NS-DS/SB-long' and system_name != 'CB8-G3')):
relative_efficiencies, relative_efficiencies_corrected = np.full((2, 3), fill_value=np.nan)
dg = ''
n_force_eval = ''
else:
# Get input for EfficiencyAnalysis.
free_energy_ref, free_energy_sub, asymptotic_free_energy_ref = get_relative_efficiency_input(
submission, yank_analysis, system_name)
# Get the relative efficiencies.
relative_efficiencies, cis = compute_all_relative_efficiencies(
free_energy_ref, free_energy_sub, ci, n_bootstrap_samples,
asymptotic_free_energy_A=asymptotic_free_energy_ref
)
# Recompute relative efficiencies assuming that YANK converged.
if print_bias_corrected:
relative_efficiencies_corrected, cis_corrected = compute_all_relative_efficiencies(
free_energy_ref, free_energy_sub, ci, n_bootstrap_samples)
# Select the data for only this host-guest system.
mean_data_sub = mean_free_energies[mean_free_energies['System name'] == system_name]
# Get the final free energy and number of energy/force evaluations.
dg = mean_data_sub[DG_KEY].values[-1]
dg_CI = mean_data_sub['$\Delta$G CI'].values[-1] # Confidence interval.
dg, dg_CI = reduce_to_first_significant_digit(dg, dg_CI)
n_force_eval = mean_data_sub['N energy evaluations'].values[-1]
# Convert to string format.
dg = '{} $\\pm$ {}'.format(dg, dg_CI)
n_force_eval = str(int(round(n_force_eval / 1e6)))
# Add free energy and cost entries.
efficiency_table[(system_name, column_names[0])].append(dg)
efficiency_table[(system_name, column_names[1])].append(n_force_eval)
# Add efficiency entries for the table.
for statistic_idx, statistic_name in enumerate(statistic_names):
# Gather the format arguments.
rel_effs = [relative_efficiencies[statistic_idx], cis[statistic_idx][0], cis[statistic_idx][1]]
if print_bias_corrected:
rel_effs.append(relative_efficiencies_corrected[statistic_idx])
# Comment this if we don't want to print CIs for the corrected estimate.
rel_effs.extend([cis_corrected[statistic_idx][0], cis_corrected[statistic_idx][1]])
# Print significant digits.
efficiencies_format = []
for e_idx in range(0, len(rel_effs), 3):
rel_eff, low_bound, high_bound = rel_effs[e_idx:e_idx+3]
if high_bound - rel_eff < 0.1 or rel_eff - low_bound < 0.1:
fmt = '{:2.2f}'
else:
fmt = '{:2.1f}'
# Print lower and higher bound as sub and superscripts of the estimate.
efficiencies_format.append(fmt + '$_{{\raisem{{2pt}}{{' + fmt + '}}}}^{{\mathstrut ' + fmt + '}}$')
if np.isnan(rel_effs[0]):
data_entry = ''
# Standard deviation efficiency is not affected by the bias.
elif print_bias_corrected and ('std' not in statistic_name):
data_entry = efficiencies_format[0] + ' (' + efficiencies_format[1] + ')'
data_entry = data_entry.format(*rel_effs)
else:
data_entry = efficiencies_format[0].format(*rel_effs[:3])
# Remove the minus sign from "-0".
data_entry = data_entry.replace('-0.0', '0.0')
data_entry = data_entry.replace('-0.00', '0.00')
efficiency_table[(system_name, statistic_name)].append(data_entry)
# Add row for reference calculation.
methods.append(YANK_METHOD_PAPER_NAME)
# Add free energy and cost entries.
for system_name in system_names:
yank_mean_data = yank_analysis.get_free_energies_from_iteration(
YANK_N_ITERATIONS, system_name=system_name, mean_trajectory=True)
dg = yank_mean_data[DG_KEY].values[-1]
dg_CI = yank_mean_data['$\Delta$G CI'].values[-1] # Confidence interval.
dg, dg_CI = reduce_to_first_significant_digit(dg, dg_CI)
n_force_eval = yank_mean_data['N energy evaluations'].values[-1]
n_force_eval = str(int(round(n_force_eval / 1e6)))
efficiency_table[(system_name, column_names[0])].append('{} $\\pm$ {}'.format(dg, dg_CI))
efficiency_table[(system_name, column_names[1])].append(n_force_eval)
    # All efficiencies are relative to YANK, so its own relative efficiencies are 0 by definition.
for system_name, statistic_name in itertools.product(system_names, statistic_names):
efficiency_table[(system_name, statistic_name)].append('0.0')
# Convert to Pandas Dataframe.
efficiency_table = pd.DataFrame(efficiency_table)
# Set the method's names as index column.
efficiency_table = efficiency_table.assign(Method=methods)
efficiency_table.set_index(keys='Method', inplace=True)
# Print table.
column_format = 'lccccc|ccccc|ccccc'
efficiency_table_latex = efficiency_table.to_latex(column_format=column_format, multicolumn_format='c',
escape=False)
# Make header and reference method bold.
textbf = lambda s: '\\textbf{' + s + '}'
efficiency_table_latex = efficiency_table_latex.replace(YANK_METHOD_PAPER_NAME, textbf(YANK_METHOD_PAPER_NAME))
efficiency_table_latex = efficiency_table_latex.replace('Method', textbf('Method'))
for system_name in system_names:
efficiency_table_latex = efficiency_table_latex.replace(system_name, textbf(system_name))
for column_name in column_names:
efficiency_table_latex = efficiency_table_latex.replace(column_name, textbf(column_name))
print(efficiency_table_latex)
def print_nonequilibrium_relative_efficiencies(nonequilibrium_submissions):
"""Print relative efficiencies w.r.t. for the nonequilibrium estimators table."""
system_names = ['CB8-G3', 'OA-G3', 'OA-G6']
def _get_free_energy_array(submission, system_name, step=1, max_c=100, get_asymptotic=False):
n_replicates = 5
system_data = submission.data[submission.data['System name'] == system_name]
free_energy_array = np.empty(shape=(n_replicates, int(max_c/step)))
for i in range(n_replicates):
system_id = system_name + '-' + str(i)
system_id_data = system_data[system_data['System ID'] == system_id]
free_energy_array[i] = system_id_data[DG_KEY].values[:max_c:step]
if get_asymptotic:
mean_free_energies = submission.mean_free_energies()
asymptotic = mean_free_energies[mean_free_energies['System name'] == system_name][DG_KEY].values[-1]
return free_energy_array, asymptotic
return free_energy_array
# Use GROMACS/NS-DS/SB-long as reference method.
reference_submission = [s for s in nonequilibrium_submissions if s.paper_name == 'GROMACS/NS-DS/SB-long'][0]
# Also remove the other BAR submission.
nonequilibrium_submissions = [s for s in nonequilibrium_submissions if 'GROMACS/NS-DS/SB' not in s.paper_name]
# Get only the first 50 as the 1-directional estimators only have half the cost.
free_energy_ref = {}
asymptotic_ref = {}
for system_name in system_names:
DG, asympt = _get_free_energy_array(reference_submission, system_name, max_c=50, get_asymptotic=True)
free_energy_ref[system_name] = DG
asymptotic_ref[system_name] = asympt
for submission in nonequilibrium_submissions:
print(submission.paper_name, end='')
for system_name in system_names:
free_energy_sub = _get_free_energy_array(submission, system_name, step=2)
rel_eff, cis = compute_all_relative_efficiencies(
free_energy_ref[system_name], free_energy_sub, ci=0.95, n_bootstrap_samples=1000,
asymptotic_free_energy_A=asymptotic_ref[system_name],
asymptotic_free_energy_B=asymptotic_ref[system_name]
)
for i, stat_name in enumerate(['std', 'bias', 'RMSE']):
print(r' & {:.1f}$_{{\raisem{{2pt}}{{{:.1f}}}}}^{{\mathstrut {:.1f}}}$'.format(rel_eff[i], cis[i][0], cis[i][1]), end='')
print(r' \\')
def print_final_prediction_table(submissions, yank_analysis):
"""Plot the table containing the fina binding free energy predictions for all replicates."""
for submission in submissions + [yank_analysis]:
# GROMACS/EE-fullequil predictions are identical to GROMACS/EE
if submission.paper_name == 'GROMACS/EE-fullequil':
continue
if isinstance(submission, YankSamplingAnalysis):
submission_data = yank_analysis.get_free_energies_from_iteration(final_iteration=YANK_N_ITERATIONS)
else:
submission_data = submission.data
submission_data = submission_data[submission_data['Simulation percentage'] == 100]
row_str = submission.paper_name + ' & '
submission_final_DGs = []
for system_id in submission_data['System ID'].unique():
# GROMACS/EE doesn't have predictions for CB8-G3, and the
# GROMACS/NS-DS/SB-long protocol was applied only to CB8-G3.
if (('GROMACS/EE' in submission.paper_name and 'CB8-G3' in system_id) or
(submission.paper_name == 'GROMACS/NS-DS/SB-long' and 'OA' in system_id)):
submission_final_DGs.append('')
continue
dg = submission_data.loc[submission_data['System ID'] == system_id, DG_KEY].values[0]
ddg = submission_data.loc[submission_data['System ID'] == system_id, DDG_KEY].values[0]
dg, ddg = reduce_to_first_significant_digit(dg, ddg)
submission_final_DGs.append(r'{} $\pm$ {}'.format(dg, ddg))
row_str += ' & '.join(submission_final_DGs) + r' \\'
print(row_str)
# =============================================================================
# SUPPORTING INFORMATION - SINGLE TRAJECTORIES
# =============================================================================
def plot_single_trajectories_figures(axes, system_data, system_mean_data,
reference_system_mean_data=None,
plot_errors=True, plot_methods_uncertainties=True):
"""Plot individual free energy trajectories and standard deviations for a single method and system."""
system_name = system_data['System name'].unique()[0]
palette_mean = sns.color_palette('pastel')
submission_mean_color = 'black'
reference_mean_color = palette_mean[9]
# Plot the method uncertainties of the single replicate trajectories.
# First scale the number of energy evaluations.
system_data.loc[:,'N energy evaluations'] /= N_ENERGY_EVALUATIONS_SCALE
# Plot the 5 replicates individual trajectories.
# First remove the initial predictions that are 0.0 (i.e. there is no estimate).
ax = axes[0]
system_data = system_data[system_data[DG_KEY] != 0.0]
sns.lineplot(data=system_data, x='N energy evaluations', y=DG_KEY,
hue='System ID', palette='bright', ax=ax, alpha=0.6)
# Plot the submission mean trajectory with CI.
plot_mean_free_energy(system_mean_data, x='N energy evaluations', ax=ax,
color_mean=submission_mean_color, plot_ci=False,
color_ci=submission_mean_color, label='Best estimate',
scale_n_energy_evaluations=True)
# Plot YANK mean trajectory with CI.
if reference_system_mean_data is not None:
plot_mean_free_energy(reference_system_mean_data, x='N energy evaluations', ax=ax,
color_mean=reference_mean_color, plot_ci=False,
color_ci=reference_mean_color, label='Reference estimate',
scale_n_energy_evaluations=True)
ax.set_title(system_name)
# Add the y-label only on the leftmost Axis.
if system_name != 'CB8-G3':
ax.set_ylabel('')
    # Remove the legend for now, which will be added at the end after tightening up the plot.
ax.get_legend().remove()
# Create a bias axis.
if reference_system_mean_data is not None:
ref_free_energy = reference_free_energies.loc[system_name, DG_KEY]
with sns.axes_style('white'):
ax2 = ax.twinx()
# Plot a vertical line to make the scale.
vertical_line = np.linspace(*ax.get_ylim()) - ref_free_energy
ax2.plot([50] * len(vertical_line), vertical_line, alpha=0.0001)
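            # The nearly transparent vertical line only forces the twin axis to
            # autoscale to the primary axis range shifted by the reference free
            # energy; align_yaxis() below then pins bias = 0 to that reference value.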
ax2.grid(alpha=0.5, linestyle='dashed', zorder=0)
# We add the bias y-label only on the rightmost Axis.
if system_name == 'OA-G6':
ax2.set_ylabel('Bias to reference [kcal/mol]')
# Set the 0 of the twin axis to the YANK reference free energy.
align_yaxis(ax, ref_free_energy, ax2, 0.0)
if plot_errors:
# The x-axis is shared between the 2 rows so we can plot the ticks only in the bottom one.
ax.xaxis.set_ticklabels([])
ax.set_xlabel('')
ax = axes[1]
# REVO uses the mean of the 5 replicates to estimate the
# uncertainty so it doesn't add information.
if plot_methods_uncertainties:
sns.lineplot(data=system_data, x='N energy evaluations', y=DDG_KEY,
hue='System ID', palette='bright', ax=ax, alpha=0.6)
# The legend is added later at the top.
ax.get_legend().remove()
# Plot the standard deviation of the free energy trajectories.
# submission_std = system_mean_data['std']
submission_std = system_mean_data['unbiased_std']
# cost = system_mean_data['Simulation percentage'].values
cost = system_mean_data['N energy evaluations'].values / N_ENERGY_EVALUATIONS_SCALE
ax.plot(cost, submission_std, color=submission_mean_color)
# Plot confidence interval around standard deviation.
submission_std_low_ci = system_mean_data['unbiased_std_low_CI'].values
submission_std_up_ci = system_mean_data['unbiased_std_up_CI'].values
ax.fill_between(cost, submission_std_low_ci, submission_std_up_ci, alpha=0.35, color='gray')
if reference_system_mean_data is not None:
# reference_std = reference_system_mean_data['std']
reference_std = reference_system_mean_data['unbiased_std']
ax.plot(cost, reference_std, color=reference_mean_color)
# Only the central plot shows the x-label.
ax.set_xlabel('')
# Add the y-label only on the leftmost Axis.
if system_name != 'CB8-G3':
ax.set_ylabel('')
else:
ax.set_ylabel('std($\Delta$G) [kcal/mol]')
# Set x limits.
for ax in axes:
ax.set_xlim((0, max(system_data['N energy evaluations'])))
def plot_all_single_trajectories_figures(submissions, yank_analysis, plot_errors=True, output_path_dir=None):
"""Individual plots for each method with the 5 individual free energy and uncertainty trajectories."""
sns.set_style('whitegrid')
sns.set_context('paper')
if output_path_dir is None:
output_path_dir = os.path.join(SAMPLING_PAPER_DIR_PATH, 'SI_Figure-individual-trajectories/')
os.makedirs(output_path_dir, exist_ok=True)
# -------------------- #
# Plot submission data #
# -------------------- #
# Remove nonequilibrium-switching calculations with single-direction estimators.
submissions = [s for s in submissions if ('Jarz' not in s.paper_name and 'Gauss' not in s.paper_name)]
for submission in submissions + [yank_analysis]:
# CB8-G3 calculations for GROMACS/EE did not converge yet.
if submission.name == 'Expanded-ensemble/MBAR':
submission.data = submission.data[submission.data['System name'] != 'CB8-G3']
# REVO uses the mean of the 5 replicates to estimate the
# uncertainty so it doesn't add information.
if 'REVO' in submission.paper_name:
plot_methods_uncertainties = False
else:
plot_methods_uncertainties = True
if not isinstance(submission, YankSamplingAnalysis):
mean_free_energies = submission.mean_free_energies()
unique_system_names = submission.data['System name'].unique()
else:
unique_system_names = sorted(submission.system_names)
# Create a figure with 3 axes (one for each system).
n_systems = len(unique_system_names)
if plot_errors:
# The second row will plot the errors.
fig, axes = plt.subplots(nrows=2, ncols=n_systems, figsize=(7.25, 4.8))
trajectory_axes = axes[0]
else:
fig, axes = plt.subplots(nrows=1, ncols=n_systems, figsize=(7.25, 2.4))
trajectory_axes = axes
# Set figure title.
fig.suptitle(submission.paper_name)
# Determine range of data across systems.
min_DG = np.inf
max_DG = -np.inf
min_dDG = np.inf
max_dDG = -np.inf
# for system_name in unique_system_names:
for ax_idx, system_name in enumerate(unique_system_names):
if isinstance(submission, YankSamplingAnalysis):
data = submission.get_free_energies_from_iteration(final_iteration=YANK_N_ITERATIONS,
system_name=system_name)
mean_data = submission.get_free_energies_from_iteration(final_iteration=YANK_N_ITERATIONS,
system_name=system_name,
mean_trajectory=True)
else:
# Select the data for only this host-guest system.
data = submission.data[submission.data['System name'] == system_name]
mean_data = mean_free_energies[mean_free_energies['System name'] == system_name]
plot_single_trajectories_figures(axes[:,ax_idx], data, mean_data, plot_errors=plot_errors,
reference_system_mean_data=None,
plot_methods_uncertainties=plot_methods_uncertainties)
# Collect max and min data to determine axes range.
min_DG = min(min_DG, min(data[DG_KEY]), min(mean_data[DG_KEY]))
max_DG = max(max_DG, max(data[DG_KEY]), max(mean_data[DG_KEY]))
min_dDG = min(min_dDG, min(data[DDG_KEY]), min(mean_data['std']))
max_dDG = max(max_dDG, max(data[DDG_KEY]), max(mean_data['std']))
# Set limits.
for i in range(len(unique_system_names)):
axes[0][i].set_ylim((min_DG, max_DG))
axes[1][i].set_ylim((min_dDG, max_dDG))
# Keep ticks only in external plots.
axes[0][i].set_xticklabels([])
for i in range(1, len(unique_system_names)):
axes[0][i].set_yticklabels([])
axes[1][i].set_yticklabels([])
# The x-label is shown only in the central plot.
axes[-1][1].set_xlabel('N energy evaluations [10$^6$]')
plt.tight_layout(pad=0.2, rect=[0.0, 0.0, 1.0, 0.85])
# Create legend.
# The first handle/label is the legend title "System ID" so we get rid of it.
handles, labels = trajectory_axes[0].get_legend_handles_labels()
labels = ['replicate ' + str(i) for i in range(5)] + labels[6:]
bbox_to_anchor = (-0.1, 1.35)
trajectory_axes[0].legend(handles=handles[1:], labels=labels, loc='upper left',
bbox_to_anchor=bbox_to_anchor, ncol=6, fancybox=True,
labelspacing=0.8, handletextpad=0.5, columnspacing=1.2)
# Save figure.
output_file_name = 'replicates-{}-{}'.format(submission.file_name, submission.receipt_id)
plt.savefig(os.path.join(output_path_dir, output_file_name + '.pdf'))
# plt.savefig(os.path.join(output_path_dir, output_file_name + '.png'), dpi=300)
# plt.show()
# =============================================================================
# SUPPORTING INFORMATION - HREX/MBAR STATISTICAL INEFFICIENCY ANALYSIS
# =============================================================================
def plot_hrex_stat_ineff_trajectories():
"""Individual plots for HREX with the 5 individual free energy and uncertainty trajectories
as a function of the statistical inefficiency."""
sns.set_context('paper')
# Limits of y-axis (free energies, uncertainties) by system.
y_limits = {
'CB8-G3': [(-14, -10), (0, 2)],
'OA-G3': [(-9, -5), (0, 1.5)],
'OA-G6': [(-9, -5), (0, 1.5)],
}
# Create output dir.
output_path_dir = os.path.join(SAMPLING_PAPER_DIR_PATH, 'SI_Figure-statistical-inefficiency')
os.makedirs(output_path_dir, exist_ok=True)
# Read the data, which is organized by statistical inefficiency.
# We'll then plot by system.
yank_analysis_by_statineff = collections.OrderedDict()
for stat_ineff in ['5', '10', '20', '50', '100', '200']:
data_dir_path = os.path.join('YankAnalysis', 'CorrelationAnalysis', 'statineff-{}'.format(stat_ineff))
yank_analysis = YankSamplingAnalysis(data_dir_path)
yank_analysis_by_statineff[stat_ineff] = yank_analysis
# Plot by system.
for system_name in ['CB8-G3', 'OA-G3', 'OA-G6']:
fig, axes = plt.subplots(nrows=4, ncols=3, figsize=(7.25, 9.8))
# Set figure title.
fig.suptitle('HREX uncertainty predictions as a function of\n'
'statistical inefficiency for {}'.format(system_name))
# for system_name in unique_system_names:
for stat_ineff_idx, stat_ineff in enumerate(yank_analysis_by_statineff):
yank_analysis = yank_analysis_by_statineff[stat_ineff]
data = yank_analysis.get_free_energies_from_iteration(final_iteration=YANK_N_ITERATIONS,
system_name=system_name)
mean_data = yank_analysis.get_free_energies_from_iteration(final_iteration=YANK_N_ITERATIONS,
system_name=system_name,
mean_trajectory=True)
# Plot on the correct axis.
DG_row = 2*int(stat_ineff_idx / 3)
col = stat_ineff_idx % 3
stat_ineff_axes = axes[DG_row:DG_row+2, col]
plot_single_trajectories_figures(stat_ineff_axes, data, mean_data, plot_errors=True,
reference_system_mean_data=None,
plot_methods_uncertainties=True)
# Set titles and limits.
title = 'Statistical inefficiency: {} ps'.format(stat_ineff)
if DG_row > 0:
title = '\n' + title
stat_ineff_axes[0].set_title(title, fontweight='bold')
stat_ineff_axes[0].set_ylim(y_limits[system_name][0])
stat_ineff_axes[1].set_ylim(y_limits[system_name][1])
stat_ineff_axes[0].set_ylabel('$\Delta$G [kcal/mol]')
stat_ineff_axes[1].set_ylabel('std($\Delta$G) [kcal/mol]')
# Keep ticks only in external plots.
for row_idx in range(axes.shape[0]):
for col_idx in range(axes.shape[1]):
if row_idx != len(axes[0]) - 1:
axes[row_idx][col_idx].set_xticklabels([])
if col_idx != 0:
axes[row_idx][col_idx].set_ylabel('')
axes[row_idx][col_idx].set_yticklabels([])
# Set x label.
axes[-1][1].set_xlabel('N energy evaluations [10$^6$]')
plt.tight_layout(pad=0.0, rect=[0.0, 0.0, 1.0, 0.88])
# Create legend.
# The first handle/label is the legend title "System ID" so we get rid of it.
handles, labels = axes[0][0].get_legend_handles_labels()
labels = ['replicate ' + str(i) for i in range(5)] + labels[6:]
bbox_to_anchor = (0.05, 1.35)
axes[0][0].legend(handles=handles[1:], labels=labels, loc='upper left',
bbox_to_anchor=bbox_to_anchor, ncol=6, fancybox=True,
labelspacing=0.8, handletextpad=0.5, columnspacing=1.2)
# Save figure.
output_file_name = 'statineff-{}'.format(system_name)
plt.savefig(os.path.join(output_path_dir, output_file_name + '.pdf'))
# plt.savefig(os.path.join(output_path_dir, output_file_name + '.png'), dpi=300)
# plt.show()
# =============================================================================
# MAIN
# =============================================================================
if __name__ == '__main__':
sns.set_style('whitegrid')
sns.set_context('paper')
# Read reference values.
yank_analysis = YankSamplingAnalysis(YANK_ANALYSIS_DIR_PATH)
# Obtain free energies and final reference values.
mean_reference_free_energies = yank_analysis.get_free_energies_from_iteration(YANK_N_ITERATIONS, mean_trajectory=True)
reference_free_energies = mean_reference_free_energies[mean_reference_free_energies['Simulation percentage'] == 100]
reference_free_energies.set_index('System name', inplace=True)
# Compute efficiency of reference.
reference_efficiencies = {}
for system_name in mean_reference_free_energies['System name'].unique():
        mean_data = mean_reference_free_energies[mean_reference_free_energies['System name'] == system_name]
reference_efficiencies[system_name], n_discarded = fit_efficiency(mean_data)
# Import user map.
with open('../SubmissionsDoNotUpload/SAMPL6_user_map.csv', 'r') as f:
user_map = pd.read_csv(f)
# Load submissions data. We do OA and TEMOA together.
all_submissions = load_submissions(SamplingSubmission, SAMPLING_SUBMISSIONS_DIR_PATH, user_map)
# Remove AMBER/TI.
all_submissions = [s for s in all_submissions if s.name not in ['Langevin/Virtual Bond/TI']]
# Create an extra submission for GROMACS/EE where the full cost of equilibration has been taken into account.
gromacs_ee_submission = copy.deepcopy([s for s in all_submissions if s.paper_name == 'GROMACS/EE'][0])
gromacs_ee_submission.paper_name = 'GROMACS/EE-fullequil'
gromacs_ee_submission.file_name = 'EENVT-fullequil'
data = gromacs_ee_submission.data # Shortcut.
mean_free_energies = gromacs_ee_submission.mean_free_energies()
for system_name in ['OA-G3', 'OA-G6']:
mean_data = mean_free_energies[mean_free_energies['System name'] == system_name]
first_nonzero_idx = np.nonzero(mean_data[DG_KEY].values)[0][0]
full_equilibration_cost = mean_data['N energy evaluations'].values[first_nonzero_idx] * 4
for i in data[data['System name'] == system_name].index:
data.at[i, 'N energy evaluations'] += full_equilibration_cost
all_submissions.append(gromacs_ee_submission)
    # Sort the submissions to have all plots and tables in the same order.
all_submissions = sorted(all_submissions, key=lambda s: s.paper_name)
# Separate the main submissions from the data about nonequilibrium estimators.
main_submissions = [s for s in all_submissions if not ('Jarz' in s.paper_name or 'Gauss' in s.paper_name)]
noneq_submissions = [s for s in all_submissions if 'NS' in s.paper_name]
# Export YANK analysis and submissions to CSV/JSON tables.
yank_analysis.export(os.path.join(SAMPLING_DATA_DIR_PATH, 'reference_free_energies'))
for s in main_submissions:
file_base_path = os.path.join(SAMPLING_DATA_DIR_PATH, s.receipt_id + '-reference')
yank_analysis.export_by_submission(file_base_path, s)
export_submissions(all_submissions, reference_free_energies)
# Create example trajectory for the figure describing the challenge process.
plot_example_bias_variance(yank_analysis, max_n_eval_percentage=0.4, mixed_proportion=0.3)
# Cartoon explaining mean error and relative efficiency.
plot_mean_error_cartoon()
# Create figure with free energy, standard deviation, and bias as a function of computational cost.
plot_all_entries_trajectory(main_submissions, yank_analysis, zoomed=False)
plot_all_entries_trajectory(main_submissions, yank_analysis, zoomed=True)
# Create results and efficiency table.
print_relative_efficiency_table(main_submissions, yank_analysis, print_bias_corrected=False)
# Plot nonequilibrium-switching single-direction estimator.
plot_all_nonequilibrium_switching(noneq_submissions)
# Plot sensitivity analysis figure.
plot_restraint_and_barostat_analysis()
# Plot figure for HREX bias analysis.
plot_yank_bias()
# Supporting information
# ----------------------
# Absolute/relative efficiency as a function of the computational cost.
plot_relative_efficiencies(main_submissions, yank_analysis)
plot_relative_efficiencies(main_submissions, yank_analysis, ci=None, same_plot=True)
plot_absolute_efficiencies(main_submissions, yank_analysis)
# Relative efficiency for uni/bi-directional estimators.
print_nonequilibrium_relative_efficiencies(noneq_submissions)
    # Print the final-prediction table for all submissions.
print_final_prediction_table(all_submissions, yank_analysis)
# Plot individual trajectories.
plot_all_single_trajectories_figures(all_submissions, yank_analysis)
# Plot statistical inefficiency analysis.
plot_hrex_stat_ineff_trajectories()
# Supporting information for bias section.
output_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'SI_Figure-bias_hrex')
plot_decomposition('CB8-G3', starting_iteration=5, type='phase',
                       output_file_path=output_dir_path + '/free-energy-phase-decomposition.pdf')
plot_decomposition('CB8-G3', starting_iteration=5, type='entropy-enthalpy',
output_file_path=output_dir_path + '/free-energy-entropy-decomposition.pdf')
| mit |
dylanGeng/BuildingMachineLearningSystemsWithPython | ch09/fft.py | 24 | 3673 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import sys
import os
import glob
import numpy as np
import scipy
import scipy.io.wavfile
from utils import GENRE_DIR, CHART_DIR
import matplotlib.pyplot as plt
from matplotlib.ticker import EngFormatter
def write_fft(fft_features, fn):
"""
Write the FFT features to separate files to speed up processing.
"""
base_fn, ext = os.path.splitext(fn)
data_fn = base_fn + ".fft"
np.save(data_fn, fft_features)
print("Written "%data_fn)
def create_fft(fn):
sample_rate, X = scipy.io.wavfile.read(fn)
fft_features = abs(scipy.fft(X)[:1000])
write_fft(fft_features, fn)
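# Illustrative usage of create_fft above and read_fft below (the path and
# genre names are hypothetical; np.save appends the '.npy' suffix):
#   create_fft("genres/jazz/jazz.00000.wav")   # caches genres/jazz/jazz.00000.fft.npy
#   X, y = read_fft(["classical", "jazz"])     # loads the cached features per genre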
def read_fft(genre_list, base_dir=GENRE_DIR):
X = []
y = []
for label, genre in enumerate(genre_list):
genre_dir = os.path.join(base_dir, genre, "*.fft.npy")
file_list = glob.glob(genre_dir)
assert(file_list), genre_dir
for fn in file_list:
fft_features = np.load(fn)
X.append(fft_features[:2000])
y.append(label)
return np.array(X), np.array(y)
def plot_wav_fft(wav_filename, desc=None):
plt.clf()
plt.figure(num=None, figsize=(6, 4))
sample_rate, X = scipy.io.wavfile.read(wav_filename)
spectrum = np.fft.fft(X)
freq = np.fft.fftfreq(len(X), 1.0 / sample_rate)
plt.subplot(211)
num_samples = 200.0
plt.xlim(0, num_samples / sample_rate)
plt.xlabel("time [s]")
plt.title(desc or wav_filename)
    plt.plot(np.arange(num_samples) / sample_rate, X[:int(num_samples)])
plt.grid(True)
plt.subplot(212)
plt.xlim(0, 5000)
plt.xlabel("frequency [Hz]")
plt.xticks(np.arange(5) * 1000)
if desc:
desc = desc.strip()
fft_desc = desc[0].lower() + desc[1:]
else:
fft_desc = wav_filename
plt.title("FFT of %s" % fft_desc)
plt.plot(freq, abs(spectrum), linewidth=5)
plt.grid(True)
plt.tight_layout()
rel_filename = os.path.split(wav_filename)[1]
plt.savefig("%s_wav_fft.png" % os.path.splitext(rel_filename)[0],
bbox_inches='tight')
plt.show()
def plot_wav_fft_demo():
plot_wav_fft("sine_a.wav", "400Hz sine wave")
plot_wav_fft("sine_b.wav", "3,000Hz sine wave")
plot_wav_fft("sine_mix.wav", "Mixed sine wave")
def plot_specgram(ax, fn):
sample_rate, X = scipy.io.wavfile.read(fn)
ax.specgram(X, Fs=sample_rate, xextent=(0, 30))
def plot_specgrams(base_dir=CHART_DIR):
"""
Plot a bunch of spectrograms of wav files in different genres
"""
plt.clf()
genres = ["classical", "jazz", "country", "pop", "rock", "metal"]
num_files = 3
f, axes = plt.subplots(len(genres), num_files)
for genre_idx, genre in enumerate(genres):
for idx, fn in enumerate(glob.glob(os.path.join(GENRE_DIR, genre, "*.wav"))):
if idx == num_files:
break
axis = axes[genre_idx, idx]
axis.yaxis.set_major_formatter(EngFormatter())
axis.set_title("%s song %i" % (genre, idx + 1))
plot_specgram(axis, fn)
specgram_file = os.path.join(base_dir, "Spectrogram_Genres.png")
plt.savefig(specgram_file, bbox_inches="tight")
plt.show()
if __name__ == "__main__":
# for fn in glob.glob(os.path.join(sys.argv[1], "*.wav")):
# create_fft(fn)
# plot_decomp()
if len(sys.argv) > 1:
plot_wav_fft(sys.argv[1], desc="some sample song")
else:
plot_wav_fft_demo()
plot_specgrams()
| mit |
equialgo/scikit-learn | examples/hetero_feature_union.py | 81 | 6241 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <matt.terry@gmail.com>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to scikit-learn feature
    matrices (where the first index corresponds to sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(test.target, y))
| bsd-3-clause |
lhirschfeld/JargonBot | custombot.py | 1 | 3061 | import pickle
import praw
import random
from textblob import TextBlob
from datetime import datetime
from sklearn import linear_model
class RedditBot:
"""A class that performs basic operations, working with Reddit's
PRAW API."""
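    # Illustrative instantiation (the bot name 'jargonbot' is hypothetical and
    # must match the local praw configuration for that bot):
    #   bot = RedditBot('jargonbot')
    #   bot.updateIds()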
def __init__(self, botName):
# Setup the bot and primary variables.
self.r = praw.Reddit(botName)
self.responses = []
with open('ids.pickle', 'rb') as handle:
try:
self.ids = pickle.load(handle)
except EOFError:
self.ids = []
with open('models.pickle', 'rb') as handle:
try:
self.models = pickle.load(handle)
except EOFError:
self.models = {}
def updateIds(self):
# Save the new ids of comments that have been responded to.
with open('ids.pickle', 'wb') as handle:
pickle.dump(self.ids, handle, protocol=pickle.HIGHEST_PROTOCOL)
def createModel(self, sub, init_fit):
new_model = linear_model.LinearRegression()
new_model.fit(init_fit[0], init_fit[1])
# TODO: Create sub class that stores this data.
self.models[sub] = (new_model, 1, init_fit[0], init_fit[1])
with open('models.pickle', 'wb') as handle:
pickle.dump(self.models, handle, protocol=pickle.HIGHEST_PROTOCOL)
def updateModels(self, modelParams):
# Model params is a list of strings which contains the keys in
# each result which should be used to update the model.
        # Models is a dictionary with a tuple at each key containing:
# (linear regression, randomness rate, x fits, y fits)
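        # For example (illustrative values only), self.models['learnpython']
        # might hold (LinearRegression(), 0.96, [[2, 300]], [5.0]).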
currentTime = datetime.now()
        oldResponses = [r for r in self.responses
                        if (currentTime - r["time"]).total_seconds() > 3600]
        self.responses = [r for r in self.responses
                          if (currentTime - r["time"]).total_seconds() < 3600]
for r in oldResponses:
result = 0
url = "https://reddit.com/" + r["sID"] + "?comment=" + r["cID"]
submission = self.r.get_submission(url=url)
comment_queue = submission.comments[:]
if comment_queue:
com = comment_queue.pop(0)
result += com.score
comment_queue.extend(com.replies)
while comment_queue:
com = comment_queue.pop(0)
                    text = TextBlob(com.body)
result += text.sentiment.polarity * com.score
x = []
for key in modelParams:
x.append(r[key])
# Get old fits
            self.models[r["sub"]][2].append(x)
            self.models[r["sub"]][3].append(result)
            x_fits = self.models[r["sub"]][2]
            y_fits = self.models[r["sub"]][3]
            self.models[r["sub"]][0].fit(x_fits, y_fits)
            # Update odds of random choice (the stored tuple is rebuilt because
            # tuples are immutable).
            model, rand_rate, xs, ys = self.models[r["sub"]]
            self.models[r["sub"]] = (model, rand_rate * 0.96, xs, ys)
with open('models.pickle', 'wb') as handle:
pickle.dump(self.models, handle, protocol=pickle.HIGHEST_PROTOCOL)
| mit |
craigcitro/pydatalab | tests/bigquery/schema_tests.py | 6 | 4284 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import collections
import pandas
import sys
import unittest
import google.datalab.bigquery
import google.datalab.utils
class TestCases(unittest.TestCase):
def test_schema_from_dataframe(self):
df = TestCases._create_data_frame()
result = google.datalab.bigquery.Schema.from_data(df)
self.assertEqual(google.datalab.bigquery.Schema.from_data(TestCases._create_inferred_schema()),
result)
def test_schema_from_data(self):
variant1 = [
3,
2.0,
True,
['cow', 'horse', [0, []]]
]
variant2 = collections.OrderedDict()
variant2['Column1'] = 3
variant2['Column2'] = 2.0
variant2['Column3'] = True
variant2['Column4'] = collections.OrderedDict()
variant2['Column4']['Column1'] = 'cow'
variant2['Column4']['Column2'] = 'horse'
variant2['Column4']['Column3'] = collections.OrderedDict()
variant2['Column4']['Column3']['Column1'] = 0
variant2['Column4']['Column3']['Column2'] = collections.OrderedDict()
master = [
{'name': 'Column1', 'type': 'INTEGER'},
{'name': 'Column2', 'type': 'FLOAT'},
{'name': 'Column3', 'type': 'BOOLEAN'},
{'name': 'Column4', 'type': 'RECORD', 'fields': [
{'name': 'Column1', 'type': 'STRING'},
{'name': 'Column2', 'type': 'STRING'},
{'name': 'Column3', 'type': 'RECORD', 'fields': [
{'name': 'Column1', 'type': 'INTEGER'},
{'name': 'Column2', 'type': 'RECORD', 'fields': []}
]}
]}
]
schema_master = google.datalab.bigquery.Schema(master)
with self.assertRaises(Exception) as error1:
google.datalab.bigquery.Schema.from_data(variant1)
if sys.version_info[0] == 3:
self.assertEquals('Cannot create a schema from heterogeneous list [3, 2.0, True, ' +
'[\'cow\', \'horse\', [0, []]]]; perhaps you meant to use ' +
'Schema.from_record?', str(error1.exception))
else:
self.assertEquals('Cannot create a schema from heterogeneous list [3, 2.0, True, ' +
'[u\'cow\', u\'horse\', [0, []]]]; perhaps you meant to use ' +
'Schema.from_record?', str(error1.exception))
schema3 = google.datalab.bigquery.Schema.from_data([variant1])
schema4 = google.datalab.bigquery.Schema.from_data([variant2])
schema5 = google.datalab.bigquery.Schema.from_data(master)
schema6 = google.datalab.bigquery.Schema.from_record(variant1)
schema7 = google.datalab.bigquery.Schema.from_record(variant2)
self.assertEquals(schema_master, schema3, 'schema inferred from list of lists with from_data')
self.assertEquals(schema_master, schema4, 'schema inferred from list of dicts with from_data')
self.assertEquals(schema_master, schema5, 'schema inferred from BQ schema list with from_data')
self.assertEquals(schema_master, schema6, 'schema inferred from list with from_record')
self.assertEquals(schema_master, schema7, 'schema inferred from dict with from_record')
@staticmethod
def _create_data_frame():
data = {
'some': [
0, 1, 2, 3
],
'column': [
'r0', 'r1', 'r2', 'r3'
],
'headers': [
10.0, 10.0, 10.0, 10.0
]
}
return pandas.DataFrame(data)
@staticmethod
def _create_inferred_schema(extra_field=None):
schema = [
{'name': 'some', 'type': 'INTEGER'},
{'name': 'column', 'type': 'STRING'},
{'name': 'headers', 'type': 'FLOAT'},
]
if extra_field:
schema.append({'name': extra_field, 'type': 'INTEGER'})
return schema
| apache-2.0 |
aje/POT | examples/plot_optim_OTreg.py | 2 | 2940 | # -*- coding: utf-8 -*-
"""
==================================
Regularized OT with generic solver
==================================
Illustrates the use of the generic solver for regularized OT with
user-designed regularization term. It uses Conditional gradient as in [6] and
generalized Conditional Gradient as proposed in [5][7].
[5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, Optimal Transport for
Domain Adaptation, in IEEE Transactions on Pattern Analysis and Machine
Intelligence , vol.PP, no.99, pp.1-1.
[6] Ferradans, S., Papadakis, N., Peyré, G., & Aujol, J. F. (2014).
Regularized discrete optimal transport. SIAM Journal on Imaging Sciences,
7(3), 1853-1882.
[7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized
conditional gradient: analysis of convergence and applications.
arXiv preprint arXiv:1510.06567.
"""
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
##############################################################################
# Generate data
# -------------
#%% parameters
n = 100 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
# Gaussian distributions
a = ot.datasets.get_1D_gauss(n, m=20, s=5) # m= mean, s= std
b = ot.datasets.get_1D_gauss(n, m=60, s=10)
# loss matrix
M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)))
M /= M.max()
##############################################################################
# Solve EMD
# ---------
#%% EMD
G0 = ot.emd(a, b, M)
pl.figure(3, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, G0, 'OT matrix G0')
##############################################################################
# Solve EMD with Frobenius norm regularization
# --------------------------------------------
#%% Example with Frobenius norm regularization
def f(G):
return 0.5 * np.sum(G**2)
def df(G):
return G
reg = 1e-1
Gl2 = ot.optim.cg(a, b, M, reg, f, df, verbose=True)
pl.figure(3)
ot.plot.plot1D_mat(a, b, Gl2, 'OT matrix Frob. reg')
##############################################################################
# Solve EMD with entropic regularization
# --------------------------------------
#%% Example with entropic regularization
def f(G):
return np.sum(G * np.log(G))
def df(G):
return np.log(G) + 1.
reg = 1e-3
Ge = ot.optim.cg(a, b, M, reg, f, df, verbose=True)
pl.figure(4, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Ge, 'OT matrix Entrop. reg')
##############################################################################
# Solve EMD with Frobenius norm + entropic regularization
# -------------------------------------------------------
#%% Example with Frobenius norm + entropic regularization with gcg
def f(G):
return 0.5 * np.sum(G**2)
def df(G):
return G
reg1 = 1e-3
reg2 = 1e-1
Gel2 = ot.optim.gcg(a, b, M, reg1, reg2, f, df, verbose=True)
pl.figure(5, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Gel2, 'OT entropic + matrix Frob. reg')
pl.show()
| mit |
igabriel85/dmon-adp | adpformater/adpformater.py | 1 | 1615 | import pandas as pd
class DataFormatter():
def __init__(self, dataloc):
self.dataloc = dataloc
def aggJsonToCsv(self):
return "CSV file"
def expTimestamp(self):
return "Expand metric timestamp"
def window(self):
return "Window metrics"
def pivot(self):
return "Pivot values"
def addID(self):
return "Add new ID as index"
def removeID(self):
return "Remove selected column as index"
def renameHeader(self):
return "Rename headers"
def normalize(self):
return "Normalize data"
def denormalize(self):
return "Denormalize data"
input_table = pd.read_csv("metrics.csv")
for index, row in input_table.iterrows():
input_table = input_table.append([row]*9)
input_table = input_table.sort_values(['row ID'])
input_table = input_table.reset_index(drop=True)
for index, rows in input_table.iterrows():
if int(index) > 59:
print "Index to big!"
time = rows[0].split(", ", 1) #In Knime row for timestamp is row(55) last one
timeHour = time[1].split(":", 2)
timeHourSeconds = timeHour[2].split(".", 1)
timeHourSecondsDecimal = timeHour[2].split(".", 1)
timeHourSecondsDecimal[0] = str(index)
if len(timeHourSecondsDecimal[0]) == 1:
timeHourSecondsDecimal[0] = '0%s' %timeHourSecondsDecimal[0]
decimal = '.'.join(timeHourSecondsDecimal)
timeHour[2] = decimal
timenew = ':'.join(timeHour)
time[1] = timenew
finalString = ', '.join(time)
input_table.set_value(index, 'row ID', finalString)
input_table.to_csv('out.csv')
| apache-2.0 |
tuanvu216/udacity-course | intro_to_machine_learning/lesson/lesson_4_choose_your_own_algorithm/your_algorithm.py | 1 | 2628 | #!/usr/bin/python
import matplotlib.pyplot as plt
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture
from time import time
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow" points mixed
### in together--separate them so we can give them different colors in the scatterplot,
### and visually identify them
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]
#### initial visualization
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.scatter(bumpy_fast, grade_fast, color = "b", label="fast")
plt.scatter(bumpy_slow, grade_slow, color = "r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
plt.show()
#################################################################################
### your code here! name your classifier object clf if you want the
### visualization code (prettyPicture) to show you the decision boundary
# K Nearest Neighbor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
clf = KNeighborsClassifier(n_neighbors=1)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
# Random Forest
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
clf = RandomForestClassifier(n_estimators=10)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
# AdaBoost
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
clf = AdaBoostClassifier(n_estimators=100)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
try:
prettyPicture(clf, features_test, labels_test)
except NameError:
pass
| mit |
manuelli/director | src/python/director/planplayback.py | 1 | 7857 | import os
import vtkAll as vtk
import math
import time
import re
import numpy as np
from director.timercallback import TimerCallback
from director import objectmodel as om
from director.simpletimer import SimpleTimer
from director.utime import getUtime
from director import robotstate
import copy
import pickle
import scipy.interpolate
def asRobotPlan(msg):
'''
If the given message is a robot_plan_with_supports_t then this function returns
the plan message contained within it. For any other message type, this function
just returns its input argument.
'''
try:
import drc as lcmdrc
except ImportError:
pass
else:
if isinstance(msg, lcmdrc.robot_plan_with_supports_t):
return msg.plan
return msg
class PlanPlayback(object):
def __init__(self):
self.animationCallback = None
self.animationTimer = None
self.interpolationMethod = 'slinear'
self.playbackSpeed = 1.0
self.jointNameRegex = ''
@staticmethod
def getPlanPoses(msgOrList):
if isinstance(msgOrList, list):
messages = msgOrList
allPoseTimes, allPoses = PlanPlayback.getPlanPoses(messages[0])
for msg in messages[1:]:
poseTimes, poses = PlanPlayback.getPlanPoses(msg)
poseTimes += allPoseTimes[-1]
allPoseTimes = np.hstack((allPoseTimes, poseTimes[1:]))
allPoses += poses[1:]
return allPoseTimes, allPoses
else:
msg = asRobotPlan(msgOrList)
poses = []
poseTimes = []
for plan in msg.plan:
pose = robotstate.convertStateMessageToDrakePose(plan)
poseTimes.append(plan.utime / 1e6)
poses.append(pose)
return np.array(poseTimes), poses
@staticmethod
def getPlanElapsedTime(msg):
msg = asRobotPlan(msg)
startTime = msg.plan[0].utime
endTime = msg.plan[-1].utime
return (endTime - startTime) / 1e6
@staticmethod
def mergePlanMessages(plans):
msg = copy.deepcopy(plans[0])
for plan in plans[1:]:
plan = copy.deepcopy(plan)
lastTime = msg.plan[-1].utime
for state in plan.plan:
state.utime += lastTime
msg.plan_info += plan.plan_info
msg.plan += plan.plan
msg.num_states = len(msg.plan)
return msg
@staticmethod
def isPlanInfoFeasible(info):
return 0 <= info < 10
@staticmethod
def isPlanFeasible(plan):
plan = asRobotPlan(plan)
return plan is not None and (max(plan.plan_info) < 10 and min(plan.plan_info) >= 0)
def stopAnimation(self):
if self.animationTimer:
self.animationTimer.stop()
    def setInterpolationMethod(self, method):
self.interpolationMethod = method
def playPlan(self, msg, jointController):
self.playPlans([msg], jointController)
def playPlans(self, messages, jointController):
assert len(messages)
poseTimes, poses = self.getPlanPoses(messages)
self.playPoses(poseTimes, poses, jointController)
def getPoseInterpolatorFromPlan(self, message):
poseTimes, poses = self.getPlanPoses(message)
return self.getPoseInterpolator(poseTimes, poses)
def getPoseInterpolator(self, poseTimes, poses, unwrap_rpy=True):
if unwrap_rpy:
poses = np.array(poses, copy=True)
poses[:,3:6] = np.unwrap(poses[:,3:6],axis=0)
if self.interpolationMethod in ['slinear', 'quadratic', 'cubic']:
f = scipy.interpolate.interp1d(poseTimes, poses, axis=0, kind=self.interpolationMethod)
elif self.interpolationMethod == 'pchip':
f = scipy.interpolate.PchipInterpolator(poseTimes, poses, axis=0)
return f
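    # Illustrative use of the interpolator returned above (poseTimes and poses
    # as produced by getPlanPoses):
    #   f = self.getPoseInterpolator(poseTimes, poses)
    #   midway_pose = f(0.5 * (poseTimes[0] + poseTimes[-1]))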
def getPlanPoseMeshes(self, messages, jointController, robotModel, numberOfSamples):
poseTimes, poses = self.getPlanPoses(messages)
f = self.getPoseInterpolator(poseTimes, poses)
sampleTimes = np.linspace(poseTimes[0], poseTimes[-1], numberOfSamples)
meshes = []
for sampleTime in sampleTimes:
pose = f(sampleTime)
jointController.setPose('plan_playback', pose)
polyData = vtk.vtkPolyData()
robotModel.model.getModelMesh(polyData)
meshes.append(polyData)
return meshes
def showPoseAtTime(self, time, jointController, poseInterpolator):
pose = poseInterpolator(time)
jointController.setPose('plan_playback', pose)
def playPoses(self, poseTimes, poses, jointController):
f = self.getPoseInterpolator(poseTimes, poses)
timer = SimpleTimer()
def updateAnimation():
tNow = timer.elapsed() * self.playbackSpeed
if tNow > poseTimes[-1]:
pose = poses[-1]
jointController.setPose('plan_playback', pose)
if self.animationCallback:
self.animationCallback()
return False
pose = f(tNow)
jointController.setPose('plan_playback', pose)
if self.animationCallback:
self.animationCallback()
self.animationTimer = TimerCallback()
self.animationTimer.targetFps = 60
self.animationTimer.callback = updateAnimation
self.animationTimer.start()
updateAnimation()
def picklePlan(self, filename, msg):
poseTimes, poses = self.getPlanPoses(msg)
pickle.dump((poseTimes, poses), open(filename, 'w'))
def getMovingJointNames(self, msg):
poseTimes, poses = self.getPlanPoses(msg)
diffs = np.diff(poses, axis=0)
jointIds = np.unique(np.where(diffs != 0.0)[1])
jointNames = [robotstate.getDrakePoseJointNames()[jointId] for jointId in jointIds]
return jointNames
def plotPlan(self, msg):
poseTimes, poses = self.getPlanPoses(msg)
self.plotPoses(poseTimes, poses)
def plotPoses(self, poseTimes, poses):
import matplotlib.pyplot as plt
poses = np.array(poses)
if self.jointNameRegex:
jointIds = range(poses.shape[1])
else:
diffs = np.diff(poses, axis=0)
jointIds = np.unique(np.where(diffs != 0.0)[1])
jointNames = [robotstate.getDrakePoseJointNames()[jointId] for jointId in jointIds]
jointTrajectories = [poses[:,jointId] for jointId in jointIds]
seriesNames = []
sampleResolutionInSeconds = 0.01
numberOfSamples = (poseTimes[-1] - poseTimes[0]) / sampleResolutionInSeconds
xnew = np.linspace(poseTimes[0], poseTimes[-1], numberOfSamples)
fig = plt.figure()
ax = fig.add_subplot(111)
for jointId, jointName, jointTrajectory in zip(jointIds, jointNames, jointTrajectories):
if self.jointNameRegex and not re.match(self.jointNameRegex, jointName):
continue
x = poseTimes
y = jointTrajectory
y = np.rad2deg(y)
if self.interpolationMethod in ['slinear', 'quadratic', 'cubic']:
f = scipy.interpolate.interp1d(x, y, kind=self.interpolationMethod)
elif self.interpolationMethod == 'pchip':
f = scipy.interpolate.PchipInterpolator(x, y)
ax.plot(x, y, 'ko')
seriesNames.append(jointName + ' points')
ax.plot(xnew, f(xnew), '-')
seriesNames.append(jointName + ' ' + self.interpolationMethod)
ax.legend(seriesNames, loc='upper right').draggable()
ax.set_xlabel('time (s)')
ax.set_ylabel('joint angle (deg)')
ax.set_title('joint trajectories')
plt.show()
| bsd-3-clause |
rgllm/uminho | 04/CN/TP3/src/src/parser/PsoTools.py | 1 | 4783 | import itertools
import json
import matplotlib.pyplot as plt
from matplotlib import style
import os
style.use('ggplot')
import numpy as np
from pprint import pprint
from os.path import basename
xrange=range
class PsoTools(object):
def __init__(self):
pass
# Convert a data raw file to a json file
def rawToJson(self, inputFilePath, outputFilePath):
inFile = open(inputFilePath, mode='r')
outFile = open(outputFilePath, mode='w')
meta_data = dict.fromkeys(['nb_customers', 'nb_depots',
'vehicle_cap', 'vehicle_cost', 'cost_type'])
cust_dict = dict.fromkeys(['x', 'y', 'demand'])
dep_dict = dict.fromkeys(['x', 'y', 'capacity'])
customers = {}
depots = {}
# Number of customers and available depots
nb_customers = int(inFile.readline())
nb_depots = int(inFile.readline())
meta_data['nb_customers'] = nb_customers
meta_data['nb_depots'] = nb_depots
inFile.readline() # Empty line
# Depots cordinates
for i, line in enumerate(inFile):
if i < nb_depots:
x = float(line.split()[0])
y = float(line.split()[1])
depots['d'+str(i)] = {}
depots['d'+str(i)]['x'] = x
depots['d'+str(i)]['y'] = y
else:
i=i-1
break
# Customers cordinates and vehicule capacity
for i, line in enumerate(inFile):
if i < nb_customers:
x = float(line.split()[0])
y = float(line.split()[1])
customers['c'+str(i)] = {}
customers['c'+str(i)]['x'] = x
customers['c'+str(i)]['y'] = y
else:
break
# Vehicules and depots capacity
for i, line in enumerate(inFile):
if i == 0:
vehicle_cap = float(line)
meta_data['vehicle_cap'] = vehicle_cap
elif i == 1:
pass
elif i < nb_depots+2:
depot_cap = float(line)
depots['d'+str(i-2)]['capacity'] = depot_cap
else:
break
# Customers demands
for i, line in enumerate(inFile):
if i < nb_customers:
demand = float(line)
customers['c'+str(i)]['demand'] = demand
else:
break
# Depots openning costs
for i, line in enumerate(inFile):
if i < nb_depots:
openning_cost = float(line)
depots['d'+str(i)]['opening_cost'] = openning_cost
elif i == nb_depots:
pass
elif i == nb_depots+1:
vehicle_cost = float(line)
meta_data['vehicle_cost'] = vehicle_cost
elif i == nb_depots+2:
pass
elif i == nb_depots+3:
cost_type = float(line)
meta_data['cost_type'] = cost_type
else:
break
final_output = {}
final_output['customers'] = customers
final_output['depots'] = depots
final_output['meta_data'] = meta_data
json.dump(final_output, outFile, indent=4)
inFile.close()
outFile.close()
# Plot the customers on the map
def plotCustomers(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_customers = data['meta_data']['nb_customers']
coords_cust = np.zeros(shape=(nb_customers,2))
for i in xrange(nb_customers):
x = data['customers']['c{0}'.format(i)]['x']
y = data['customers']['c{0}'.format(i)]['y']
coords_cust[i] = [x,y]
plt.scatter(coords_cust[:,0], coords_cust[:,1], marker='P', s=10, linewidth=5)
plt.show()
# Plot the depots on the map
def plotDepots(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_depots = data['meta_data']['nb_depots']
coords_depot = np.zeros(shape=(nb_depots,2))
for i in xrange(nb_depots):
x = data['depots']['d{0}'.format(i)]['x']
y = data['depots']['d{0}'.format(i)]['y']
coords_depot[i] = [x,y]
plt.scatter(coords_depot[:,0], coords_depot[:,1], marker='P', s=10, linewidth=5)
plt.show()
# Plot both depots and customers on the map
def plotAll(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_customers = data['meta_data']['nb_customers']
nb_depots = data['meta_data']['nb_depots']
coords_cust = np.zeros(shape=(nb_customers,2))
coords_depot = np.zeros(shape=(nb_depots,2))
for i in xrange(nb_customers):
x = data['customers']['c{0}'.format(i)]['x']
y = data['customers']['c{0}'.format(i)]['y']
coords_cust[i] = [x,y]
for i in xrange(nb_depots):
x = data['depots']['d{0}'.format(i)]['x']
y = data['depots']['d{0}'.format(i)]['y']
coords_depot[i] = [x,y]
filename = str(basename(os.path.splitext(jsonInputFile)[0]) + '.pdf')
plt.scatter(coords_cust[:,0], coords_cust[:,1], marker='s', s=10, linewidth=5)
plt.scatter(coords_depot[:,0], coords_depot[:,1], marker='8', s=10, linewidth=5)
plt.savefig(filename, format='pdf')
#~ plt.show()
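# Illustrative usage of the helpers above (file names are hypothetical):
#   tools = PsoTools()
#   tools.rawToJson('instance.dat', 'instance.json')
#   tools.plotAll('instance.json')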
| mit |
mhoffman/kmos | kmos/cli.py | 1 | 16514 | #!/usr/bin/env python
"""Entry point module for the command-line
interface. The kmos executable should be
on the program path, import this modules
main function and run it.
To call kmos command as you would from the shell,
use ::
kmos.cli.main('...')
Every command can be shortened as long as it is non-ambiguous, e.g. ::
kmos ex <xml-file>
instead of ::
kmos export <xml-file>
etc.
"""
# Copyright 2009-2013 Max J. Hoffmann (mjhoffmann@gmail.com)
# This file is part of kmos.
#
# kmos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kmos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kmos. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import shutil
usage = {}
usage['all'] = """kmos help all
Display documentation for all commands.
"""
usage['benchmark'] = """kmos benchmark
Run 1 mio. kMC steps on model in current directory
and report runtime.
"""
usage['build'] = """kmos build
Build kmc_model.%s from *f90 files in the
current directory.
Additional Parameters ::
-d/--debug
Turn on assertion statements in F90 code
-n/--no-compiler-optimization
Do not send optimizing flags to compiler.
""" % ('pyd' if os.name == 'nt' else 'so')
usage['help'] = """kmos help <command>
Print usage information for the given command.
"""
usage['export'] = """kmos export <xml-file> [<export-path>]
Take a kmos xml-file and export all generated
    source code to the export-path. There it tries to
build the kmc_model.%s.
Additional Parameters ::
-s/--source-only
Export source only and don't build binary
-b/--backend (local_smart|lat_int)
Choose backend. Default is "local_smart".
lat_int is EXPERIMENTAL and not made
for production, yet.
-d/--debug
Turn on assertion statements in F90 code.
(Only active in compile step)
--acf
Build the modules base_acf.f90 and proclist_acf.f90. Default is false.
        Both modules contain functions to calculate the ACF (autocorrelation function) and MSD (mean squared displacement).
-n/--no-compiler-optimization
Do not send optimizing flags to compiler.
""" % ('pyd' if os.name == 'nt' else 'so')
usage['settings-export'] = """kmos settings-export <xml-file> [<export-path>]
Take a kmos xml-file and export kmc_settings.py
to the export-path.
"""
usage['edit'] = """kmos edit <xml-file>
Open the kmos xml-file in a GUI to edit
the model.
"""
usage['import'] = """kmos import <xml-file>
Take a kmos xml-file and open an ipython shell
with the project_tree imported as pt.
"""
usage['rebuild'] = """kmos rebuild
Export code and rebuild binary module from XML
information included in kmc_settings.py in
current directory.
Additional Parameters ::
-d/--debug
Turn on assertion statements in F90 code
"""
usage['shell'] = """kmos shell
Open an interactive shell and create a KMC_Model in it
run == shell
"""
usage['run'] = """kmos run
Open an interactive shell and create a KMC_Model in it
run == shell
"""
usage['version'] = """kmos version
Print version number and exit.
"""
usage['view'] = """kmos view
Take a kmc_model.%s and kmc_settings.py in the
same directory and start to simulate the
model visually.
Additional Parameters ::
-v/--steps-per-frame <number>
Number of steps per frame
""" % ('pyd' if os.name == 'nt' else 'so')
usage['xml'] = """kmos xml
Print xml representation of model to stdout
"""
def get_options(args=None, get_parser=False):
import optparse
import os
from glob import glob
import kmos
parser = optparse.OptionParser(
'Usage: %prog [help] ('
+ '|'.join(sorted(usage.keys()))
+ ') [options]',
version=kmos.__version__)
parser.add_option('-s', '--source-only',
dest='source_only',
action='store_true',
default=False)
parser.add_option('-p', '--path-to-f2py',
dest='path_to_f2py',
default='f2py')
parser.add_option('-b', '--backend',
dest='backend',
default='local_smart')
parser.add_option('-a', '--avoid-default-state',
dest='avoid_default_state',
action='store_true',
default=False,
)
parser.add_option('-v', '--steps-per-frame',
dest='steps_per_frame',
type='int',
default='50000')
parser.add_option('-d', '--debug',
default=False,
dest='debug',
action='store_true')
parser.add_option('-n', '--no-compiler-optimization',
default=False,
dest='no_optimize',
action='store_true')
parser.add_option('-o', '--overwrite',
default=False,
action='store_true')
parser.add_option('-l', '--variable-length',
dest='variable_length',
default=95,
type='int')
parser.add_option('-c', '--catmap',
default=False,
action='store_true')
parser.add_option('--acf',
dest='acf',
action='store_true',
default=False,
)
try:
from numpy.distutils.fcompiler import get_default_fcompiler
from numpy.distutils import log
log.set_verbosity(-1, True)
fcompiler = get_default_fcompiler()
except:
fcompiler = 'gfortran'
parser.add_option('-f', '--fcompiler',
dest='fcompiler',
default=os.environ.get('F2PY_FCOMPILER', fcompiler))
if args is not None:
options, args = parser.parse_args(args.split())
else:
options, args = parser.parse_args()
if len(args) < 1:
parser.error('Command expected')
if get_parser:
return options, args, parser
else:
return options, args
def match_keys(arg, usage, parser):
"""Try to match part of a command against
the set of commands from usage. Throws
an error if not successful.
"""
possible_args = [key for key in usage if key.startswith(arg)]
if len(possible_args) == 0:
parser.error('Command "%s" not understood.' % arg)
elif len(possible_args) > 1:
parser.error(('Command "%s" ambiguous.\n'
'Could be one of %s\n\n') % (arg, possible_args))
else:
return possible_args[0]
def main(args=None):
"""The CLI main entry point function.
    The optional argument args can be used to
    directly supply command line arguments like
$ kmos <args>
    otherwise the arguments are taken from the command line (sys.argv).
"""
from glob import glob
options, args, parser = get_options(args, get_parser=True)
global model, pt, np, cm_model
if not args[0] in usage.keys():
args[0] = match_keys(args[0], usage, parser)
if args[0] == 'benchmark':
from sys import path
path.append(os.path.abspath(os.curdir))
nsteps = 1000000
from time import time
from kmos.run import KMC_Model
model = KMC_Model(print_rates=False, banner=False)
time0 = time()
try:
model.proclist.do_kmc_steps(nsteps)
except: # kmos < 0.3 had no model.proclist.do_kmc_steps
model.do_steps(nsteps)
needed_time = time() - time0
print('Using the [%s] backend.' % model.get_backend())
print('%s steps took %.2f seconds' % (nsteps, needed_time))
print('Or %.2e steps/s' % (1e6 / needed_time))
model.deallocate()
elif args[0] == 'build':
from kmos.utils import build
build(options)
elif args[0] == 'edit':
from kmos import gui
gui.main()
elif args[0] == 'settings-export':
import kmos.types
import kmos.io
from kmos.io import ProcListWriter
if len(args) < 2:
parser.error('XML file and export path expected.')
if len(args) < 3:
out_dir = '%s_%s' % (os.path.splitext(args[1])[0], options.backend)
print('No export path provided. Exporting to %s' % out_dir)
args.append(out_dir)
xml_file = args[1]
export_dir = args[2]
project = kmos.types.Project()
project.import_file(xml_file)
writer = ProcListWriter(project, export_dir)
writer.write_settings()
elif args[0] == 'export':
import kmos.types
import kmos.io
from kmos.utils import build
if len(args) < 2:
parser.error('XML file and export path expected.')
if len(args) < 3:
out_dir = '%s_%s' % (os.path.splitext(args[1])[0], options.backend)
print('No export path provided. Exporting to %s' % out_dir)
args.append(out_dir)
xml_file = args[1]
export_dir = os.path.join(args[2], 'src')
project = kmos.types.Project()
project.import_file(xml_file)
project.shorten_names(max_length=options.variable_length)
kmos.io.export_source(project,
export_dir,
options=options)
if ((os.name == 'posix'
and os.uname()[0] in ['Linux', 'Darwin'])
or os.name == 'nt') \
and not options.source_only:
os.chdir(export_dir)
build(options)
for out in glob('kmc_*'):
if os.path.exists('../%s' % out) :
if options.overwrite :
overwrite = 'y'
else:
overwrite = raw_input(('Should I overwrite existing %s ?'
'[y/N] ') % out).lower()
if overwrite.startswith('y') :
print('Overwriting {out}'.format(**locals()))
os.remove('../%s' % out)
shutil.move(out, '..')
else :
print('Skipping {out}'.format(**locals()))
else:
shutil.move(out, '..')
elif args[0] == 'settings-export':
import kmos.io
pt = kmos.io.import_file(args[1])
if len(args) < 3:
out_dir = os.path.splitext(args[1])[0]
print('No export path provided. Exporting kmc_settings.py to %s'
% out_dir)
args.append(out_dir)
if not os.path.exists(args[2]):
os.mkdir(args[2])
elif not os.path.isdir(args[2]):
raise UserWarning("Cannot overwrite %s; Exiting;" % args[2])
writer = kmos.io.ProcListWriter(pt, args[2])
writer.write_settings()
elif args[0] == 'help':
if len(args) < 2:
parser.error('Which help do you want?')
if args[1] == 'all':
for command in sorted(usage):
print(usage[command])
elif args[1] in usage:
print('Usage: %s\n' % usage[args[1]])
else:
arg = match_keys(args[1], usage, parser)
print('Usage: %s\n' % usage[arg])
elif args[0] == 'import':
import kmos.io
if not len(args) >= 2:
raise UserWarning('XML file name expected.')
pt = kmos.io.import_xml_file(args[1])
if len(args) == 2:
sh(banner='Note: pt = kmos.io.import_xml(\'%s\')' % args[1])
elif len(args) == 3: # if optional 3rd argument is given, store model there and exit
pt.save(args[2])
elif args[0] == 'rebuild':
from time import sleep
print('Will rebuild model from kmc_settings.py in current directory')
        print('Please do not interrupt the'
              ' build process, as you will most likely')
        print('lose the current model files.')
sleep(2.)
from sys import path
path.append(os.path.abspath(os.curdir))
from tempfile import mktemp
if not os.path.exists('kmc_model.so') \
and not os.path.exists('kmc_model.pyd'):
raise Exception('No kmc_model.so found.')
if not os.path.exists('kmc_settings.py'):
raise Exception('No kmc_settings.py found.')
from kmos.run import KMC_Model
model = KMC_Model(print_rates=False, banner=False)
tempfile = mktemp()
f = file(tempfile, 'w')
f.write(model.xml())
f.close()
for kmc_model in glob('kmc_model.*'):
os.remove(kmc_model)
os.remove('kmc_settings.py')
main('export %s -b %s .' % (tempfile, options.backend))
os.remove(tempfile)
model.deallocate()
elif args[0] in ['run', 'shell']:
from sys import path
path.append(os.path.abspath(os.curdir))
from kmos.run import KMC_Model
# useful to have in interactive mode
import numpy as np
try:
from matplotlib import pyplot as plt
except:
plt = None
if options.catmap:
import catmap
import catmap.cli.kmc_runner
seed = catmap.cli.kmc_runner.get_seed_from_path('.')
cm_model = catmap.ReactionModel(setup_file='{seed}.mkm'.format(**locals()))
catmap_message = '\nSide-loaded catmap_model {seed}.mkm into cm_model = ReactionModel(setup_file="{seed}.mkm")'.format(**locals())
else:
catmap_message = ''
try:
model = KMC_Model(print_rates=False)
except:
print("Warning: could not import kmc_model!"
" Please make sure you are in the right directory")
sh(banner='Note: model = KMC_Model(print_rates=False){catmap_message}'.format(**locals()))
try:
model.deallocate()
except:
print("Warning: could not deallocate model. Was is allocated?")
elif args[0] == 'version':
from kmos import VERSION
print(VERSION)
elif args[0] == 'view':
from sys import path
path.append(os.path.abspath(os.curdir))
from kmos import view
view.main(steps_per_frame=options.steps_per_frame)
elif args[0] == 'xml':
from sys import path
path.append(os.path.abspath(os.curdir))
from kmos.run import KMC_Model
model = KMC_Model(banner=False, print_rates=False)
print(model.xml())
else:
parser.error('Command "%s" not understood.' % args[0])
def sh(banner):
"""Wrapper around interactive ipython shell
that factors out ipython version depencies.
"""
from distutils.version import LooseVersion
import IPython
if hasattr(IPython, 'release'):
try:
from IPython.terminal.embed import InteractiveShellEmbed
InteractiveShellEmbed(banner1=banner)()
except ImportError:
try:
from IPython.frontend.terminal.embed \
import InteractiveShellEmbed
InteractiveShellEmbed(banner1=banner)()
except ImportError:
from IPython.Shell import IPShellEmbed
IPShellEmbed(banner=banner)()
else:
from IPython.Shell import IPShellEmbed
IPShellEmbed(banner=banner)()
| gpl-3.0 |
rexshihaoren/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
postvakje/sympy | sympy/plotting/plot.py | 7 | 65097 | """Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module gives only the essential. For all the fancy stuff use directly
the backend. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover the data series classes have various useful
methods like ``get_points``, ``get_segments``, ``get_meshes``, etc, that may
be useful if you wish to use another plotting library.
Especially if you need publication ready graphs and this module is not enough
for you - just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
python) just copy ``_backend.fig`` which is the figure and ``_backend.ax``
which is the axis and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Don't use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from __future__ import print_function, division
import inspect
from collections import Callable
import warnings
import sys
from sympy import sympify, Expr, Tuple, Dummy, Symbol
from sympy.external import import_module
from sympy.core.compatibility import range
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import is_sequence
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the `SymPyDocTestFinder`` in `sympy/utilities/runtests.py`
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
global _show
_show = False
##############################################################################
# The public interface
##############################################################################
def _arity(f):
"""
    Python 2 and 3 compatible way to get a function's arity that does not
    raise a DeprecationWarning.
"""
if sys.version_info < (3,):
return len(inspect.getargspec(f)[0])
else:
param = inspect.signature(f).parameters.values()
return len([p for p in param if p.kind == p.POSITIONAL_OR_KEYWORD])
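# For example (illustrative): _arity(lambda x, y: x + y) returns 2.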
class Plot(object):
"""The central class of the plotting module.
For interactive work the function ``plot`` is better suited.
This class permits the plotting of sympy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of sympy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure is on two levels. Global options that
concern the figure as a whole (eg title, xlabel, scale, etc) and
per-data series options (eg name) and aesthetics (eg. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str
- ylabel : str
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : function which returns a float.
options:
- label : str
- steps : bool
- integers_only : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
aesthetics:
- surface_color : function which returns a float.
"""
def __init__(self, *args, **kwargs):
super(Plot, self).__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
        # Plot. They are based purely on convention; no checking is done.
self.title = None
self.xlabel = None
self.ylabel = None
self.aspect_ratio = 'auto'
self.xlim = None
self.ylim = None
self.axis_center = 'auto'
self.axis = True
self.xscale = 'linear'
self.yscale = 'linear'
self.legend = False
self.autoscale = True
self.margin = 0
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
self.backend = DefaultBackend
# The keyword arguments should only contain options for the plot.
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
    def __setitem__(self, index, *args):
        if len(args) == 1 and isinstance(args[0], BaseSeries):
            self._series[index] = args[0]
def __delitem__(self, index):
del self._series[index]
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def append(self, arg):
"""Adds an element from a plot's series to an existing plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot's first series object to the first, use the
``append`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.append(p2[0])
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
See Also
========
extend
"""
if isinstance(arg, BaseSeries):
self._series.append(arg)
else:
raise TypeError('Must specify element of plot to append.')
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def extend(self, arg):
"""Adds all series from another plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot to the first, use the ``extend`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.extend(p2)
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
"""
if isinstance(arg, Plot):
self._series.extend(arg._series)
elif is_sequence(arg):
self._series.extend(arg)
else:
raise TypeError('Expecting Plot or sequence of BaseSeries')
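# Example sketch of the per-series aesthetics mentioned in the ``Plot``
# docstring (illustrative only; the expressions, point counts and colour are
# arbitrary):
#
#     from sympy import symbols
#     from sympy.plotting import plot
#
#     x = symbols('x')
#     p = plot(x**2, x**3, (x, -2, 2), show=False, adaptive=False,
#              nb_of_points=200, title='powers of x')
#     p[0].line_color = lambda t: abs(t)   # aesthetic as a function of x
#     p[1].line_color = 'r'                # or a constant colour spec
#     p.show()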
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
"""Base class for the data objects containing stuff to be plotted.
    The backend should check if it supports the data series that it is given
    (e.g. TextBackend supports only LineOver1DRangeSeries).
    It is the backend's responsibility to know how to use the class of
    data series that it is given.
    Some data series classes are grouped (using a class attribute like is_2Dline)
    according to the API they present (based only on convention). The backend is
    not obliged to use that API (e.g. LineOver1DRangeSeries belongs to the
    is_2Dline group and presents the get_points method, but the
    TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
    # - get_points returning 1D np.arrays list_x, list_y, list_z
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
    # - get_meshes returning mesh_x (1D array), mesh_y (1D array),
    #   mesh_z (2D np.array)
# - get_points an alias for get_meshes
#Different from is_contour as the colormap in backend will be
#different
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
    #   used for calculating aesthetics
def __init__(self):
super(BaseSeries, self).__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super(Line2DBaseSeries, self).__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_segments(self):
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
arity = _arity(c)
if arity == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super(List2DSeries, self).__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a SymPy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super(LineOver1DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.label = str(self.expr)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return super(LineOver1DRangeSeries, self).get_segments()
else:
f = lambdify([self.var], self.expr)
list_segments = []
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
np = import_module('numpy')
#Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif p[1] is None and q[1] is None:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if any(y is not None for y in yarray):
for i in range(len(yarray) - 1):
if yarray[i] is not None or yarray[i + 1] is not None:
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
                #Sample further if one of the end points is None (i.e. a complex
                #value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start = f(self.start)
f_end = f(self.end)
sample([self.start, f_start], [self.end, f_end], 0)
return list_segments
def get_points(self):
np = import_module('numpy')
if self.only_integers is True:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric sympy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super(Parametric2DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return super(Parametric2DLineSeries, self).get_segments()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
list_segments = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
#Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
            elif ((p[0] is None and q[0] is None) or
                    (p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if any(x is not None and y is not None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
                            sample(param_array[i], param_array[i + 1], point_a,
                                   point_b, depth + 1)
            #Sample further if one of the end points is None (i.e. a complex
            #value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
sample(self.start, self.end, start, end, 0)
return list_segments
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of two parametric sympy
expressions and a range."""
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super(Parametric3DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super(SurfaceBaseSeries, self).__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
arity = _arity(c)
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a sympy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super(SurfaceOver2DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric sympy
expressions and a range."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super(ParametricSurfaceSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v))
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
#The code is mostly repetition of SurfaceOver2DRange.
#XXX: Presently not used in any of those functions.
    #XXX: Add contour plot and use this series.
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super(ContourSeries, self).__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
def __init__(self, parent):
super(BaseBackend, self).__init__()
self.parent = parent
## don't have to check for the success of importing matplotlib in each case;
## we will only be using this backend if we can successfully import matplotlib
class MatplotlibBackend(BaseBackend):
def __init__(self, parent):
super(MatplotlibBackend, self).__init__(parent)
are_3D = [s.is_3D for s in self.parent._series]
self.matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend can not mix 2D and 3D.')
elif not any(are_3D):
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.spines['left'].set_position('zero')
self.ax.spines['right'].set_color('none')
self.ax.spines['bottom'].set_position('zero')
self.ax.spines['top'].set_color('none')
self.ax.spines['left'].set_smart_bounds(True)
self.ax.spines['bottom'].set_smart_bounds(False)
self.ax.xaxis.set_ticks_position('bottom')
self.ax.yaxis.set_ticks_position('left')
elif all(are_3D):
## mpl_toolkits.mplot3d is necessary for
## projection='3d'
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111, projection='3d')
def process_series(self):
parent = self.parent
for s in self.parent._series:
# Create the collections
if s.is_2Dline:
collection = self.LineCollection(s.get_segments())
self.ax.add_collection(collection)
elif s.is_contour:
self.ax.contour(*s.get_meshes())
elif s.is_3Dline:
# TODO too complicated, I blame matplotlib
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
art3d = mpl_toolkits.mplot3d.art3d
collection = art3d.Line3DCollection(s.get_segments())
self.ax.add_collection(collection)
x, y, z = s.get_points()
self.ax.set_xlim((min(x), max(x)))
self.ax.set_ylim((min(y), max(y)))
self.ax.set_zlim((min(z), max(z)))
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = self.ax.plot_surface(x, y, z, cmap=self.cm.jet,
rstride=1, cstride=1,
linewidth=0.1)
elif s.is_implicit:
#Smart bounds have to be set to False for implicit plots.
self.ax.spines['left'].set_smart_bounds(False)
self.ax.spines['bottom'].set_smart_bounds(False)
points = s.get_raster()
if len(points) == 2:
#interval math plotting
x, y = _matplotlib_list(points[0])
self.ax.fill(x, y, facecolor=s.line_color, edgecolor='None')
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
#XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", s.line_color])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
self.ax.contour(xarray, yarray, zarray,
contours=(0, 0), fill=False, cmap=colormap)
else:
self.ax.contourf(xarray, yarray, zarray, cmap=colormap)
else:
raise ValueError('The matplotlib backend supports only '
'is_2Dline, is_3Dline, is_3Dsurface and '
'is_contour objects.')
# Customise the collections with the corresponding per-series
# options.
if hasattr(s, 'label'):
collection.set_label(s.label)
if s.is_line and s.line_color:
if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable):
color_array = s.get_color_array()
collection.set_array(color_array)
else:
collection.set_color(s.line_color)
if s.is_3Dsurface and s.surface_color:
if self.matplotlib.__version__ < "1.2.0": # TODO in the distant future remove this check
warnings.warn('The version of matplotlib is too old to use surface coloring.')
elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
Axes3D = mpl_toolkits.mplot3d.Axes3D
if parent.xscale and not isinstance(self.ax, Axes3D):
self.ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(self.ax, Axes3D):
self.ax.set_yscale(parent.yscale)
if parent.xlim:
self.ax.set_xlim(parent.xlim)
else:
if all(isinstance(s, LineOver1DRangeSeries) for s in parent._series):
starts = [s.start for s in parent._series]
ends = [s.end for s in parent._series]
self.ax.set_xlim(min(starts), max(ends))
if parent.ylim:
self.ax.set_ylim(parent.ylim)
if not isinstance(self.ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
self.ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(self.ax, Axes3D):
pass
elif val == 'center':
self.ax.spines['left'].set_position('center')
self.ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = self.ax.get_xlim()
yl, yh = self.ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
self.ax.spines['left'].set_position(pos_left)
self.ax.spines['bottom'].set_position(pos_bottom)
else:
self.ax.spines['left'].set_position(('data', val[0]))
self.ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
self.ax.set_axis_off()
if parent.legend:
if self.ax.legend():
self.ax.legend_.set_visible(parent.legend)
if parent.margin:
self.ax.set_xmargin(parent.margin)
self.ax.set_ymargin(parent.margin)
if parent.title:
self.ax.set_title(parent.title)
if parent.xlabel:
self.ax.set_xlabel(parent.xlabel, position=(1, 0))
if parent.ylabel:
self.ax.set_ylabel(parent.ylabel, position=(0, 1))
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.plt.show()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
class TextBackend(BaseBackend):
def __init__(self, parent):
super(TextBackend, self).__init__(parent)
def show(self):
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
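# Example sketch (illustrative; assumes the names below are imported from
# ``sympy.plotting.plot``): a backend can be chosen explicitly by passing
# ``backend=...`` to ``Plot``.
#
#     from sympy import symbols
#     from sympy.plotting.plot import Plot, LineOver1DRangeSeries, plot_backends
#
#     x = symbols('x')
#     series = LineOver1DRangeSeries(x**2, (x, -3, 3))
#     p = Plot(series, backend=plot_backends['text'])
#     p.show()   # renders with the ASCII text backend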
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.average(np.vstack((array[:-1], array[1:])), 0)
def centers_of_faces(array):
np = import_module('numpy')
return np.average(np.dstack((array[:-1, :-1],
array[1:, :-1],
                                 array[:-1, 1:],
                                 array[1:, 1:],
)), 2)
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
    # Workaround for plotting piecewise functions (#8577): `lambdify` in
    # `.experimental_lambdify` sometimes fails to return numerical values,
    # so the difference vectors are cast to float explicitly. A lower-level
    # fix in `lambdify` is possible.
    vector_a = (x - y).astype(float)
    vector_b = (z - y).astype(float)
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
return abs(cos_theta + 1) < eps
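# Worked illustration of the criterion above (arbitrary numbers): with
# x = (0, 0), y = (1, 0), z = (2, 0) the vectors a = x - y = (-1, 0) and
# b = z - y = (1, 0) give cos(theta) = -1, so ``flat`` returns True; bending
# the third point to z = (2, 1) gives cos(theta) ~ -0.71 and the check fails.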
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot(*args, **kwargs):
"""
Plots a function of a single variable and returns an instance of
the ``Plot`` class (also, see the description of the
``show`` keyword argument below).
    The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the function. The adaptive algorithm uses a random point
    near the midpoint of two points that has to be further sampled. Hence the
    same plot can appear slightly different from one run to the next.
Usage
=====
Single Plot
``plot(expr, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot(expr1, expr2, ..., range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot((expr1, range), (expr2, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of single variable
``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.
Keyword Arguments
=================
Arguments for ``plot`` function:
``show``: Boolean. The default value is set to ``True``. Set show to
``False`` and the function will not display the plot. The returned
instance of the ``Plot`` class can then be used to save or display
the plot by calling the ``save()`` and ``show()`` methods
respectively.
Arguments for ``LineOver1DRangeSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to False and
specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of value ``n``
samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The function
is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics options:
``line_color``: float. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
    If there are multiple plots, then the same series arguments are applied to
    all the plots. If you want to set these options separately, you can index
    the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
See Also
========
Plot, LineOver1DRangeSeries.
"""
args = list(map(sympify, args))
free = set()
for a in args:
if isinstance(a, Expr):
free |= a.free_symbols
if len(free) > 1:
raise ValueError(
'The same variable should be used in all '
'univariate expressions being plotted.')
x = free.pop() if free else Symbol('x')
kwargs.setdefault('xlabel', x.name)
kwargs.setdefault('ylabel', 'f(%s)' % x.name)
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot_parametric(*args, **kwargs):
"""
Plots a 2D parametric plot.
    The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the functions. The adaptive algorithm uses a random point
    near the midpoint of two points that has to be further sampled. Hence the
    same plot can appear slightly different from one run to the next.
Usage
=====
Single plot.
``plot_parametric(expr_x, expr_y, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric2DLineSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to
False and specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of
value ``n`` samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The
function is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics
----------
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same Series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center
or {'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot_parametric
>>> u = symbols('u')
Single Parametric plot
>>> plot_parametric(cos(u), sin(u), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
    Multiple parametric plots with single range.
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
Multiple parametric plots.
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_line(*args, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
number of points.
Aesthetics:
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d(*args, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function along x.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
    ``nb_of_points_x``: int. The x range is sampled uniformly at
    ``nb_of_points_x`` points.
    ``nb_of_points_y``: int. The y range is sampled uniformly at
    ``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_surface(*args, **kwargs):
"""
Plots a 3D parametric surface plot.
Usage
=====
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
    If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the v
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
    ``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
    ``nb_of_points_u`` points.
    ``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
    ``nb_of_points_v`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied for
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts into tuples of the
form (exprs, ranges)
Examples
========
>>> from sympy import plot, cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x = symbols('x')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
if len(args) < expr_len:
raise ValueError("len(args) should not be less than expr_len")
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set().union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
        # Cannot handle the case where the number of expressions is 3, as it
        # is not possible to differentiate between expressions and ranges.
#Series of plots with same range
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set().union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
#Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
#Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
| bsd-3-clause |
stylianos-kampakis/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
calispac/digicampipe | digicampipe/scripts/spe.py | 1 | 14553 | #!/usr/bin/env python
"""
Do the Single Photoelectron analysis
Usage:
digicam-spe [options] [--] <INPUT>...
Options:
-h --help Show this screen.
--max_events=N Maximum number of events to analyse.
--max_histo_filename=FILE File path of the max histogram.
[Default: ./max_histo.pk]
--charge_histo_filename=FILE File path of the charge histogram
[Default: ./charge_histo.pk]
--raw_histo_filename=FILE File path of the raw histogram
[Default: ./raw_histo.pk]
-o OUTPUT --output=OUTPUT Output file path to store the results.
[Default: ./results.npz]
-c --compute Compute the data.
-f --fit Fit.
-d --display Display.
-v --debug Enter the debug mode.
-p --pixel=<PIXEL> Give a list of pixel IDs.
--shift=N Number of bins to shift before integrating
[default: 0].
--integral_width=N Number of bins to integrate over
[default: 7].
--pulse_finder_threshold=F Threshold of pulse finder in arbitrary units
[default: 2.0].
--save_figures=PATH Save the plots to the indicated folder.
                              Figures are not saved if set to none
[default: none]
--ncall=N Number of calls for the fit [default: 10000]
--n_samples=N Number of samples per waveform
"""
import os
import matplotlib.pyplot as plt
import numpy as np
from docopt import docopt
from histogram.histogram import Histogram1D
from tqdm import tqdm
from digicampipe.calib.baseline import fill_baseline, subtract_baseline
from digicampipe.calib.charge import compute_charge
from digicampipe.calib.peak import find_pulse_with_max, \
find_pulse_fast
from digicampipe.io.event_stream import calibration_event_stream
from digicampipe.scripts import raw
from digicampipe.scripts.fmpe import FMPEFitter
from digicampipe.utils.docopt import convert_pixel_args, \
convert_int, convert_text
from digicampipe.utils.pdf import fmpe_pdf_10
class MaxHistoFitter(FMPEFitter):
def __init__(self, histogram, estimated_gain, **kwargs):
n_peaks = 2
super(MaxHistoFitter, self).__init__(histogram, estimated_gain,
n_peaks, **kwargs)
self.parameters_plot_name = {'baseline': '$B$', 'gain': 'G',
'sigma_e': '$\sigma_e$',
'sigma_s': '$\sigma_s$',
'a_0': None, 'a_1': None}
def pdf(self, x, baseline, gain, sigma_e, sigma_s, a_0, a_1):
params = {'baseline': baseline, 'gain': gain, 'sigma_e': sigma_e,
'sigma_s': sigma_s, 'a_0': a_0, 'a_1': a_1, 'bin_width': 0}
return fmpe_pdf_10(x, **params)
class SPEFitter(FMPEFitter):
def __init__(self, histogram, estimated_gain, **kwargs):
n_peaks = 4
super(SPEFitter, self).__init__(histogram, estimated_gain, n_peaks,
**kwargs)
self.parameters_plot_name = {'baseline': '$B$', 'gain': 'G',
'sigma_e': '$\sigma_e$',
'sigma_s': '$\sigma_s$',
'a_1': None, 'a_2': None, 'a_3': None,
'a_4': None}
def pdf(self, x, baseline, gain, sigma_e, sigma_s, a_1, a_2, a_3, a_4):
params = {'baseline': baseline, 'gain': gain, 'sigma_e': sigma_e,
'sigma_s': sigma_s, 'a_0': 0, 'a_1': a_1, 'a_2': a_2,
'a_3': a_3, 'a_4': a_4, 'bin_width': 0}
return fmpe_pdf_10(x, **params)
def initialize_fit(self):
init_params = super(SPEFitter, self).initialize_fit()
init_params['a_4'] = init_params['a_3']
init_params['a_3'] = init_params['a_2']
init_params['a_2'] = init_params['a_1']
init_params['a_1'] = init_params['a_0']
init_params['baseline'] = init_params['baseline'] - init_params['gain']
del init_params['a_0']
self.initial_parameters = init_params
return init_params
def compute_dark_rate(number_of_zeros, total_number_of_events, time):
p_0 = number_of_zeros / total_number_of_events
rate = - np.log(p_0)
rate /= time
return rate
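# Worked example with made-up numbers: if 900 out of 1000 windows of 200 ns
# contain no pulse, compute_dark_rate(900, 1000, 200e-9) returns
# -ln(0.9) / 200e-9, i.e. roughly 5.3e5 Hz.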
def compute_max_histo(files, histo_filename, pixel_id, max_events,
integral_width, shift, baseline):
n_pixels = len(pixel_id)
if not os.path.exists(histo_filename):
events = calibration_event_stream(files, pixel_id=pixel_id,
max_events=max_events)
# events = compute_baseline_with_min(events)
events = fill_baseline(events, baseline)
events = subtract_baseline(events)
events = find_pulse_with_max(events)
events = compute_charge(events, integral_width, shift)
max_histo = Histogram1D(
data_shape=(n_pixels,),
bin_edges=np.arange(-4095 * integral_width,
4095 * integral_width),
)
for event in events:
max_histo.fill(event.data.reconstructed_charge)
max_histo.save(histo_filename)
return max_histo
else:
max_histo = Histogram1D.load(histo_filename)
return max_histo
def compute_spe(files, histo_filename, pixel_id, baseline, max_events,
integral_width, shift, pulse_finder_threshold, debug=False):
if not os.path.exists(histo_filename):
n_pixels = len(pixel_id)
events = calibration_event_stream(files,
max_events=max_events,
pixel_id=pixel_id)
events = fill_baseline(events, baseline)
events = subtract_baseline(events)
# events = find_pulse_1(events, 0.5, 20)
# events = find_pulse_2(events, widths=[5, 6], threshold_sigma=2)
events = find_pulse_fast(events, threshold=pulse_finder_threshold)
# events = find_pulse_fast_2(events, threshold=pulse_finder_threshold,
# min_dist=3)
# events = find_pulse_correlate(events,
# threshold=pulse_finder_threshold)
# events = find_pulse_gaussian_filter(events,
# threshold=pulse_finder_threshold)
# events = find_pulse_wavelets(events, widths=[4, 5, 6],
# threshold_sigma=2)
events = compute_charge(events, integral_width=integral_width,
shift=shift)
# events = compute_amplitude(events)
# events = fit_template(events)
# events = compute_full_waveform_charge(events)
spe_histo = Histogram1D(
data_shape=(n_pixels,),
bin_edges=np.arange(-4095 * 50, 4095 * 50)
)
for event in events:
spe_histo.fill(event.data.reconstructed_charge)
spe_histo.save(histo_filename)
return spe_histo
else:
spe_histo = Histogram1D.load(histo_filename)
return spe_histo
def entry():
args = docopt(__doc__)
files = args['<INPUT>']
debug = args['--debug']
max_events = convert_int(args['--max_events'])
raw_histo_filename = args['--raw_histo_filename']
charge_histo_filename = args['--charge_histo_filename']
max_histo_filename = args['--max_histo_filename']
results_filename = args['--output']
pixel_id = convert_pixel_args(args['--pixel'])
n_pixels = len(pixel_id)
integral_width = int(args['--integral_width'])
shift = int(args['--shift'])
pulse_finder_threshold = float(args['--pulse_finder_threshold'])
n_samples = int(args['--n_samples']) # TODO access this in a better way !
estimated_gain = 20
ncall = int(args['--ncall'])
if args['--compute']:
raw_histo = raw.compute(files, max_events=max_events,
pixel_id=pixel_id, filename=raw_histo_filename)
baseline = raw_histo.mode()
compute_max_histo(files, max_histo_filename, pixel_id, max_events,
integral_width, shift, baseline)
compute_spe(files, charge_histo_filename, pixel_id, baseline,
max_events, integral_width, shift, pulse_finder_threshold,
debug=debug)
if args['--fit']:
spe_histo = Histogram1D.load(charge_histo_filename)
max_histo = Histogram1D.load(max_histo_filename)
dark_count_rate = np.zeros(n_pixels) * np.nan
electronic_noise = np.zeros(n_pixels) * np.nan
crosstalk = np.zeros(n_pixels) * np.nan
gain = np.zeros(n_pixels) * np.nan
for i, pixel in tqdm(enumerate(pixel_id), total=n_pixels,
desc='Pixel'):
histo = max_histo[i]
fitter = MaxHistoFitter(histo, estimated_gain, throw_nan=True)
try:
fitter.fit(ncall=100)
fitter.fit(ncall=ncall)
n_entries = histo.data.sum()
number_of_zeros = fitter.parameters['a_0']
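                # n_samples readout samples at (presumably) 4 ns each, so the
                # dark count rate computed below comes out in GHz.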
window_length = 4 * n_samples
rate = compute_dark_rate(number_of_zeros,
n_entries,
window_length)
electronic_noise[i] = fitter.parameters['sigma_e']
dark_count_rate[i] = rate
if debug:
fitter.draw()
fitter.draw_init(x_label='[LSB]')
fitter.draw_fit(x_label='[LSB]')
plt.show()
except Exception as e:
print('Could not compute dark count rate'
' in pixel {}'.format(pixel))
print(e)
np.savez(results_filename, dcr=dark_count_rate,
sigma_e=electronic_noise, pixel_id=pixel_id)
for i, pixel in tqdm(enumerate(pixel_id), total=n_pixels,
desc='Pixel'):
histo = spe_histo[i]
fitter = SPEFitter(histo, estimated_gain, throw_nan=True)
try:
fitter.fit(ncall=100)
fitter.fit(ncall=ncall)
params = fitter.parameters
n_entries = params['a_1']
n_entries += params['a_2']
n_entries += params['a_3']
n_entries += params['a_4']
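                # Crosstalk: fraction of detected events holding more than one
                # photoelectron (peaks 2 to 4) among all detected events.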
crosstalk[i] = (n_entries - params['a_1']) / n_entries
gain[i] = params['gain']
if debug:
fitter.draw()
fitter.draw_init(x_label='[LSB]')
fitter.draw_fit(x_label='[LSB]')
plt.show()
except Exception as e:
print('Could not compute gain and crosstalk'
' in pixel {}'.format(pixel))
print(e)
data = dict(np.load(results_filename))
data['crosstalk'] = crosstalk
data['gain'] = gain
np.savez(results_filename, **data)
save_figure = convert_text(args['--save_figures'])
if save_figure is not None:
output_path = save_figure
spe_histo = Histogram1D.load(charge_histo_filename)
spe_amplitude = Histogram1D.load(charge_histo_filename)
raw_histo = Histogram1D.load(raw_histo_filename)
max_histo = Histogram1D.load(max_histo_filename)
figure_directory = output_path + 'figures/'
if not os.path.exists(figure_directory):
os.makedirs(figure_directory)
histograms = [spe_histo, spe_amplitude, raw_histo, max_histo]
names = ['histogram_charge/', 'histogram_amplitude/', 'histogram_raw/',
'histo_max/']
for i, histo in enumerate(histograms):
figure = plt.figure()
histogram_figure_directory = figure_directory + names[i]
if not os.path.exists(histogram_figure_directory):
os.makedirs(histogram_figure_directory)
for j, pixel in enumerate(pixel_id):
axis = figure.add_subplot(111)
figure_path = histogram_figure_directory + 'pixel_{}'. \
format(pixel)
try:
histo.draw(index=(j,), axis=axis, log=True, legend=False)
figure.savefig(figure_path)
except Exception as e:
print('Could not save pixel {} to : {} \n'.
format(pixel, figure_path))
print(e)
axis.remove()
if args['--display']:
spe_histo = Histogram1D.load(charge_histo_filename)
        raw_histo = Histogram1D.load(raw_histo_filename)
max_histo = Histogram1D.load(max_histo_filename)
spe_histo.draw(index=(0,), log=True, legend=False)
raw_histo.draw(index=(0,), log=True, legend=False)
max_histo.draw(index=(0,), log=True, legend=False)
try:
data = np.load(results_filename)
dark_count_rate = data['dcr']
electronic_noise = data['sigma_e']
crosstalk = data['crosstalk']
gain = data['gain']
        except IOError as e:
            print(e)
            print('Could not find the analysis files !')
            return
plt.figure()
plt.hist(dark_count_rate[np.isfinite(dark_count_rate)],
bins='auto')
plt.xlabel('dark count rate [GHz]')
plt.legend(loc='best')
plt.figure()
plt.hist(crosstalk[np.isfinite(crosstalk)],
bins='auto')
plt.xlabel('Crosstalk []')
plt.legend(loc='best')
plt.figure()
plt.hist(gain[np.isfinite(gain)],
bins='auto')
plt.xlabel('Gain [LSB/p.e.]')
plt.legend(loc='best')
plt.figure()
plt.hist(electronic_noise[np.isfinite(electronic_noise)],
bins='auto')
        plt.xlabel(r'$\sigma_e$ [LSB]')
plt.legend(loc='best')
plt.show()
return
if __name__ == '__main__':
entry()
| gpl-3.0 |
ManuSchmi88/landlab | landlab/plot/imshow.py | 3 | 21050 | #! /usr/bin/env python
"""
Methods to plot data defined on Landlab grids.
Plotting functions
++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.plot.imshow.imshow_grid
~landlab.plot.imshow.imshow_grid_at_cell
~landlab.plot.imshow.imshow_grid_at_node
"""
import numpy as np
import inspect
from landlab.field.scalar_data_fields import FieldError
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn('matplotlib not found', ImportWarning)
from landlab.grid import CLOSED_BOUNDARY
from landlab.grid.raster import RasterModelGrid
from landlab.grid.voronoi import VoronoiDelaunayGrid
from landlab.utils.decorators import deprecated
def imshow_grid_at_node(grid, values, **kwds):
"""Prepare a map view of data over all nodes in the grid.
Data is plotted as cells shaded with the value at the node at its center.
Outer edges of perimeter cells are extrapolated. Closed elements are
colored uniformly (default black, overridden with kwd 'color_for_closed');
other open boundary nodes get their actual values.
*values* can be a field name, a regular array, or a masked array. If a
masked array is provided, masked entries will be treated as if they were
Landlab CLOSED_BOUNDARYs. Used together with the color_at_closed=None
keyword (i.e., "transparent"), this can allow for construction of overlay
layers in a figure (e.g., only defining values in a river network, and
overlaying it on another landscape).
Use matplotlib functions like xlim, ylim to modify your plot after calling
:func:`imshow_grid`, as desired.
This function happily works with both regular and irregular grids.
Construction ::
imshow_grid_at_node(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=None,
symmetric_cbar=False, cmap='pink',
limits=(values.min(), values.max()),
vmin=values.min(), vmax=values.max(),
allow_colorbar=True,
norm=[linear], shrink=1.,
color_for_closed='black',
color_for_background=None,
show_elements=False, output=None)
Parameters
----------
grid : ModelGrid
Grid containing the field to plot, or describing the geometry of the
provided array.
values : array_like, masked_array, or str
Node values, or a field name as a string from which to draw the data.
plot_name : str, optional
String to put as the plot title.
var_name : str, optional
Variable name, to use as a colorbar label.
var_units : str, optional
Units for the variable being plotted, for the colorbar.
grid_units : tuple of str, optional
        Units for the y and x dimensions. If None, the component will look to
        the grid property `axis_units` for this information. If no units are
        specified there, no entry is made.
symmetric_cbar : bool
        Make the colormap symmetric about 0.
cmap : str
Name of a colormap
limits : tuple of float
Minimum and maximum of the colorbar.
vmin, vmax: floats
Alternatives to limits.
allow_colorbar : bool
If True, include the colorbar.
colorbar_label : str or None
The string with which to label the colorbar.
norm : matplotlib.colors.Normalize
The normalizing object which scales data, typically into the interval
[0, 1]. Ignore in most cases.
shrink : float
Fraction by which to shrink the colorbar.
color_for_closed : str or None
Color to use for closed nodes (default 'black'). If None, closed
(or masked) nodes will be transparent.
color_for_background : color str or other color declaration, or None
Color to use for closed elements (default None). If None, the
background will be transparent, and appear white.
show_elements : bool
If True, and grid is a Voronoi, the faces will be plotted in black
along with just the colour of the cell, defining the cell outlines
(defaults False).
output : None, string, or bool
If None (or False), the image is sent to the imaging buffer to await
an explicit call to show() or savefig() from outside this function.
If a string, the string should be the path to a save location, and the
filename (with file extension). The function will then call
plt.savefig([string]) itself. If True, the function will call
plt.show() itself once plotting is complete.
"""
if isinstance(values, str):
values_at_node = grid.at_node[values]
else:
values_at_node = values
if values_at_node.size != grid.number_of_nodes:
raise ValueError('number of values does not match number of nodes')
values_at_node = np.ma.masked_where(
grid.status_at_node == CLOSED_BOUNDARY, values_at_node)
try:
shape = grid.shape
except AttributeError:
shape = (-1, )
_imshow_grid_values(grid, values_at_node.reshape(shape), **kwds)
if isinstance(values, str):
plt.title(values)
@deprecated(use='imshow_grid_at_node', version='0.5')
def imshow_node_grid(grid, values, **kwds):
imshow_grid_at_node(grid, values, **kwds)
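# Minimal usage sketch (the grid shape and field name below are illustrative,
# not taken from this module):
#
#     from landlab import RasterModelGrid
#     grid = RasterModelGrid((4, 5))
#     grid.add_zeros('node', 'topographic__elevation')
#     imshow_grid_at_node(grid, 'topographic__elevation', cmap='viridis')
#     plt.show()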
def imshow_grid_at_cell(grid, values, **kwds):
"""Map view of grid data over all grid cells.
Prepares a map view of data over all cells in the grid.
Method can take any of the same ``**kwds`` as :func:`imshow_grid_at_node`.
Construction ::
imshow_grid_at_cell(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=None,
symmetric_cbar=False, cmap='pink',
limits=(values.min(), values.max()),
vmin=values.min(), vmax=values.max(),
allow_colorbar=True, colorbar_label=None,
norm=[linear], shrink=1.,
color_for_closed='black',
color_for_background=None,
show_elements=False, output=None)
Parameters
----------
grid : ModelGrid
Grid containing the field to plot, or describing the geometry of the
provided array.
values : array_like, masked_array, or str
Values at the cells on the grid. Alternatively, can be a field name
(string) from which to draw the data from the grid.
plot_name : str, optional
String to put as the plot title.
var_name : str, optional
Variable name, to use as a colorbar label.
var_units : str, optional
Units for the variable being plotted, for the colorbar.
grid_units : tuple of str, optional
        Units for the y and x dimensions. If None, the component will look to
        the grid property `axis_units` for this information. If no units are
        specified there, no entry is made.
symmetric_cbar : bool
        Make the colormap symmetric about 0.
cmap : str
Name of a colormap
limits : tuple of float
Minimum and maximum of the colorbar.
vmin, vmax: floats
Alternatives to limits.
allow_colorbar : bool
If True, include the colorbar.
colorbar_label : str or None
The string with which to label the colorbar.
norm : matplotlib.colors.Normalize
The normalizing object which scales data, typically into the interval
[0, 1]. Ignore in most cases.
shrink : float
Fraction by which to shrink the colorbar.
color_for_closed : str or None
Color to use for closed elements (default 'black'). If None, closed
(or masked) elements will be transparent.
color_for_background : color str or other color declaration, or None
Color to use for closed elements (default None). If None, the
background will be transparent, and appear white.
show_elements : bool
If True, and grid is a Voronoi, the faces will be plotted in black
along with just the colour of the cell, defining the cell outlines
(defaults False).
output : None, string, or bool
If None (or False), the image is sent to the imaging buffer to await
an explicit call to show() or savefig() from outside this function.
If a string, the string should be the path to a save location, and the
filename (with file extension). The function will then call
plt.savefig([string]) itself. If True, the function will call
plt.show() itself once plotting is complete.
Raises
------
ValueError
If input grid is not uniform rectilinear.
"""
if isinstance(values, str):
try:
values_at_cell = grid.at_cell[values]
except FieldError:
values_at_cell = grid.at_node[values]
else:
values_at_cell = values
if values_at_cell.size == grid.number_of_nodes:
values_at_cell = values_at_cell[grid.node_at_cell]
if values_at_cell.size != grid.number_of_cells:
raise ValueError('number of values must match number of cells or '
'number of nodes')
values_at_cell = np.ma.asarray(values_at_cell)
values_at_cell.mask = True
values_at_cell.mask[grid.core_cells] = False
myimage = _imshow_grid_values(grid,
values_at_cell.reshape(grid.cell_grid_shape),
**kwds)
if isinstance(values, str):
plt.title(values)
return myimage
@deprecated(use='imshow_grid_at_cell', version='0.5')
def imshow_cell_grid(grid, values, **kwds):
imshow_grid_at_cell(grid, values, **kwds)
def _imshow_grid_values(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=(None, None),
symmetric_cbar=False, cmap='pink', limits=None,
colorbar_label = None,
allow_colorbar=True, vmin=None, vmax=None,
norm=None, shrink=1., color_for_closed='black',
color_for_background=None, show_elements=False,
output=None):
gridtypes = inspect.getmro(grid.__class__)
cmap = plt.get_cmap(cmap)
if color_for_closed is not None:
cmap.set_bad(color=color_for_closed)
else:
cmap.set_bad(alpha=0.)
if isinstance(grid, RasterModelGrid):
if values.ndim != 2:
raise ValueError('values must have ndim == 2')
y = np.arange(values.shape[0] + 1) * grid.dy - grid.dy * .5
x = np.arange(values.shape[1] + 1) * grid.dx - grid.dx * .5
kwds = dict(cmap=cmap)
(kwds['vmin'], kwds['vmax']) = (values.min(), values.max())
if (limits is None) and ((vmin is None) and (vmax is None)):
if symmetric_cbar:
(var_min, var_max) = (values.min(), values.max())
limit = max(abs(var_min), abs(var_max))
(kwds['vmin'], kwds['vmax']) = (- limit, limit)
elif limits is not None:
(kwds['vmin'], kwds['vmax']) = (limits[0], limits[1])
else:
if vmin is not None:
kwds['vmin'] = vmin
if vmax is not None:
kwds['vmax'] = vmax
if np.isclose(grid.dx, grid.dy):
if values.size == grid.number_of_nodes:
myimage = plt.imshow(
values.reshape(grid.shape), origin='lower',
extent=(x[0], x[-1], y[0], y[-1]), **kwds)
else: # this is a cell grid, and has been reshaped already...
myimage = plt.imshow(values, origin='lower',
extent=(x[0], x[-1], y[0], y[-1]), **kwds)
myimage = plt.pcolormesh(x, y, values, **kwds)
plt.gca().set_aspect(1.)
plt.autoscale(tight=True)
if allow_colorbar:
cb = plt.colorbar(norm=norm, shrink=shrink)
if colorbar_label:
cb.set_label(colorbar_label)
elif VoronoiDelaunayGrid in gridtypes:
# This is still very much ad-hoc, and needs prettifying.
# We should save the modifications needed to plot color all the way
# to the diagram edge *into* the grid, for faster plotting.
# (see http://stackoverflow.com/questions/20515554/...
# colorize-voronoi-diagram)
# (This technique is not implemented yet)
from scipy.spatial import voronoi_plot_2d
import matplotlib.colors as colors
import matplotlib.cm as cmx
cm = plt.get_cmap(cmap)
if (limits is None) and ((vmin is None) and (vmax is None)):
# only want to work with NOT CLOSED nodes
open_nodes = grid.status_at_node != 4
if symmetric_cbar:
(var_min, var_max) = (values.flat[
open_nodes].min(), values.flat[open_nodes].max())
limit = max(abs(var_min), abs(var_max))
(vmin, vmax) = (- limit, limit)
else:
(vmin, vmax) = (values.flat[
open_nodes].min(), values.flat[open_nodes].max())
elif limits is not None:
(vmin, vmax) = (limits[0], limits[1])
else:
open_nodes = grid.status_at_node != 4
if vmin is None:
vmin = values.flat[open_nodes].min()
if vmax is None:
vmax = values.flat[open_nodes].max()
cNorm = colors.Normalize(vmin, vmax)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
colorVal = scalarMap.to_rgba(values)
if show_elements:
myimage = voronoi_plot_2d(grid.vor, show_vertices=False,
show_points=False)
# show_points to be supported in scipy0.18, but harmless for now
mycolors = (i for i in colorVal)
for order in grid.vor.point_region:
region = grid.vor.regions[order]
colortouse = next(mycolors)
if -1 not in region:
polygon = [grid.vor.vertices[i] for i in region]
plt.fill(*zip(*polygon), color=colortouse)
plt.gca().set_aspect(1.)
# plt.autoscale(tight=True)
# Tempting though it is to move the boundary outboard of the outermost
# nodes (e.g., to the outermost corners), this is a bad idea, as the
# outermost cells tend to have highly elongated shapes which make the
# plot look stupid
plt.xlim((np.min(grid.node_x), np.max(grid.node_x)))
plt.ylim((np.min(grid.node_y), np.max(grid.node_y)))
scalarMap.set_array(values)
if allow_colorbar:
cb = plt.colorbar(scalarMap, shrink=shrink)
if grid_units[1] is None and grid_units[0] is None:
grid_units = grid.axis_units
if grid_units[1] == '-' and grid_units[0] == '-':
plt.xlabel('X')
plt.ylabel('Y')
else:
plt.xlabel('X (%s)' % grid_units[1])
plt.ylabel('Y (%s)' % grid_units[0])
else:
plt.xlabel('X (%s)' % grid_units[1])
plt.ylabel('Y (%s)' % grid_units[0])
if plot_name is not None:
plt.title('%s' % (plot_name))
if var_name is not None or var_units is not None:
if var_name is not None:
assert type(var_name) is str
if var_units is not None:
assert type(var_units) is str
colorbar_label = var_name + ' (' + var_units + ')'
else:
colorbar_label = var_name
else:
assert type(var_units) is str
colorbar_label = '(' + var_units + ')'
assert type(colorbar_label) is str
assert allow_colorbar
cb.set_label(colorbar_label)
if color_for_background is not None:
plt.gca().set_axis_bgcolor(color_for_background)
if output is not None:
if type(output) is str:
plt.savefig(output)
plt.clf()
elif output:
plt.show()
def imshow_grid(grid, values, **kwds):
"""Prepare a map view of data over all nodes or cells in the grid.
Data is plotted as colored cells. If at='node', the surrounding cell is
shaded with the value at the node at its center. If at='cell', the cell
is shaded with its own value. Outer edges of perimeter cells are
extrapolated. Closed elements are colored uniformly (default black,
overridden with kwd 'color_for_closed'); other open boundary nodes get
their actual values.
*values* can be a field name, a regular array, or a masked array. If a
masked array is provided, masked entries will be treated as if they were
Landlab CLOSED_BOUNDARYs. Used together with the color_at_closed=None
keyword (i.e., "transparent"), this can allow for construction of overlay
layers in a figure (e.g., only defining values in a river network, and
overlaying it on another landscape).
Use matplotlib functions like xlim, ylim to modify your plot after calling
:func:`imshow_grid`, as desired.
This function happily works with both regular and irregular grids.
Construction ::
imshow_grid(grid, values, plot_name=None, var_name=None,
var_units=None, grid_units=None,
symmetric_cbar=False, cmap='pink',
limits=(values.min(), values.max()),
vmin=values.min(), vmax=values.max(),
allow_colorbar=True, colorbar_label=None,
norm=[linear], shrink=1.,
color_for_closed='black',
color_for_background=None,
show_elements=False)
Parameters
----------
grid : ModelGrid
Grid containing the field to plot, or describing the geometry of the
provided array.
values : array_like, masked_array, or str
Node or cell values, or a field name as a string from which to draw
the data.
at : str, {'node', 'cell'}
Tells plotter where values are defined.
plot_name : str, optional
String to put as the plot title.
var_name : str, optional
Variable name, to use as a colorbar label.
var_units : str, optional
Units for the variable being plotted, for the colorbar.
grid_units : tuple of str, optional
        Units for the y and x dimensions. If None, the component will look to
        the grid property `axis_units` for this information. If no units are
        specified there, no entry is made.
symmetric_cbar : bool
        Make the colormap symmetric about 0.
cmap : str
Name of a colormap
limits : tuple of float
Minimum and maximum of the colorbar.
vmin, vmax: floats
Alternatives to limits.
allow_colorbar : bool
If True, include the colorbar.
colorbar_label : str or None
The string with which to label the colorbar.
norm : matplotlib.colors.Normalize
The normalizing object which scales data, typically into the interval
[0, 1]. Ignore in most cases.
shrink : float
Fraction by which to shrink the colorbar.
color_for_closed : str or None
Color to use for closed elements (default 'black'). If None, closed
(or masked) elements will be transparent.
color_for_background : color str or other color declaration, or None
Color to use for closed elements (default None). If None, the
background will be transparent, and appear white.
show_elements : bool
If True, and grid is a Voronoi, the faces will be plotted in black
along with just the colour of the cell, defining the cell outlines
(defaults False).
output : None, string, or bool
If None (or False), the image is sent to the imaging buffer to await
an explicit call to show() or savefig() from outside this function.
If a string, the string should be the path to a save location, and the
filename (with file extension). The function will then call
plt.savefig([string]) itself. If True, the function will call
plt.show() itself once plotting is complete.
"""
show = kwds.pop('show', False)
values_at = kwds.pop('values_at', 'node')
values_at = kwds.pop('at', values_at)
if isinstance(values, str):
values = grid.field_values(values_at, values)
if values_at == 'node':
imshow_grid_at_node(grid, values, **kwds)
elif values_at == 'cell':
imshow_grid_at_cell(grid, values, **kwds)
else:
raise TypeError('value location %s not understood' % values_at)
# retained for backwards compatibility:
if show:
plt.show()
| mit |
fyffyt/scikit-learn | sklearn/preprocessing/tests/test_data.py | 71 | 38516 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
@ignore_warnings
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
    # np.log(1e-5) is used because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
@ignore_warnings
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
X[:, 0] = 0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
einarhuseby/arctic | arctic/_util.py | 3 | 1846 | from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
from pymongo.errors import OperationFailure
import string
import logging
logger = logging.getLogger(__name__)
def indent(s, num_spaces):
s = string.split(s, '\n')
s = [(num_spaces * ' ') + line for line in s]
s = string.join(s, '\n')
return s
def are_equals(o1, o2, **kwargs):
try:
if isinstance(o1, DataFrame):
            assert_frame_equal(o1, o2, **kwargs)
return True
return o1 == o2
except Exception:
return False
def enable_sharding(arctic, library_name, hashed=False):
c = arctic._conn
lib = arctic[library_name]._arctic_lib
dbname = lib._db.name
library_name = lib.get_top_level_collection().name
try:
c.admin.command('enablesharding', dbname)
except OperationFailure, e:
if not 'failed: already enabled' in str(e):
raise
if not hashed:
logger.info("Range sharding 'symbol' on: " + dbname + '.' + library_name)
c.admin.command('shardCollection', dbname + '.' + library_name, key={'symbol': 1})
else:
logger.info("Hash sharding 'symbol' on: " + dbname + '.' + library_name)
c.admin.command('shardCollection', dbname + '.' + library_name, key={'symbol': 'hashed'})
def enable_powerof2sizes(arctic, library_name):
lib = arctic[library_name]._arctic_lib
collection = lib.get_top_level_collection()
lib._db.command({"collMod": collection.name, 'usePowerOf2Sizes': "true"})
logger.info("usePowerOf2Sizes enabled for %s", collection.name)
for coll in collection.database.collection_names():
if coll.startswith("%s." % collection.name):
lib._db.command({"collMod": coll, 'usePowerOf2Sizes': "true"})
logger.info("usePowerOf2Sizes enabled for %s", coll)
| lgpl-2.1 |
chrismattmann/tika-similarity | sk_kmeans.py | 2 | 4409 | #!/usr/bin/env python2.7
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from tika import parser
import pandas as pd
from vector import Vector
from sklearn.cluster import KMeans
import argparse, os, json
def filterFiles(inputDir, acceptTypes):
filename_list = []
for root, dirnames, files in os.walk(inputDir):
dirnames[:] = [d for d in dirnames if not d.startswith('.')]
for filename in files:
if not filename.startswith('.'):
filename_list.append(os.path.join(root, filename))
filename_list = (filename for filename in filename_list if "metadata" in parser.from_file(filename))
if acceptTypes:
filename_list = (filename for filename in filename_list if str(parser.from_file(filename)['metadata']['Content-Type'].encode('utf-8').decode('utf-8')).split('/')[-1] in acceptTypes)
else:
print("Accepting all MIME Types.....")
return filename_list
if __name__ == "__main__":
argParser = argparse.ArgumentParser('k-means Clustering of documents based on metadata values')
argParser.add_argument('--inputDir', required=True, help='path to directory containing files')
argParser.add_argument('--outJSON', required=True, help='/path/to/clusters.json containing k-means cluster assignments')
argParser.add_argument('--Kvalue', help='number of clusters to find')
#argParser.add_argument('--findK', action='store_true', help='find the optimal value of K')
argParser.add_argument('--accept', nargs='+', type=str, help='Optional: compute similarity only on specified IANA MIME Type(s)')
args = argParser.parse_args()
# cluster for a particular value of K
# if args.inputDir and args.outJSON and args.findK:
if args.inputDir and args.outJSON and args.Kvalue:
list_of_points = []
for eachFile in filterFiles(args.inputDir, args.accept):
list_of_points.append(Vector(eachFile, parser.from_file(eachFile)["metadata"]))
list_of_Dicts = (point.features for point in list_of_points)
df = pd.DataFrame(list_of_Dicts)
df = df.fillna(0)
print(df.shape)
kmeans = KMeans(n_clusters=int(args.Kvalue),
init='k-means++',
max_iter=300, # k-means convergence
n_init=10, # find global minima
n_jobs=-2, # parallelize
)
labels = kmeans.fit_predict(df) # unsupervised (X, y=None)
print(labels) # kmeans.labels_
clusters = {}
for i in range(0, len(labels)):
node = { "metadata": json.dumps(list_of_points[i].features),
"name": list_of_points[i].filename.split('/')[-1],
"path": list_of_points[i].filename
}
try:
clusters[str(labels[i])].append(node)
except KeyError:
clusters[str(labels[i])] = []
clusters[str(labels[i])].append(node)
# generate clusters.JSON
with open(args.outJSON, "w") as jsonF:
json_data = {"name": "clusters"}
children = []
for key in clusters:
cluster_children = {"name": "cluster"+key, "children": clusters[key]}
children.append(cluster_children)
json_data["children"] = children
json.dump(json_data, jsonF)
# print matplotlib
# user chooses k => generates k
# find elbow
#kmeans.transform()
# String Length Of Course
# df.to_csv("bashhshs.csv", sep=',')
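# Hedged sketch of the '--findK' / elbow idea noted above (not part of the
# original script): fit KMeans for a range of k and pick the k where the
# inertia curve bends.
#
#   inertias = []
#   for k in range(2, 20):
#       inertias.append(KMeans(n_clusters=k, init='k-means++').fit(df).inertia_)
#   # plot k against inertias and choose k at the "elbow", where the
#   # decrease in inertia levels off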
| apache-2.0 |
samuelstjean/dipy | scratch/very_scratch/diffusion_sphere_stats.py | 20 | 18082 | import nibabel
import os
import numpy as np
import dipy as dp
#import dipy.core.generalized_q_sampling as dgqs
import dipy.reconst.gqi as dgqs
import dipy.reconst.dti as ddti
import dipy.reconst.recspeed as rp
import dipy.io.pickles as pkl
import scipy as sp
from matplotlib.mlab import find
#import dipy.core.sphere_plots as splots
import dipy.core.sphere_stats as sphats
import dipy.core.geometry as geometry
import get_vertices as gv
#old SimData files
'''
results_SNR030_1fibre
results_SNR030_1fibre+iso
results_SNR030_2fibres_15deg
results_SNR030_2fibres_30deg
results_SNR030_2fibres_60deg
results_SNR030_2fibres_90deg
results_SNR030_2fibres+iso_15deg
results_SNR030_2fibres+iso_30deg
results_SNR030_2fibres+iso_60deg
results_SNR030_2fibres+iso_90deg
results_SNR030_isotropic
'''
#fname='/home/ian/Data/SimData/results_SNR030_1fibre'
''' The file has one row for every voxel; each voxel is repeated 1000
times with the same noise level, and there are 100 different
directions, so 1000 * 100 is the total number of rows.
The 100 conditions are given by 10 polar angles (in degrees) 0, 20, 40, 60, 80,
80, 60, 40, 20 and 0, each combined with longitude angles 0, 40, 80,
120, 160, 200, 240, 280, 320 and 360.
'''
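# Hedged illustration (not part of the original analysis): assuming the row
# ordering described above -- 100 conditions as the slow axis (10 polar angles,
# each with 10 longitude angles) and 1000 noise repeats as the fast axis, which
# appears consistent with the reshape((100,1000)) calls further down -- a row
# index can be mapped back to its condition like this.
def simdata_row_to_condition(row, n_repeats=1000, n_longitudes=10):
    '''Return (polar_index, longitude_index, repeat) for a SimData row index.'''
    condition, repeat = divmod(row, n_repeats)
    polar_index, longitude_index = divmod(condition, n_longitudes)
    return polar_index, longitude_index, repeat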
#new complete SimVoxels files
simdata = ['fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']
simdir = '/home/ian/Data/SimVoxels/'
def gq_tn_calc_save():
for simfile in simdata:
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
b_vals_dirs=np.loadtxt(marta_table_fname)
bvals=b_vals_dirs[:,0]*1000
gradients=b_vals_dirs[:,1:]
gq = dgqs.GeneralizedQSampling(sim_data,bvals,gradients)
gqfile = simdir+'gq/'+dataname+'.pkl'
pkl.save_pickle(gqfile,gq)
'''
gq.IN gq.__doc__ gq.glob_norm_param
gq.QA gq.__init__ gq.odf
gq.__class__ gq.__module__ gq.q2odf_params
'''
tn = ddti.Tensor(sim_data,bvals,gradients)
tnfile = simdir+'tn/'+dataname+'.pkl'
pkl.save_pickle(tnfile,tn)
'''
tn.ADC tn.__init__ tn._getevals
tn.B tn.__module__ tn._getevecs
tn.D tn.__new__ tn._getndim
tn.FA tn.__reduce__ tn._getshape
tn.IN tn.__reduce_ex__ tn._setevals
tn.MD tn.__repr__ tn._setevecs
tn.__class__ tn.__setattr__ tn.adc
tn.__delattr__ tn.__sizeof__ tn.evals
tn.__dict__ tn.__str__ tn.evecs
tn.__doc__ tn.__subclasshook__ tn.fa
tn.__format__ tn.__weakref__ tn.md
tn.__getattribute__ tn._evals tn.ndim
tn.__getitem__ tn._evecs tn.shape
tn.__hash__ tn._getD
'''
''' The file has one row for every voxel; each voxel is repeated 1000
times with the same noise level, and there are 100 different
directions, so 100 * 1000 is the total number of rows.
At the moment this module is hardwired to the use of the EDS362
spherical mesh. I am assuming (needs testing) that directions 181 to 361
are the antipodal partners of directions 0 to 180. So when counting the
number of different vertices that occur as maximal directions we will map
the indices modulo 181.
'''
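# Hedged worked example of the modulo-181 identification described above
# (assuming the EDS362 mesh stores vertex i and its antipode at i and i + 181):
# np.remainder(np.array([0, 5, 181, 186]), 181) gives array([0, 5, 0, 5]), so a
# maximum at vertex 186 is counted as the same direction as one at vertex 5,
# which is what analyze_maxima() below does when it computes
# len(set(np.remainder(indices[direction,:], 181))).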
def analyze_maxima(indices, max_dirs, subsets):
'''This calculates the eigenstats for each of the replicated batches
of the simulation data
'''
results = []
for direction in subsets:
batch = max_dirs[direction,:,:]
index_variety = np.array([len(set(np.remainder(indices[direction,:],181)))])
#normed_centroid, polar_centroid, centre, b1 = sphats.eigenstats(batch)
centre, b1 = sphats.eigenstats(batch)
# make azimuth be in range (0,360) rather than (-180,180)
centre[1] += 360*(centre[1] < 0)
#results.append(np.concatenate((normed_centroid, polar_centroid, centre, b1, index_variety)))
results.append(np.concatenate((centre, b1, index_variety)))
return results
#dt_first_directions = tn.evecs[:,:,0].reshape((100,1000,3))
# these are the principal directions for the full set of simulations
#gq_tn_calc_save()
#eds=np.load(os.path.join(os.path.dirname(dp.__file__),'core','matrices','evenly_distributed_sphere_362.npz'))
from dipy.data import get_sphere
odf_vertices,odf_faces=get_sphere('symmetric362')
#odf_vertices=eds['vertices']
def run_comparisons(sample_data=35):
for simfile in [simdata[sample_data]]:
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
gqfile = simdir+'gq/'+dataname+'.pkl'
gq = pkl.load_pickle(gqfile)
tnfile = simdir+'tn/'+dataname+'.pkl'
tn = pkl.load_pickle(tnfile)
dt_first_directions_in=odf_vertices[tn.IN]
dt_indices = tn.IN.reshape((100,1000))
dt_results = analyze_maxima(dt_indices, dt_first_directions_in.reshape((100,1000,3)),range(10,90))
gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000))
gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')]
#print gq_first_directions_in.shape
gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(10,90))
#for gqi see example dicoms_2_tracks gq.IN[:,0]
np.set_printoptions(precision=3, suppress=True, linewidth=200, threshold=5000)
out = open('/home/ian/Data/SimVoxels/Out/'+'***_'+dataname,'w')
#print np.vstack(dt_results).shape, np.vstack(gq_results).shape
results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
#print results.shape
#results = np.vstack(dt_results)
print >> out, results[:,:]
out.close()
#up = dt_batch[:,2]>= 0
#splots.plot_sphere(dt_batch[up], 'batch '+str(direction))
#splots.plot_lambert(dt_batch[up],'batch '+str(direction), centre)
#spread = gq.q2odf_params e,v = np.linalg.eigh(np.dot(spread,spread.transpose())) effective_dimension = len(find(np.cumsum(e) > 0.05*np.sum(e))) #95%
#rotated = np.dot(dt_batch,evecs)
#rot_evals, rot_evecs = np.linalg.eig(np.dot(rotated.T,rotated)/rotated.shape[0])
#eval_order = np.argsort(rot_evals)
#rotated = rotated[:,eval_order]
#up = rotated[:,2]>= 0
#splot.plot_sphere(rotated[up],'first1000')
#splot.plot_lambert(rotated[up],'batch '+str(direction))
def run_gq_sims(sample_data=[35,23,46,39,40,10,37,27,21,20]):
results = []
out = open('/home/ian/Data/SimVoxels/Out/'+'npa+fa','w')
for j in range(len(sample_data)):
sample = sample_data[j]
simfile = simdata[sample]
dataname = simfile
print dataname
sim_data=np.loadtxt(simdir+dataname)
marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
b_vals_dirs=np.loadtxt(marta_table_fname)
bvals=b_vals_dirs[:,0]*1000
gradients=b_vals_dirs[:,1:]
for j in np.vstack((np.arange(100)*1000,np.arange(100)*1000+1)).T.ravel():
# 0,1,1000,1001,2000,2001,...
s = sim_data[j,:]
gqs = dp.GeneralizedQSampling(s.reshape((1,102)),bvals,gradients,Lambda=3.5)
tn = dp.Tensor(s.reshape((1,102)),bvals,gradients,fit_method='LS')
t0, t1, t2, npa = gqs.npa(s, width = 5)
print >> out, dataname, j, npa, tn.fa()[0]
'''
for (i,o) in enumerate(gqs.odf(s)):
print i,o
for (i,o) in enumerate(gqs.odf_vertices):
print i,o
'''
#o = gqs.odf(s)
#v = gqs.odf_vertices
#pole = v[t0[0]]
#eqv = dgqs.equatorial_zone_vertices(v, pole, 5)
#print 'Number of equatorial vertices: ', len(eqv)
#print np.max(o[eqv]),np.min(o[eqv])
#cos_e_pole = [np.dot(pole.T, v[i]) for i in eqv]
#print np.min(cos1), np.max(cos1)
#print 'equatorial max in equatorial vertices:', t1[0] in eqv
#x = np.cross(v[t0[0]],v[t1[0]])
#x = x/np.sqrt(np.sum(x**2))
#print x
#ptchv = dgqs.patch_vertices(v, x, 5)
#print len(ptchv)
#eqp = eqv[np.argmin([np.abs(np.dot(v[t1[0]].T,v[p])) for p in eqv])]
#print (eqp, o[eqp])
#print t2[0] in ptchv, t2[0] in eqv
#print np.dot(pole.T, v[t1[0]]), np.dot(pole.T, v[t2[0]])
#print ptchv[np.argmin([o[v] for v in ptchv])]
#gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000))
#gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')]
#print gq_first_directions_in.shape
#gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(100))
#for gqi see example dicoms_2_tracks gq.IN[:,0]
#np.set_printoptions(precision=6, suppress=True, linewidth=200, threshold=5000)
#out = open('/home/ian/Data/SimVoxels/Out/'+'+++_'+dataname,'w')
#results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
#results = np.vstack(dt_results)
#print >> out, results[:,:]
out.close()
def run_small_data():
#smalldir = '/home/ian/Devel/dipy/dipy/data/'
smalldir = '/home/eg309/Devel/dipy/dipy/data/'
# from os.path import join as opj
# bvals=np.load(opj(os.path.dirname(__file__), \
# 'data','small_64D.bvals.npy'))
bvals=np.load(smalldir+'small_64D.bvals.npy')
# gradients=np.load(opj(os.path.dirname(__file__), \
# 'data','small_64D.gradients.npy'))
gradients=np.load(smalldir+'small_64D.gradients.npy')
# img =ni.load(os.path.join(os.path.dirname(__file__),\
# 'data','small_64D.nii'))
img=nibabel.load(smalldir+'small_64D.nii')
small_data=img.get_data()
print 'real_data', small_data.shape
gqsmall = dgqs.GeneralizedQSampling(small_data,bvals,gradients)
tnsmall = ddti.Tensor(small_data,bvals,gradients)
x,y,z,a,b=tnsmall.evecs.shape
evecs=tnsmall.evecs
xyz=x*y*z
evecs = evecs.reshape(xyz,3,3)
#vs = np.sign(evecs[:,2,:])
#print vs.shape
#print np.hstack((vs,vs,vs)).reshape(1000,3,3).shape
#evecs = np.hstack((vs,vs,vs)).reshape(1000,3,3)
#print evecs.shape
evals=tnsmall.evals
evals = evals.reshape(xyz,3)
#print evals.shape
#print('GQS in %d' %(t2-t1))
'''
eds=np.load(opj(os.path.dirname(__file__),\
'..','matrices',\
'evenly_distributed_sphere_362.npz'))
'''
from dipy.data import get_sphere
odf_vertices,odf_faces=get_sphere('symmetric362')
#odf_vertices=eds['vertices']
#odf_faces=eds['faces']
#Yeh et.al, IEEE TMI, 2010
#calculate the odf using GQI
scaling=np.sqrt(bvals*0.01506) # 0.01506 = 6*D where D is the free
#water diffusion coefficient
#l_values sqrt(6 D tau) D free water
#diffusion coefficiet and tau included in the b-value
tmp=np.tile(scaling,(3,1))
b_vector=gradients.T*tmp
Lambda = 1.2 # smoothing parameter - diffusion sampling length
q2odf_params=np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda/np.pi)
#implements equation no. 9 from Yeh et.al.
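    # Shape note (hedged, derived from the arrays built above): b_vector is
    # (3, n_gradients) and odf_vertices is (362, 3), so q2odf_params has shape
    # (n_gradients, 362); np.dot(s, q2odf_params) in Q2odf() therefore yields
    # one ODF value per sphere vertex for a single voxel signal s.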
S=small_data.copy()
x,y,z,g=S.shape
S=S.reshape(x*y*z,g)
QA = np.zeros((x*y*z,5))
IN = np.zeros((x*y*z,5))
FA = tnsmall.fa().reshape(x*y*z)
fwd = 0
#Calculate Quantitative Anisotropy and find the peaks and the indices
#for every voxel
summary = {}
summary['vertices'] = odf_vertices
v = odf_vertices.shape[0]
summary['faces'] = odf_faces
f = odf_faces.shape[0]
for (i,s) in enumerate(S):
#print 'Volume %d' % i
istr = str(i)
summary[istr] = {}
t0, t1, t2, npa = gqsmall.npa(s, width = 5)
summary[istr]['triple']=(t0,t1,t2)
summary[istr]['npa']=npa
odf = Q2odf(s,q2odf_params)
peaks,inds=rp.peak_finding(odf,odf_faces)
fwd=max(np.max(odf),fwd)
#peaks = peaks - np.min(odf)
n_peaks=min(len(peaks),5)
peak_heights = [odf[i] for i in inds[:n_peaks]]
#QA[i][:l] = peaks[:n_peaks]
IN[i][:n_peaks] = inds[:n_peaks]
summary[istr]['odf'] = odf
summary[istr]['peaks'] = peaks
summary[istr]['inds'] = inds
summary[istr]['evecs'] = evecs[i,:,:]
summary[istr]['evals'] = evals[i,:]
summary[istr]['n_peaks'] = n_peaks
summary[istr]['peak_heights'] = peak_heights
# summary[istr]['fa'] = tnsmall.fa()[0]
summary[istr]['fa'] = FA[i]
'''
QA/=fwd
QA=QA.reshape(x,y,z,5)
IN=IN.reshape(x,y,z,5)
'''
peaks_1 = [i for i in range(1000) if summary[str(i)]['n_peaks']==1]
peaks_2 = [i for i in range(1000) if summary[str(i)]['n_peaks']==2]
peaks_3 = [i for i in range(1000) if summary[str(i)]['n_peaks']==3]
#peaks_2 = [i for i in range(1000) if len(summary[str(i)]['inds'])==2]
#peaks_3 = [i for i in range(1000) if len(summary[str(i)]['inds'])==3]
print '#voxels with 1, 2, 3 peaks', len(peaks_1),len(peaks_2),len(peaks_3)
return FA, summary
def Q2odf(s,q2odf_params):
''' construct odf for a voxel '''
odf=np.dot(s,q2odf_params)
return odf
#run_comparisons()
#run_gq_sims()
FA, summary = run_small_data()
peaks_1 = [i for i in range(1000) if summary[str(i)]['n_peaks']==1]
peaks_2 = [i for i in range(1000) if summary[str(i)]['n_peaks']==2]
peaks_3 = [i for i in range(1000) if summary[str(i)]['n_peaks']==3]
fa_npa_1 = [[summary[str(i)]['fa'], summary[str(i)]['npa'], summary[str(i)]['peak_heights']] for i in peaks_1]
fa_npa_2 = [[summary[str(i)]['fa'], summary[str(i)]['npa'], summary[str(i)]['peak_heights']] for i in peaks_2]
fa_npa_3 = [[summary[str(i)]['fa'], summary[str(i)]['npa'], summary[str(i)]['peak_heights']] for i in peaks_3]
| bsd-3-clause |
gsmaxwell/phase_offset_rx | gnuradio-core/src/examples/pfb/fmtest.py | 17 | 7785 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys, math, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class fmtx(gr.hier_block2):
def __init__(self, lo_freq, audio_rate, if_rate):
gr.hier_block2.__init__(self, "build_fm",
gr.io_signature(1, 1, gr.sizeof_float), # Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
fmtx = blks2.nbfm_tx (audio_rate, if_rate, max_dev=5e3, tau=75e-6)
# Local oscillator
lo = gr.sig_source_c (if_rate, # sample rate
gr.GR_SIN_WAVE, # waveform type
lo_freq, #frequency
1.0, # amplitude
0) # DC Offset
mixer = gr.multiply_cc ()
self.connect (self, fmtx, (mixer, 0))
self.connect (lo, (mixer, 1))
self.connect (mixer, self)
class fmtest(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._nsamples = 1000000
self._audio_rate = 8000
# Set up N channels with their own baseband and IF frequencies
self._N = 5
chspacing = 16000
freq = [10, 20, 30, 40, 50]
f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing]
self._if_rate = 4*self._N*self._audio_rate
# Create a signal source and frequency modulate it
self.sum = gr.add_cc ()
for n in xrange(self._N):
sig = gr.sig_source_f(self._audio_rate, gr.GR_SIN_WAVE, freq[n], 0.5)
fm = fmtx(f_lo[n], self._audio_rate, self._if_rate)
self.connect(sig, fm)
self.connect(fm, (self.sum, n))
self.head = gr.head(gr.sizeof_gr_complex, self._nsamples)
self.snk_tx = gr.vector_sink_c()
self.channel = blks2.channel_model(0.1)
self.connect(self.sum, self.head, self.channel, self.snk_tx)
        # Design the channelizer
self._M = 10
bw = chspacing/2.0
t_bw = chspacing/10.0
self._chan_rate = self._if_rate / self._M
self._taps = gr.firdes.low_pass_2(1, self._if_rate, bw, t_bw,
attenuation_dB=100,
window=gr.firdes.WIN_BLACKMAN_hARRIS)
tpc = math.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
self.pfb = blks2.pfb_channelizer_ccf(self._M, self._taps)
self.connect(self.channel, self.pfb)
        # Create an NBFM receiver, squelch block, and vector sink for each of the M channelizer outputs and connect them
self.fmdet = list()
self.squelch = list()
self.snks = list()
for i in xrange(self._M):
self.fmdet.append(blks2.nbfm_rx(self._audio_rate, self._chan_rate))
self.squelch.append(blks2.standard_squelch(self._audio_rate*10))
self.snks.append(gr.vector_sink_f())
self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i])
def num_tx_channels(self):
return self._N
def num_rx_channels(self):
return self._M
def main():
fm = fmtest()
tstart = time.time()
fm.run()
tend = time.time()
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 100000
fftlen = 8192
winfunc = scipy.blackman
# Plot transmitted signal
fs = fm._if_rate
d = fm.snk_tx.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
visible=False)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-120.0, 20.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-5, 5])
# Set up the number of rows and columns for plotting the subfigures
Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels())))
Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols))
if(fm.num_rx_channels() % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = fm._audio_rate
for i in xrange(len(fm.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = fm.snks[i].data()[Ns:Ne]
sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i)
X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
visible=False)
#X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
X_o = 10.0*scipy.log10(abs(X))
#f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
f_o = scipy.arange(0, fs_o/2.0, fs_o/2.0/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+0.1])
sp2_f.set_ylim([-120.0, 20.0])
sp2_f.grid(True)
sp2_f.set_title(("Channel %d" % i), weight="bold")
sp2_f.set_xlabel("Frequency (kHz)")
sp2_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs_o
Tmax = len(d)*Ts
t_o = scipy.arange(0, Tmax, Ts)
x_t = scipy.array(d)
sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i)
p2_t = sp2_t.plot(t_o, x_t.real, "b")
p2_t = sp2_t.plot(t_o, x_t.imag, "r")
sp2_t.set_xlim([min(t_o), max(t_o)+1])
sp2_t.set_ylim([-1, 1])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
wateraccounting/wa | Collect/CFSR/DataAccess_CFSR.py | 1 | 8868 | # -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2016
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Collect/CFSR
"""
# General modules
import pandas as pd
import os
import numpy as np
from netCDF4 import Dataset
import re
from joblib import Parallel, delayed
# WA+ modules
from wa.Collect.CFSR.Download_data_CFSR import Download_data
from wa.General import data_conversions as DC
def CollectData(Dir, Var, Startdate, Enddate, latlim, lonlim, Waitbar, cores, Version):
"""
This function collects daily CFSR data in geotiff format
Keyword arguments:
Dir -- 'C:/file/to/path/'
Var -- 'dlwsfc','dswsfc','ulwsfc', or 'uswsfc'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
    latlim -- [ymin, ymax] (values must be between -89.9 and 89.9)
lonlim -- [xmin, xmax] (values must be between -180 and 180)
Waitbar -- 1 (Default) will print a wait bar
cores -- The number of cores used to run the routine.
It can be 'False' to avoid using parallel computing
routines.
Version -- 1 or 2 (1 = CFSR, 2 = CFSRv2)
"""
    # Creates an array of the days for which the radiation data is collected
Dates = pd.date_range(Startdate,Enddate,freq = 'D')
# Create Waitbar
if Waitbar == 1:
import wa.Functions.Start.WaitbarConsole as WaitbarConsole
total_amount = len(Dates)
amount = 0
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
# For collecting CFSR data
if Version == 1:
# Check the latitude and longitude and otherwise set lat or lon on greatest extent
if latlim[0] < -89.9171038899 or latlim[1] > 89.9171038899:
print 'Latitude above 89.917N or below 89.917S is not possible. Value set to maximum'
latlim[0] = np.maximum(latlim[0],-89.9171038899)
latlim[1] = np.minimum(latlim[1],89.9171038899)
if lonlim[0] < -180 or lonlim[1] > 179.843249782:
print 'Longitude must be between 179.84E and 179.84W. Now value is set to maximum'
lonlim[0] = np.maximum(lonlim[0],-180)
lonlim[1] = np.minimum(lonlim[1],179.843249782)
# Make directory for the CFSR data
output_folder=os.path.join(Dir,'Radiation','CFSR')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# For collecting CFSRv2 data
if Version == 2:
# Check the latitude and longitude and otherwise set lat or lon on greatest extent
if latlim[0] < -89.9462116040955806 or latlim[1] > 89.9462116040955806:
            print 'Latitude above 89.946N or below 89.946S is not possible. Value set to maximum'
latlim[0] = np.maximum(latlim[0],-89.9462116040955806)
latlim[1] = np.minimum(latlim[1],89.9462116040955806)
if lonlim[0] < -180 or lonlim[1] > 179.8977275:
print 'Longitude must be between 179.90E and 179.90W. Now value is set to maximum'
lonlim[0] = np.maximum(lonlim[0],-180)
lonlim[1] = np.minimum(lonlim[1],179.8977275)
# Make directory for the CFSRv2 data
output_folder=os.path.join(Dir,'Radiation','CFSRv2')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# Pass variables to parallel function and run
args = [output_folder, latlim, lonlim, Var, Version]
if not cores:
for Date in Dates:
RetrieveData(Date, args)
if Waitbar == 1:
amount += 1
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
results = True
else:
results = Parallel(n_jobs=cores)(delayed(RetrieveData)(Date, args)
for Date in Dates)
# Remove all .nc and .grb2 files
for f in os.listdir(output_folder):
if re.search(".nc", f):
os.remove(os.path.join(output_folder, f))
for f in os.listdir(output_folder):
if re.search(".grb2", f):
os.remove(os.path.join(output_folder, f))
for f in os.listdir(output_folder):
if re.search(".grib2", f):
os.remove(os.path.join(output_folder, f))
return results
def RetrieveData(Date, args):
# unpack the arguments
[output_folder, latlim, lonlim, Var, Version] = args
# Name of the model
if Version == 1:
version_name = 'CFSR'
if Version == 2:
version_name = 'CFSRv2'
# Name of the outputfile
if Var == 'dlwsfc':
Outputname = 'DLWR_%s_W-m2_' %version_name + str(Date.strftime('%Y')) + '.' + str(Date.strftime('%m')) + '.' + str(Date.strftime('%d')) + '.tif'
if Var == 'dswsfc':
Outputname = 'DSWR_%s_W-m2_' %version_name + str(Date.strftime('%Y')) + '.' + str(Date.strftime('%m')) + '.' + str(Date.strftime('%d')) + '.tif'
if Var == 'ulwsfc':
Outputname = 'ULWR_%s_W-m2_' %version_name + str(Date.strftime('%Y')) + '.' + str(Date.strftime('%m')) + '.' + str(Date.strftime('%d')) + '.tif'
if Var == 'uswsfc':
Outputname = 'USWR_%s_W-m2_' %version_name + str(Date.strftime('%Y')) + '.' + str(Date.strftime('%m')) + '.' + str(Date.strftime('%d')) + '.tif'
# Create the total end output name
outputnamePath = os.path.join(output_folder, Outputname)
# If the output name not exists than create this output
if not os.path.exists(outputnamePath):
local_filename = Download_data(Date, Version, output_folder, Var)
# convert grb2 to netcdf (wgrib2 module is needed)
for i in range(0,4):
nameNC = 'Output' + str(Date.strftime('%Y')) + str(Date.strftime('%m')) + str(Date.strftime('%d')) + '-' + str(i+1) + '.nc'
# Total path of the output
FileNC6hour = os.path.join(output_folder, nameNC)
# Band number of the grib data which is converted in .nc
band=(int(Date.strftime('%d')) - 1) * 28 + (i + 1) * 7
# Convert the data
DC.Convert_grb2_to_nc(local_filename, FileNC6hour, band)
if Version == 1:
if Date < pd.Timestamp(pd.datetime(2011, 01, 01)):
# Convert the latlim and lonlim into array
Xstart = np.floor((lonlim[0] + 180.1562497) / 0.3125)
Xend = np.ceil((lonlim[1] + 180.1562497) / 0.3125) + 1
Ystart = np.floor((latlim[0] + 89.9171038899) / 0.3122121663)
Yend = np.ceil((latlim[1] + 89.9171038899) / 0.3122121663)
# Create a new dataset
Datatot = np.zeros([576, 1152])
else:
Version = 2
if Version == 2:
# Convert the latlim and lonlim into array
Xstart = np.floor((lonlim[0] + 180.102272725) / 0.204545)
Xend = np.ceil((lonlim[1] + 180.102272725) / 0.204545) + 1
Ystart = np.floor((latlim[0] + 89.9462116040955806) / 0.204423)
Yend = np.ceil((latlim[1] + 89.9462116040955806) / 0.204423)
# Create a new dataset
Datatot = np.zeros([880, 1760])
# Open 4 times 6 hourly dataset
for i in range (0, 4):
nameNC = 'Output' + str(Date.strftime('%Y')) + str(Date.strftime('%m')) + str(Date.strftime('%d')) + '-' + str(i + 1) + '.nc'
FileNC6hour = os.path.join(output_folder, nameNC)
f = Dataset(FileNC6hour, mode = 'r')
Data = f.variables['Band1'][0:int(Datatot.shape[0]), 0:int(Datatot.shape[1])]
f.close()
data = np.array(Data)
Datatot = Datatot + data
# Calculate the average in W/m^2 over the day
DatatotDay = Datatot / 4
DatatotDayEnd = np.zeros([int(Datatot.shape[0]), int(Datatot.shape[1])])
DatatotDayEnd[:,0:int(Datatot.shape[0])] = DatatotDay[:, int(Datatot.shape[0]):int(Datatot.shape[1])]
DatatotDayEnd[:,int(Datatot.shape[0]):int(Datatot.shape[1])] = DatatotDay[:, 0:int(Datatot.shape[0])]
        # clip the data to the extent defined by the user
DatasetEnd = DatatotDayEnd[int(Ystart):int(Yend), int(Xstart):int(Xend)]
# save file
if Version == 1:
pixel_size = 0.3125
if Version == 2:
pixel_size = 0.204545
geo = [lonlim[0],pixel_size,0,latlim[1],0,-pixel_size]
DC.Save_as_tiff(data = np.flipud(DatasetEnd), name = outputnamePath, geo = geo, projection = "WGS84")
return()
| apache-2.0 |
jbloom/mutpath | src/plot.py | 1 | 10257 | """Module for performing plotting for ``mutpath`` package.
This module uses ``pylab`` and ``matplotlib`` to make plots. These plots will
fail if ``pylab`` and ``matplotlib`` are not available for importation. Before
running any function in this module, you can run the *PylabAvailable*
function to determine if ``pylab`` and ``matplotlib`` are available. Otherwise,
calling any other function will raise an Exception if thise modules are
not available. The ``pdf`` backend is used for ``matplotlib`` / ``pylab``. This means
that plots must be created as PDF files.
Functions are:
`PylabAvailable`
`CumulativeFractionPlot`
`DatesPlot`
`Base10Formatter`
`SplitLabel`
Written by Jesse Bloom.
"""
import os
import sys
import math
# global variable _pylabavailable indicates if pylab/matplotlib present
try:
import matplotlib
matplotlib.use('pdf')
import pylab
_pylabavailable = True
except ImportError:
_pylabavailable = False
def PylabAvailable():
"""Returns True if pylab/matplotlib available, False otherwise.
You should call this function to test for the availability of the
pylab/matplotlib plotting modules before using other functions in
this module.
"""
return _pylabavailable
def DatesPlot(mutdates, plotfile, interval):
"""Plots dates of mutations.
Uses pylab / matplotlib to plot the dates and credible intervals
    for mutations. Will raise an error if *PylabAvailable() == False*.
The plot is a PDF.
* *mutdates* is a list of the mutations, in the form of the tuples
*(median, mininterval, maxinterval, mut, fractoca, weight)*. Mutations
are plotted in the order they are listed. In these tuples:
* *median* : posterior median date
        * *mininterval* : minimum of credible interval
* *maxinterval* : maximum of credible interval
* *mut* : string giving name of mutation
* *fractoca* : probability mutation is on path from common ancestor
to starting sequence
* *weight* : fraction of paths containing mutation.
* *plotfile* is a string giving the name of the PDF file we create.
* *interval* is the range of the credible interval. For example, 0.9
means a 90% credible interval.
"""
ext = os.path.splitext(plotfile)[1].lower()
if ext != '.pdf':
raise ValueError("Extension must be .pdf, but found %s" % ext)
if not PylabAvailable():
raise ValueError("pylab / matplotlib not available.")
if not mutdates:
raise ValueError("no mutation dates to plot")
tocalabels = []
tocamedians = []
tocaerrlow = []
tocaerrhigh = []
tocays = []
fromcalabels = []
fromcamedians = []
fromcaerrlow = []
fromcaerrhigh = []
fromcays = []
y = 0
for (median, mininterval, maxinterval, mut, fractoca, weight) in mutdates:
label = "%s" % (mut)
errlow = median - mininterval
errhigh = maxinterval - median
if fractoca > 0.5:
tocays.append(y)
tocalabels.append(label)
tocamedians.append(median)
tocaerrlow.append(errlow)
tocaerrhigh.append(errhigh)
else:
fromcays.append(y)
fromcalabels.append(label)
fromcamedians.append(median)
fromcaerrlow.append(errlow)
fromcaerrhigh.append(errhigh)
y += 1
(lmargin, rmargin, bmargin, tmargin) = (0.11, 0.05, 0.08, 0.01)
matplotlib.rc('font', size=10)
matplotlib.rc('xtick', labelsize=10)
matplotlib.rc('ytick', labelsize=10)
matplotlib.rc('legend', numpoints=1)
matplotlib.rc('legend', fontsize=10)
fig = pylab.figure(figsize=(6, 6))
ax = pylab.axes([lmargin, bmargin, 1 - lmargin - rmargin, 1 - tmargin - bmargin])
tocabar = fromcabar = None
if tocalabels:
tocabar = pylab.errorbar(tocamedians, tocays, xerr=[tocaerrlow, tocaerrhigh], fmt='sr')
if fromcalabels:
fromcabar = pylab.errorbar(fromcamedians, fromcays, xerr=[fromcaerrlow, fromcaerrhigh], fmt='sb')
ny = len(mutdates)
pylab.gca().set_ylim((-1, ny))
pylab.gca().yaxis.set_major_locator(matplotlib.ticker.FixedLocator([y for y in range(ny)]))
pylab.gca().yaxis.set_major_formatter(matplotlib.ticker.FixedFormatter(tocalabels + fromcalabels))
pylab.gca().xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
pylab.xlabel("Date (posterior median and Bayesian %.2f%s credible interval)" % (interval * 100, '%'))
if tocabar and fromcabar:
pylab.legend([tocabar[0], fromcabar[0]], ['path to common ancestor', 'path from common ancestor'], loc='lower right')
elif tocabar:
pylab.legend([tocabar[0]], ['path to common ancestor'], loc='lower right')
elif fromcabar:
pylab.legend([fromcabar[0]], ['path from common ancestor'], loc='lower right')
pylab.savefig(plotfile)
def CumulativeFractionPlot(datalist, plotfile, title, xlabel):
"""Creates a cumulative fraction plot.
Takes a list of numeric data. Plots a cumulative fraction
plot giving the fraction of the data points that are <=
the indicated value.
*datalist* is a list of numbers giving the data for which we
are computing the cumulative fraction plot. Raises an
exception if this is an empty list.
*plotfile* is the name of the output plot file created by this method
(such as 'plot.pdf'). The extension must be '.pdf'.
*title* is a string placed above the plot as a title. Uses LaTex
formatting.
*xlabel* is the label given to the X-axis. Uses LaTex formatting.
This function uses pylab / matplotlib. It will raise an Exception if
these modules cannot be imported (if PylabAvailable() is False).
"""
if len(datalist) < 1:
raise ValueError("datalist is empty")
if not _pylabavailable:
raise ImportError("Could not find pylab or matplotlib")
if os.path.splitext(plotfile)[1] != '.pdf':
raise ValueError("plotfile must end in .pdf: %s" % plotfile)
datalist.sort() # sort from smallest to largest
(xmin, xmax) = (datalist[0], datalist[-1])
n = len(datalist)
cumfracs = []
cf = 0.0
for x in datalist:
cf += 1. / n
cumfracs.append(cf)
assert len(datalist) == len(cumfracs)
assert abs(1.0 - cf) < 1e-7
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=12)
fig = pylab.figure(figsize=(6, 4))
(lmargin, rmargin, bmargin, tmargin) = (0.1, 0.01, 0.15, 0.1)
ax = pylab.axes([lmargin, bmargin, 1 - lmargin - rmargin, 1 -\
bmargin - tmargin])
pylab.plot(datalist, cumfracs, 'r-')
pylab.gca().set_ylim([0, 1])
pylab.gca().set_xlim([xmin, xmax])
pylab.ylabel('cumulative fraction')
pylab.xlabel(xlabel)
pylab.title(title)
if plotfile:
pylab.savefig(plotfile)
pylab.clf()
pylab.close()
def Base10Formatter(number, exp_cutoff, exp_decimal_digits, decimal_digits):
"""Converts a number into Latex formatting with scientific notation.
Takes a number and converts it to a string that can be shown
in LaTex using math mode. It is converted to scientific notation
    if it meets the criterion specified by exp_cutoff.
*number* the number to be formatted, should be a float or integer.
Currently only works for numbers >= 0
*exp_cutoff* convert to scientific notation if abs(math.log10(number)) >= this.
*exp_decimal_digits* show this many digits after the decimal if number
is converted to scientific notation.
*decimal_digits* show this many digits after the decimal if number
is NOT converted to scientific notation.
The returned value is the LaTex' string. If the number is zero, the
returned string is simply '0'.
>>> Base10Formatter(103, 3, 1, 1)
'103.0'
>>> Base10Formatter(103.0, 2, 1, 1)
'1.0 \\\\times 10^{2}'
>>> Base10Formatter(103.0, 2, 2, 1)
'1.03 \\\\times 10^{2}'
>>> Base10Formatter(2892.3, 3, 1, 1)
'2.9 \\\\times 10^{3}'
>>> Base10Formatter(0.0, 3, 1, 1)
'0'
>>> Base10Formatter(0.012, 2, 1, 1)
'1.2 \\\\times 10^{-2}'
>>> Base10Formatter(-0.1, 3, 1, 1)
Traceback (most recent call last):
...
ValueError: number must be >= 0
"""
if number < 0:
raise ValueError('number must be >= 0')
if number == 0:
return '0'
exponent = int(math.log10(number))
if math.log10(number) < exponent and number < 1:
exponent -= 1
if abs(exponent) >= exp_cutoff:
x = number / (10.**exponent)
formatstr = '%.' + '%d' % exp_decimal_digits + 'f \\times 10^{%d}'
return formatstr % (x, exponent)
else:
formatstr = '%.' + '%d' % decimal_digits + 'f'
return formatstr % number
def SplitLabel(label, splitlen, splitchar):
"""Splits a string with a return if it exceeds a certain length.
*label* a string giving the label we might split.
*splitlen* the maximum length of a label before we attempt to
split it.
*splitchar* the character added when splitting a label.
If len(*label*) > *splitlen*, we attempt to split the label in the
middle by adding *splitchar*. The label is split as close to the
middle as possible while splitting at a space.
No splitting as label length less than *splitlen*
>>> SplitLabel('WT virus 1', 10, '\\n')
'WT virus 1'
Splitting of this label
>>> SplitLabel('WT plasmid 1', 10, '\\n')
'WT\\nplasmid 1'
Splitting of this label
>>> SplitLabel('mutated WT plasmid 1', 10, '\\n')
'mutated WT\\nplasmid 1'
"""
if len(label) <= splitlen:
return label
else:
j = 0
imid = len(label) // 2
index = None
while 0 <= imid - j <= imid + j < len(label):
if label[imid - j].isspace():
return "%s%s%s" % (label[ : imid - j], splitchar, label[imid - j + 1 : ])
elif label[imid + j].isspace():
return "%s%s%s" % (label[ : imid + j], splitchar, label[imid + j + 1 : ])
j += 1
else:
return label # no white space to split
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 |
costypetrisor/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
JesseLivezey/plankton | pylearn2/packaged_dependencies/theano_linear/unshared_conv/localdot.py | 5 | 4839 | """
WRITEME
"""
import logging
from ..linear import LinearTransform
from .unshared_conv import FilterActs, ImgActs
from theano.compat.six.moves import xrange
from theano.sandbox import cuda
if cuda.cuda_available:
import gpu_unshared_conv # register optimizations
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
pass
logger = logging.getLogger(__name__)
class LocalDot(LinearTransform):
"""
    LocalDot is a linear operation computationally similar to
    convolution in the spatial domain, except that whereas convolution
    applies a single filter or set of filters across an image, the
    LocalDot has different filterbanks for different points in the image.
Mathematically, this is a general linear transform except for a
restriction that filters are 0 outside of a spatially localized patch
within the image.
Image shape is 5-tuple:
color_groups
colors_per_group
rows
cols
images
Filterbank shape is 7-tuple (!)
0 row_positions
1 col_positions
2 colors_per_group
3 height
4 width
5 color_groups
6 filters_per_group
    The result of left-multiplication is a 5-tuple with shape:
filter_groups
filters_per_group
row_positions
col_positions
images
Parameters
----------
filters : WRITEME
irows : WRITEME
Image rows
icols : WRITEME
Image columns
subsample : WRITEME
padding_start : WRITEME
filters_shape : WRITEME
message : WRITEME
"""
def __init__(self, filters, irows, icols=None,
subsample=(1, 1),
padding_start=None,
filters_shape=None,
message=""):
LinearTransform.__init__(self, [filters])
self._filters = filters
if filters_shape is None:
self._filters_shape = tuple(filters.get_value(borrow=True).shape)
else:
self._filters_shape = tuple(filters_shape)
self._irows = irows
if icols is None:
self._icols = irows
else:
self._icols = icols
if self._icols != self._irows:
raise NotImplementedError('GPU code at least needs square imgs')
self._subsample = tuple(subsample)
self._padding_start = padding_start
if len(self._filters_shape) != 7:
raise TypeError('need 7-tuple filter shape', self._filters_shape)
if self._subsample[0] != self._subsample[1]:
raise ValueError('subsampling must be same in rows and cols')
self._filter_acts = FilterActs(self._subsample[0])
self._img_acts = ImgActs(module_stride=self._subsample[0])
if message:
self._message = message
else:
self._message = filters.name
def rmul(self, x):
"""
.. todo::
WRITEME
"""
assert x.ndim == 5
return self._filter_acts(x, self._filters)
def rmul_T(self, x):
"""
.. todo::
WRITEME
"""
return self._img_acts(self._filters, x, self._irows, self._icols)
def col_shape(self):
"""
.. todo::
WRITEME
"""
ishape = self.row_shape() + (-99,)
fshape = self._filters_shape
hshape, = self._filter_acts.infer_shape(None, (ishape, fshape))
assert hshape[-1] == -99
return hshape[:-1]
def row_shape(self):
"""
.. todo::
WRITEME
"""
fshape = self._filters_shape
fmodulesR, fmodulesC, fcolors, frows, fcols = fshape[:-2]
fgroups, filters_per_group = fshape[-2:]
return fgroups, fcolors, self._irows, self._icols
def print_status(self):
"""
.. todo::
WRITEME
"""
raise NotImplementedError("TODO: fix dependence on non-existent "
"ndarray_status function")
"""print ndarray_status(
self._filters.get_value(borrow=True),
msg='%s{%s}'% (self.__class__.__name__,
self._message))
"""
def imshow_gray(self):
"""
.. todo::
WRITEME
"""
filters = self._filters.get_value()
modR, modC, colors, rows, cols, grps, fs_per_grp = filters.shape
logger.info(filters.shape)
rval = np.zeros((
modR * (rows + 1) - 1,
modC * (cols + 1) - 1,
))
for rr, modr in enumerate(xrange(0, rval.shape[0], rows + 1)):
for cc, modc in enumerate(xrange(0, rval.shape[1], cols + 1)):
rval[modr:modr + rows, modc:modc + cols] = filters[rr, cc, 0, :, :, 0, 0]
plt.imshow(rval, cmap='gray')
return rval
| bsd-3-clause |
sriki18/scipy | scipy/signal/_max_len_seq.py | 41 | 4942 | # Author: Eric Larson
# 2014
"""Tools for MLS generation"""
import numpy as np
from ._max_len_seq_inner import _max_len_seq_inner
__all__ = ['max_len_seq']
# These are definitions of linear shift register taps for use in max_len_seq()
_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
31: [28], 32: [31, 30, 10]}
def max_len_seq(nbits, state=None, length=None, taps=None):
"""
Maximum length sequence (MLS) generator.
Parameters
----------
nbits : int
Number of bits to use. Length of the resulting sequence will
be ``(2**nbits) - 1``. Note that generating long sequences
(e.g., greater than ``nbits == 16``) can take a long time.
state : array_like, optional
If array, must be of length ``nbits``, and will be cast to binary
(bool) representation. If None, a seed of ones will be used,
producing a repeatable representation. If ``state`` is all
zeros, an error is raised as this is invalid. Default: None.
length : int, optional
Number of samples to compute. If None, the entire length
``(2**nbits) - 1`` is computed.
taps : array_like, optional
Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
If None, taps will be automatically selected (for up to
``nbits == 32``).
Returns
-------
seq : array
Resulting MLS sequence of 0's and 1's.
state : array
The final state of the shift register.
Notes
-----
The algorithm for MLS generation is generically described in:
https://en.wikipedia.org/wiki/Maximum_length_sequence
The default values for taps are specifically taken from the first
option listed for each value of ``nbits`` in:
http://www.newwaveinstruments.com/resources/articles/
m_sequence_linear_feedback_shift_register_lfsr.htm
.. versionadded:: 0.15.0
Examples
--------
MLS uses binary convention:
>>> from scipy.signal import max_len_seq
>>> max_len_seq(4)[0]
array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
MLS has a white spectrum (except for DC):
>>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, ifft, fftshift, fftfreq
>>> seq = max_len_seq(6)[0]*2-1 # +1 and -1
>>> spec = fft(seq)
>>> N = len(seq)
>>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Circular autocorrelation of MLS is an impulse:
>>> acorrcirc = ifft(spec * np.conj(spec)).real
>>> plt.figure()
>>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Linear autocorrelation of MLS is approximately an impulse:
>>> acorr = np.correlate(seq, seq, 'full')
>>> plt.figure()
>>> plt.plot(np.arange(-N+1, N), acorr, '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
"""
if taps is None:
if nbits not in _mls_taps:
known_taps = np.array(list(_mls_taps.keys()))
raise ValueError('nbits must be between %s and %s if taps is None'
% (known_taps.min(), known_taps.max()))
taps = np.array(_mls_taps[nbits], np.intp)
else:
taps = np.unique(np.array(taps, np.intp))[::-1]
if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
raise ValueError('taps must be non-empty with values between '
'zero and nbits (inclusive)')
taps = np.ascontiguousarray(taps) # needed for Cython
n_max = (2**nbits) - 1
if length is None:
length = n_max
else:
length = int(length)
if length < 0:
raise ValueError('length must be greater than or equal to 0')
# We use int8 instead of bool here because numpy arrays of bools
# don't seem to work nicely with Cython
if state is None:
state = np.ones(nbits, dtype=np.int8, order='c')
else:
# makes a copy if need be, ensuring it's 0's and 1's
state = np.array(state, dtype=bool, order='c').astype(np.int8)
if state.ndim != 1 or state.size != nbits:
raise ValueError('state must be a 1-dimensional array of size nbits')
if np.all(state == 0):
raise ValueError('state must not be all zeros')
seq = np.empty(length, dtype=np.int8, order='c')
state = _max_len_seq_inner(taps, state, nbits, length, seq)
return seq, state
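# --- Editor's sketch (not part of the original module): a small self-check
# that the automatic tap selection for ``nbits=8`` matches the documented
# [7, 6, 1] polynomial from the table above. It assumes numpy is imported as
# ``np`` at module level, as the function body above already requires.
def _example_default_taps_match():
    seq_auto, _ = max_len_seq(8)
    seq_manual, _ = max_len_seq(8, taps=[7, 6, 1])
    return bool(np.all(seq_auto == seq_manual))  # expected: True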
| bsd-3-clause |
rexshihaoren/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
CCI-Tools/cate-core | cate/ops/index.py | 1 | 8641 |
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Description
===========
Index calculation operations
Functions
=========
"""
import xarray as xr
import pandas as pd
from cate.core.op import op, op_input
from cate.ops.select import select_var
from cate.ops.subset import subset_spatial
from cate.ops.anomaly import anomaly_external
from cate.core.types import PolygonLike, VarName, ValidationError
from cate.util.monitor import Monitor
_ALL_FILE_FILTER = dict(name='All Files', extensions=['*'])
@op(tags=['index'])
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
@op_input('var', value_set_source='ds', data_type=VarName)
def enso_nino34(ds: xr.Dataset,
var: VarName.TYPE,
file: str,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
Calculate nino34 index, which is defined as a five month running mean of
    anomalies of monthly means of SST data in the Nino3.4 region
    (lon_min=-170, lat_min=-5, lon_max=-120, lat_max=5).
:param ds: A monthly SST dataset
:param file: Path to the reference data file e.g. a climatology. A suitable reference dataset
can be generated using the long_term_average operation
    :param var: Dataset variable (geophysical quantity) to use for index
calculation.
:param threshold: If given, boolean El Nino/La Nina timeseries will be
calculated and added to the output dataset according to the given
        threshold. An anomaly larger than the positive value of the threshold
        indicates El Nino, and an anomaly smaller than the negative of the
        threshold indicates La Nina.
:param monitor: a progress monitor.
:return: A dataset that contains the index timeseries.
"""
n34 = '-170, -5, -120, 5'
name = 'ENSO N3.4 Index'
return _generic_index_calculation(ds, var, n34, 5, file, name, threshold, monitor)
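# --- Editor's sketch (not part of the original module): a minimal call
# pattern for the index operations above. The variable name 'sst' and both
# file paths are placeholders; any monthly SST dataset plus a reference
# climatology produced by the long_term_average operation should work.
def _example_nino34_usage(sst_path='sst.nc', climatology_path='sst_clim.nc'):
    ds = xr.open_dataset(sst_path)
    # Returns a pandas DataFrame holding the 5-month running-mean index and,
    # because a threshold is given, boolean 'El Nino' / 'La Nina' columns.
    return enso_nino34(ds, var='sst', file=climatology_path, threshold=0.5)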
@op(tags=['index'])
@op_input('var', value_set_source='ds', data_type=VarName)
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
@op_input('region', value_set=['N1+2', 'N3', 'N3.4', 'N4', 'custom'])
@op_input('custom_region', data_type=PolygonLike)
def enso(ds: xr.Dataset,
var: VarName.TYPE,
file: str,
         region: str = 'N3.4',
custom_region: PolygonLike.TYPE = None,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
Calculate ENSO index, which is defined as a five month running mean of
anomalies of monthly means of SST data in the given region.
:param ds: A monthly SST dataset
:param file: Path to the reference data file e.g. a climatology. A suitable reference dataset
can be generated using the long_term_average operation
:param var: Dataset variable to use for index calculation
:param region: Region for index calculation, the default is Nino3.4
:param custom_region: If 'custom' is chosen as the 'region', this parameter
has to be provided to set the desired region.
:param threshold: If given, boolean El Nino/La Nina timeseries will be
calculated and added to the output dataset, according to the given
        threshold. An anomaly larger than the positive value of the threshold
        indicates El Nino, and an anomaly smaller than the negative of the
        threshold indicates La Nina.
:param monitor: a progress monitor.
:return: A dataset that contains the index timeseries.
"""
regions = {'N1+2': '-90, -10, -80, 0',
'N3': '-150, -5, -90, 5',
'N3.4': '-170, -5, -120, 5',
'N4': '160, -5, -150, 5',
'custom': custom_region}
converted_region = PolygonLike.convert(regions[region])
if not converted_region:
raise ValidationError('No region has been provided to ENSO index calculation')
name = 'ENSO ' + region + ' Index'
if 'custom' == region:
name = 'ENSO Index over ' + PolygonLike.format(converted_region)
return _generic_index_calculation(ds, var, converted_region, 5, file, name, threshold, monitor)
@op(tags=['index'])
@op_input('var', value_set_source='ds', data_type=VarName)
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
def oni(ds: xr.Dataset,
var: VarName.TYPE,
file: str,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
Calculate ONI index, which is defined as a three month running mean of
anomalies of monthly means of SST data in the Nino3.4 region.
:param ds: A monthly SST dataset
:param file: Path to the reference data file e.g. a climatology. A suitable reference dataset
can be generated using the long_term_average operation
:param var: Dataset variable to use for index calculation
:param threshold: If given, boolean El Nino/La Nina timeseries will be
calculated and added to the output dataset, according to the given
        threshold. An anomaly larger than the positive value of the threshold
        indicates El Nino, and an anomaly smaller than the negative of the
        threshold indicates La Nina.
:param monitor: a progress monitor.
    :return: A dataset that contains the index timeseries.
"""
n34 = '-170, -5, -120, 5'
name = 'ONI Index'
return _generic_index_calculation(ds, var, n34, 3, file, name, threshold, monitor)
def _generic_index_calculation(ds: xr.Dataset,
var: VarName.TYPE,
region: PolygonLike.TYPE,
window: int,
file: str,
name: str,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
    A generic index calculation, where an index is defined as an anomaly
    against the given reference of a moving average of the given window size
    over the given region of the given variable of the given dataset.
:param ds: Dataset from which to calculate the index
:param var: Variable from which to calculate index
:param region: Spatial subset from which to calculate the index
:param window: Window size for the moving average
:param file: Path to the reference file
:param threshold: Absolute threshold that indicates an ENSO event
:param name: Name of the index
:param monitor: a progress monitor.
:return: A dataset that contains the index timeseries
"""
var = VarName.convert(var)
region = PolygonLike.convert(region)
with monitor.starting("Calculate the index", total_work=2):
ds = select_var(ds, var)
ds_subset = subset_spatial(ds, region)
anom = anomaly_external(ds_subset, file, monitor=monitor.child(1))
with monitor.child(1).observing("Calculate mean"):
ts = anom.mean(dim=['lat', 'lon'])
df = pd.DataFrame(data=ts[var].values, columns=[name], index=ts.time.values)
retval = df.rolling(window=window, center=True).mean().dropna()
if threshold is None:
return retval
retval['El Nino'] = pd.Series((retval[name] > threshold),
index=retval.index)
retval['La Nina'] = pd.Series((retval[name] < -threshold),
index=retval.index)
return retval
| mit |
xubenben/data-science-from-scratch | code/clustering.py | 60 | 6438 | from __future__ import division
from linear_algebra import squared_distance, vector_mean, distance
import math, random
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
class KMeans:
"""performs k-means clustering"""
def __init__(self, k):
self.k = k # number of clusters
self.means = None # means of clusters
def classify(self, input):
"""return the index of the cluster closest to the input"""
return min(range(self.k),
key=lambda i: squared_distance(input, self.means[i]))
def train(self, inputs):
self.means = random.sample(inputs, self.k)
assignments = None
while True:
# Find new assignments
new_assignments = map(self.classify, inputs)
# If no assignments have changed, we're done.
if assignments == new_assignments:
return
# Otherwise keep the new assignments,
assignments = new_assignments
for i in range(self.k):
i_points = [p for p, a in zip(inputs, assignments) if a == i]
# avoid divide-by-zero if i_points is empty
if i_points:
self.means[i] = vector_mean(i_points)
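# Editor's sketch (not part of the original script): the basic call pattern
# for KMeans, assuming `points` is a list of equal-length numeric vectors such
# as the `inputs` list defined under __main__ below.
def example_kmeans_usage(points, k=3, seed=0):
    random.seed(seed)          # train() samples the initial means at random
    clusterer = KMeans(k)
    clusterer.train(points)
    return clusterer.means     # list of k cluster means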
def squared_clustering_errors(inputs, k):
"""finds the total squared error from k-means clustering the inputs"""
clusterer = KMeans(k)
clusterer.train(inputs)
means = clusterer.means
assignments = map(clusterer.classify, inputs)
return sum(squared_distance(input,means[cluster])
for input, cluster in zip(inputs, assignments))
def plot_squared_clustering_errors(plt):
ks = range(1, len(inputs) + 1)
errors = [squared_clustering_errors(inputs, k) for k in ks]
plt.plot(ks, errors)
plt.xticks(ks)
plt.xlabel("k")
plt.ylabel("total squared error")
plt.show()
#
# using clustering to recolor an image
#
def recolor_image(input_file, k=5):
    img = mpimg.imread(input_file)
pixels = [pixel for row in img for pixel in row]
clusterer = KMeans(k)
clusterer.train(pixels) # this might take a while
def recolor(pixel):
cluster = clusterer.classify(pixel) # index of the closest cluster
return clusterer.means[cluster] # mean of the closest cluster
new_img = [[recolor(pixel) for pixel in row]
for row in img]
plt.imshow(new_img)
plt.axis('off')
plt.show()
#
# hierarchical clustering
#
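# Editorial note: clusters are represented as nested tuples -- a leaf is a
# 1-tuple `(input,)`, and a merged cluster is a 2-tuple
# `(merge_order, [child1, child2])`, where a smaller merge_order means the
# merge happened later (fewer clusters remained at that point).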
def is_leaf(cluster):
"""a cluster is a leaf if it has length 1"""
return len(cluster) == 1
def get_children(cluster):
"""returns the two children of this cluster if it's a merged cluster;
raises an exception if this is a leaf cluster"""
if is_leaf(cluster):
raise TypeError("a leaf cluster has no children")
else:
return cluster[1]
def get_values(cluster):
"""returns the value in this cluster (if it's a leaf cluster)
or all the values in the leaf clusters below it (if it's not)"""
if is_leaf(cluster):
return cluster # is already a 1-tuple containing value
else:
return [value
for child in get_children(cluster)
for value in get_values(child)]
def cluster_distance(cluster1, cluster2, distance_agg=min):
"""finds the aggregate distance between elements of cluster1
and elements of cluster2"""
return distance_agg([distance(input1, input2)
for input1 in get_values(cluster1)
for input2 in get_values(cluster2)])
def get_merge_order(cluster):
if is_leaf(cluster):
return float('inf')
else:
return cluster[0] # merge_order is first element of 2-tuple
def bottom_up_cluster(inputs, distance_agg=min):
# start with every input a leaf cluster / 1-tuple
clusters = [(input,) for input in inputs]
# as long as we have more than one cluster left...
while len(clusters) > 1:
# find the two closest clusters
c1, c2 = min([(cluster1, cluster2)
for i, cluster1 in enumerate(clusters)
for cluster2 in clusters[:i]],
key=lambda (x, y): cluster_distance(x, y, distance_agg))
# remove them from the list of clusters
clusters = [c for c in clusters if c != c1 and c != c2]
# merge them, using merge_order = # of clusters left
merged_cluster = (len(clusters), [c1, c2])
# and add their merge
clusters.append(merged_cluster)
# when there's only one cluster left, return it
return clusters[0]
def generate_clusters(base_cluster, num_clusters):
# start with a list with just the base cluster
clusters = [base_cluster]
# as long as we don't have enough clusters yet...
while len(clusters) < num_clusters:
# choose the last-merged of our clusters
next_cluster = min(clusters, key=get_merge_order)
# remove it from the list
clusters = [c for c in clusters if c != next_cluster]
# and add its children to the list (i.e., unmerge it)
clusters.extend(get_children(next_cluster))
# once we have enough clusters...
return clusters
if __name__ == "__main__":
inputs = [[-14,-5],[13,13],[20,23],[-19,-11],[-9,-16],[21,27],[-49,15],[26,13],[-46,5],[-34,-1],[11,15],[-49,0],[-22,-16],[19,28],[-12,-8],[-13,-19],[-41,8],[-11,-6],[-25,-9],[-18,-3]]
random.seed(0) # so you get the same results as me
clusterer = KMeans(3)
clusterer.train(inputs)
print "3-means:"
print clusterer.means
print
random.seed(0)
clusterer = KMeans(2)
clusterer.train(inputs)
print "2-means:"
print clusterer.means
print
print "errors as a function of k"
for k in range(1, len(inputs) + 1):
print k, squared_clustering_errors(inputs, k)
print
print "bottom up hierarchical clustering"
base_cluster = bottom_up_cluster(inputs)
print base_cluster
print
print "three clusters, min:"
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
print
print "three clusters, max:"
base_cluster = bottom_up_cluster(inputs, max)
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
| unlicense |
arthurmensch/modl | benchmarks/log.py | 1 | 2179 | import time
import numpy as np
from lightning.impl.primal_cd import CDClassifier
from lightning.impl.sag import SAGAClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from lightning.classification import SAGClassifier
from sklearn.linear_model import LogisticRegression
bunch = fetch_20newsgroups_vectorized(subset="all")
X = bunch.data
y = bunch.target
y[y >= 1] = 1
alpha = 1e-3
n_samples = X.shape[0]
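# Editorial note: the solvers parameterize L2 regularization differently
# (lightning's SAG/SAGA take a per-sample penalty `alpha`, CDClassifier takes
# an (alpha, C) pair, sklearn's LogisticRegression takes C), so the values
# below -- alpha / 2, C = 1 / n_samples, and C = 1 / (n_samples * alpha) --
# are chosen so that all classifiers optimize comparably scaled objectives.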
sag = SAGClassifier(eta='auto',
loss='log',
alpha=alpha,
tol=1e-10,
max_iter=1000,
verbose=1,
random_state=0)
saga = SAGAClassifier(eta='auto',
loss='log',
alpha=alpha,
tol=1e-10,
max_iter=1000,
verbose=1,
random_state=0)
cd_classifier = CDClassifier(loss='log',
alpha=alpha / 2,
C=1 / n_samples,
tol=1e-10,
max_iter=100,
verbose=1,
random_state=0)
sklearn_sag = LogisticRegression(tol=1e-10, max_iter=1000,
verbose=2, random_state=0,
C=1. / (n_samples * alpha),
solver='sag',
penalty='l2',
fit_intercept=False)
classifiers = [{'name': 'Lightning SAG', 'estimator': sag},
{'name': 'Lightning SAGA', 'estimator': saga},
{'name': 'Sklearn SAG', 'estimator': sklearn_sag},
{'name': 'Lightning CD', 'estimator': cd_classifier},
]
for classifier in classifiers:
    print(classifier['name'])
    clf = classifier['estimator']
    start = time.time()  # time each classifier separately
    clf.fit(X, y)
print("Training time", time.time() - start)
print("Accuracy", np.mean(clf.predict(X) == y))
n_nz = np.sum(np.sum(clf.coef_ != 0, axis=0, dtype=bool))
n_nz /= clf.coef_.size
print(clf.coef_)
print('Non-zero', n_nz)
| bsd-2-clause |
abhishekkrthakur/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
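# With nu=0.1, at most roughly 10% of the 200 training points are expected to
# fall outside the learned frontier (nu upper-bounds the training error
# fraction and lower-bounds the fraction of support vectors).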
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
jmschrei/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
nlhepler/freetype-py3 | examples/glyph-vector-2.py | 1 | 3414 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Show how to access glyph outline description.
'''
from freetype import *
if __name__ == '__main__':
import numpy
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
face = Face(b'./Vera.ttf')
face.set_char_size( 32*64 )
face.load_char('g')
slot = face.glyph
bitmap = face.glyph.bitmap
width = face.glyph.bitmap.width
rows = face.glyph.bitmap.rows
pitch = face.glyph.bitmap.pitch
data = []
for i in range(rows):
data.extend(bitmap.buffer[i*pitch:i*pitch+width])
Z = numpy.array(data,dtype=numpy.ubyte).reshape(rows, width)
outline = slot.outline
points = numpy.array(outline.points, dtype=[('x',float), ('y',float)])
x, y = points['x'], points['y']
figure = plt.figure(figsize=(8,10))
axis = figure.add_subplot(111)
#axis.scatter(points['x'], points['y'], alpha=.25)
start, end = 0, 0
VERTS, CODES = [], []
# Iterate over each contour
for i in range(len(outline.contours)):
end = outline.contours[i]
points = outline.points[start:end+1]
points.append(points[0])
tags = outline.tags[start:end+1]
tags.append(tags[0])
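        # FreeType tag convention: bit 0 set marks an on-curve point, clear
        # marks a quadratic Bezier control point; two consecutive off-curve
        # points imply a virtual on-curve point at their midpoint, which the
        # CURVE3 handling below reconstructs.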
segments = [ [points[0],], ]
for j in range(1, len(points) ):
segments[-1].append(points[j])
if tags[j] & (1 << 0) and j < (len(points)-1):
segments.append( [points[j],] )
verts = [points[0], ]
codes = [Path.MOVETO,]
for segment in segments:
if len(segment) == 2:
verts.extend(segment[1:])
codes.extend([Path.LINETO])
elif len(segment) == 3:
verts.extend(segment[1:])
codes.extend([Path.CURVE3, Path.CURVE3])
else:
verts.append(segment[1])
codes.append(Path.CURVE3)
for i in range(1,len(segment)-2):
A,B = segment[i], segment[i+1]
C = ((A[0]+B[0])/2.0, (A[1]+B[1])/2.0)
verts.extend([ C, B ])
codes.extend([ Path.CURVE3, Path.CURVE3])
verts.append(segment[-1])
codes.append(Path.CURVE3)
VERTS.extend(verts)
CODES.extend(codes)
start = end+1
# Draw glyph
path = Path(VERTS, CODES)
glyph = patches.PathPatch(path, fill = True, facecolor=(0.8,0.5,0.8), alpha=.25, lw=0)
glyph_outline = patches.PathPatch(path, fill = False, edgecolor='black', lw=3)
plt.imshow(Z, extent=[x.min(), x.max(),y.min(), y.max()],
interpolation='nearest', cmap = plt.cm.gray_r, vmin=0, vmax=400)
plt.xticks(numpy.linspace(x.min(), x.max(), Z.shape[1]+1), ())
plt.yticks(numpy.linspace(y.min(), y.max(), Z.shape[0]+1), ())
plt.grid(color='k', linewidth=1, linestyle='-')
axis.add_patch(glyph)
axis.add_patch(glyph_outline)
axis.set_xlim(x.min(), x.max())
axis.set_ylim(y.min(), y.max())
plt.savefig('test.pdf')
plt.show()
| bsd-3-clause |
rbharath/pande-gas | vs_utils/utils/dragon_utils.py | 3 | 5800 | """
Dragon utilities.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "BSD 3-clause"
from cStringIO import StringIO
import numpy as np
import os
import pandas as pd
import subprocess
import tempfile
from vs_utils.utils import SmilesGenerator
class Dragon(object):
"""
Wrapper for dragon6shell.
Parameters
----------
subset : str, optional (default '2d')
Descriptor subset.
kwargs : dict, optional
Keyword arguments for SmilesGenerator.
"""
def __init__(self, subset='2d', **kwargs):
self.subset = subset
self.initialized = False
self.config_filename, self.smiles_engine = None, None
self.smiles_engine_kwargs = kwargs
def initialize(self):
"""
Initialize.
This is not part of __init__ because it breaks IPython.parallel.
"""
fd, self.config_filename = tempfile.mkstemp()
os.close(fd)
with open(self.config_filename, 'wb') as f:
f.write(self.get_config())
self.smiles_engine = SmilesGenerator(**self.smiles_engine_kwargs)
self.initialized = True
def __del__(self):
"""
Cleanup.
"""
if self.config_filename is not None:
os.unlink(self.config_filename)
def get_config(self):
"""
Get configuration file.
"""
if self.subset == '2d':
return """<?xml version="1.0" encoding="utf-8"?>
<DRAGON version="6.0.36" script_version="1" generation_date="2014/11/17">
<OPTIONS>
<CheckUpdates value="true"/>
<SaveLayout value="true"/>
<ShowWorksheet value="false"/>
<Decimal_Separator value="."/>
<Missing_String value="NaN"/>
<DefaultMolFormat value="1"/>
<HelpBrowser value="/usr/bin/xdg-open"/>
<RejectUnusualValence value="false"/>
<Add2DHydrogens value="false"/>
<MaxSRforAllCircuit value="19"/>
<MaxSR value="35"/>
<MaxSRDetour value="30"/>
<MaxAtomWalkPath value="2000"/>
<LogPathWalk value="true"/>
<LogEdge value="true"/>
<Weights>
<weight name="Mass"/>
<weight name="VdWVolume"/>
<weight name="Electronegativity"/>
<weight name="Polarizability"/>
<weight name="Ionization"/>
<weight name="I-State"/>
</Weights>
<SaveOnlyData value="false"/>
<SaveLabelsOnSeparateFile value="false"/>
<SaveFormatBlock value="%b - %n.txt"/>
<SaveFormatSubBlock value="%b-%s - %n - %m.txt"/>
<SaveExcludeMisVal value="false"/>
<SaveExcludeAllMisVal value="false"/>
<SaveExcludeConst value="false"/>
<SaveExcludeNearConst value="false"/>
<SaveExcludeStdDev value="false"/>
<SaveStdDevThreshold value="0.0001"/>
<SaveExcludeCorrelated value="false"/>
<SaveCorrThreshold value="0.95"/>
<SaveExclusionOptionsToVariables value="false"/>
<SaveExcludeMisMolecules value="false"/>
<SaveExcludeRejectedMolecules value="false"/>
</OPTIONS>
<DESCRIPTORS>
<block id="1" SelectAll="true"/>
<block id="2" SelectAll="true"/>
<block id="3" SelectAll="true"/>
<block id="4" SelectAll="true"/>
<block id="5" SelectAll="true"/>
<block id="6" SelectAll="true"/>
<block id="7" SelectAll="true"/>
<block id="8" SelectAll="true"/>
<block id="9" SelectAll="true"/>
<block id="10" SelectAll="true"/>
<block id="11" SelectAll="true"/>
<block id="12" SelectAll="true"/>
<block id="21" SelectAll="true"/>
<block id="22" SelectAll="true"/>
<block id="23" SelectAll="true"/>
<block id="24" SelectAll="true"/>
<block id="25" SelectAll="true"/>
<block id="28" SelectAll="true"/>
<block id="29" SelectAll="true"/>
</DESCRIPTORS>
<MOLFILES>
<molInput value="stdin"/>
<molInputFormat value="SMILES"/>
</MOLFILES>
<OUTPUT>
<SaveStdOut value="true"/>
<SaveProject value="false"/>
<SaveFile value="false"/>
<logMode value="stderr"/>
</OUTPUT>
</DRAGON>
"""
else:
raise NotImplementedError
def get_descriptors(self, mols):
"""
Parameters
----------
mols : array_like
Molecules.
"""
if not self.initialized:
self.initialize()
smiles = [self.smiles_engine.get_smiles(mol) for mol in mols]
args = ['dragon6shell', '-s', self.config_filename]
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate('\n'.join(smiles))
if not stdout:
raise RuntimeError(stderr)
data, names = self.parse_descriptors(stdout)
# adjust for skipped molecules
# descriptors are in same order as smiles
missing = np.setdiff1d(smiles, names)
features = np.zeros(len(smiles), dtype=object)
idx = 0 # index into calculated features
for i, this_smiles in enumerate(smiles):
if this_smiles in missing:
features[i] = None
else:
assert this_smiles == names[idx] # confirm match
features[i] = data[idx]
idx += 1
assert len(features) == len(mols)
return features
def parse_descriptors(self, string):
"""
Parse Dragon descriptors.
Parameters
----------
string : str
Output from dragon6shell.
"""
df = pd.read_table(StringIO(string))
if self.subset == '2d':
del df['nHBonds'], df['Psi_e_1d'], df['Psi_e_1s']
# extract names
names = df['NAME'].values
# delete No. and NAME columns
del df['No.'], df['NAME']
return np.asarray(df, dtype=float), names
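# --- Editor's sketch (not part of the original module): typical call pattern.
# `mols` can be any iterable of RDKit molecules; descriptor rows come back in
# the same order, with None entries for molecules that dragon6shell skipped.
def _example_dragon_usage(mols):
    engine = Dragon(subset='2d')
    return engine.get_descriptors(mols)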
| bsd-3-clause |
guziy/basemap | setup.py | 1 | 6013 | from __future__ import (absolute_import, division, print_function)
import glob
import io
import os
import sys
from setuptools.dist import Distribution
if sys.version_info < (2, 6):
raise SystemExit("""matplotlib and the basemap toolkit require Python 2.6 or later.""")
# Do not require numpy for just querying the package
# Taken from the netcdf-python setup file (which took it from h5py setup file).
inc_dirs = []
if any('--' + opt in sys.argv for opt in Distribution.display_option_names +
['help-commands', 'help']) or sys.argv[1] == 'egg_info':
from setuptools import setup, Extension
else:
import numpy
# Use numpy versions if they are available.
from numpy.distutils.core import setup, Extension
# append numpy include dir.
inc_dirs.append(numpy.get_include())
def get_install_requirements(path):
path = os.path.join(os.path.dirname(__file__), path)
with io.open(path, encoding='utf-8') as fp:
content = fp.read()
return [req for req in content.split("\n")
if req != '' and not req.startswith('#')]
def checkversion(GEOS_dir):
"""check geos C-API header file (geos_c.h)"""
try:
f = open(os.path.join(GEOS_dir, 'include', 'geos_c.h'))
except IOError:
return None
geos_version = None
for line in f:
if line.startswith('#define GEOS_VERSION'):
geos_version = line.split()[2]
return geos_version
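# Editorial note: checkversion() returns the quoted literal from the header --
# e.g. a line '#define GEOS_VERSION "3.8.1"' yields the string '"3.8.1"'
# (quotes included) -- which is why the comparison below tests against the
# quoted literal '"3.1.1"'.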
# get location of geos lib from environment variable if it is set.
if 'GEOS_DIR' in os.environ:
GEOS_dir = os.environ.get('GEOS_DIR')
else:
# set GEOS_dir manually here if automatic detection fails.
GEOS_dir = None
user_home = os.path.expanduser('~')
geos_search_locations = [user_home, os.path.join(user_home, 'local'),
'/usr', '/usr/local', '/sw', '/opt', '/opt/local']
if GEOS_dir is None:
# if GEOS_dir not set, check a few standard locations.
GEOS_dirs = geos_search_locations
for direc in GEOS_dirs:
geos_version = checkversion(direc)
sys.stdout.write('checking for GEOS lib in %s ....\n' % direc)
if geos_version is None or geos_version < '"3.1.1"':
continue
else:
sys.stdout.write('GEOS lib (version %s) found in %s\n' %\
(geos_version[1:-1],direc))
GEOS_dir = direc
break
else:
geos_version = checkversion(GEOS_dir)
if GEOS_dir is None:
raise SystemExit("""
Can't find geos library in standard locations ('%s').
Please install the corresponding packages using your
systems software management system (e.g. for Debian Linux do:
'apt-get install libgeos-3.3.3 libgeos-c1 libgeos-dev' and/or
set the environment variable GEOS_DIR to point to the location
where geos is installed (for example, if geos_c.h
is in /usr/local/include, and libgeos_c is in /usr/local/lib,
set GEOS_DIR to /usr/local), or edit the setup.py script
manually and set the variable GEOS_dir (right after the line
that says "set GEOS_dir manually here".""" % "', '".join(geos_search_locations))
else:
geos_include_dirs=[os.path.join(GEOS_dir,'include')] + inc_dirs
geos_library_dirs=[os.path.join(GEOS_dir,'lib'),os.path.join(GEOS_dir,'lib64')]
packages = ['mpl_toolkits','mpl_toolkits.basemap']
namespace_packages = ['mpl_toolkits']
package_dirs = {'':'lib'}
# can't install _geoslib in mpl_toolkits.basemap namespace,
# or Basemap objects won't be pickleable.
# don't use runtime_library_dirs on windows (workaround
# for a distutils bug - http://bugs.python.org/issue2437).
if sys.platform == 'win32':
runtime_lib_dirs = []
else:
runtime_lib_dirs = geos_library_dirs
extensions = [ Extension("_geoslib",['src/_geoslib.c'],
library_dirs=geos_library_dirs,
runtime_library_dirs=runtime_lib_dirs,
include_dirs=geos_include_dirs,
libraries=['geos_c']) ]
# Specify all the required mpl data
pathout =\
os.path.join('lib',os.path.join('mpl_toolkits',os.path.join('basemap','data')))
datafiles = glob.glob(os.path.join(pathout,'*'))
datafiles = [os.path.join('data',os.path.basename(f)) for f in datafiles]
package_data = {'mpl_toolkits.basemap':datafiles}
install_requires = get_install_requirements("requirements.txt")
__version__ = "1.2.1"
setup(
name = "basemap",
version = __version__,
description = "Plot data on map projections with matplotlib",
long_description = """
An add-on toolkit for matplotlib that lets you plot data
on map projections with coastlines, lakes, rivers and political boundaries.
See http://matplotlib.org/basemap/users/examples.html for
examples of what it can do.""",
url = "https://matplotlib.org/basemap/",
download_url = "https://github.com/matplotlib/basemap/archive/v{0}rel.tar.gz".format(__version__),
author = "Jeff Whitaker",
author_email = "jeffrey.s.whitaker@noaa.gov",
maintainer = "Ben Root",
maintainer_email = "ben.v.root@gmail.com",
install_requires = install_requires,
platforms = ["any"],
license = "OSI Approved",
keywords = ["python","plotting","plots","graphs","charts","GIS","mapping","map projections","maps"],
classifiers = ["Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: OS Independent"],
packages = packages,
namespace_packages = namespace_packages,
package_dir = package_dirs,
ext_modules = extensions,
package_data = package_data
)
| gpl-2.0 |
google-research/google-research | smu/parser/smu_utils_lib_test.py | 1 | 35529 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for smu_utils_lib."""
import copy
import os
import tempfile
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pandas as pd
from rdkit import Chem
from google.protobuf import text_format
from smu import dataset_pb2
from smu.parser import smu_parser_lib
from smu.parser import smu_utils_lib
MAIN_DAT_FILE = 'x07_sample.dat'
STAGE1_DAT_FILE = 'x07_stage1.dat'
TESTDATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'testdata')
def str_to_bond_topology(s):
bt = dataset_pb2.BondTopology()
text_format.Parse(s, bt)
return bt
def get_stage1_conformer():
parser = smu_parser_lib.SmuParser(
os.path.join(TESTDATA_PATH, STAGE1_DAT_FILE))
conformer, _ = next(parser.process_stage1())
return conformer
def get_stage2_conformer():
parser = smu_parser_lib.SmuParser(os.path.join(TESTDATA_PATH, MAIN_DAT_FILE))
conformer, _ = next(parser.process_stage2())
return conformer
class SpecialIDTest(absltest.TestCase):
def test_from_dat_id(self):
self.assertIsNone(
smu_utils_lib.special_case_bt_id_from_dat_id(123456, 'CC'))
self.assertEqual(smu_utils_lib.special_case_bt_id_from_dat_id(999998, 'O'),
899650)
self.assertEqual(smu_utils_lib.special_case_bt_id_from_dat_id(0, 'O'),
899650)
with self.assertRaises(ValueError):
smu_utils_lib.special_case_bt_id_from_dat_id(0, 'NotASpecialCaseSmiles')
def test_from_bt_id(self):
self.assertIsNone(smu_utils_lib.special_case_dat_id_from_bt_id(123456))
self.assertEqual(
smu_utils_lib.special_case_dat_id_from_bt_id(899651), 999997)
class GetCompositionTest(absltest.TestCase):
def test_simple(self):
bt = dataset_pb2.BondTopology()
bt.atoms.extend([dataset_pb2.BondTopology.ATOM_C,
dataset_pb2.BondTopology.ATOM_C,
dataset_pb2.BondTopology.ATOM_N,
dataset_pb2.BondTopology.ATOM_H,
dataset_pb2.BondTopology.ATOM_H,
dataset_pb2.BondTopology.ATOM_H])
self.assertEqual('x03_c2nh3', smu_utils_lib.get_composition(bt))
class GetCanonicalStoichiometryWithHydrogensTest(absltest.TestCase):
def test_cyclobutane(self):
bt = smu_utils_lib.create_bond_topology('CCCC', '110011', '2222')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(ch2)4')
def test_ethylene(self):
bt = smu_utils_lib.create_bond_topology('CC', '2', '22')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(ch2)2')
def test_acrylic_acid(self):
bt = smu_utils_lib.create_bond_topology('CCCOO', '2000100210', '21001')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt),
'(c)(ch)(ch2)(o)(oh)')
def test_fluorine(self):
bt = smu_utils_lib.create_bond_topology('OFF', '110', '000')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(o)(f)2')
def test_fully_saturated(self):
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('C', '', '4')), '(ch4)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('N', '', '3')), '(nh3)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('O', '', '2')), '(oh2)')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(
smu_utils_lib.create_bond_topology('F', '', '1')), '(fh)')
def test_nplus_oneg(self):
bt = smu_utils_lib.create_bond_topology('NO', '1', '30')
self.assertEqual(
smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt),
'(nh3)(o)')
class ParseBondTopologyTest(absltest.TestCase):
def test_4_heavy(self):
num_atoms, atoms_str, matrix, hydrogens = smu_utils_lib.parse_bond_topology_line(
' 4 N+O O O- 010110 3000')
self.assertEqual(num_atoms, 4)
self.assertEqual(atoms_str, 'N+O O O-')
self.assertEqual(matrix, '010110')
self.assertEqual(hydrogens, '3000')
def test_7_heavy(self):
num_atoms, atoms_str, matrix, hydrogens = smu_utils_lib.parse_bond_topology_line(
' 7 N+O O O O-F F 001011101001000000000 1000000')
self.assertEqual(num_atoms, 7)
self.assertEqual(atoms_str, 'N+O O O O-F F ') # Note the trailing space
self.assertEqual(matrix, '001011101001000000000')
self.assertEqual(hydrogens, '1000000')
class CreateBondTopologyTest(absltest.TestCase):
def test_no_charged(self):
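    # 'CNFF' lists the heavy atoms, '111000' is the upper triangle of their
    # bond matrix ((C,N), (C,F), (C,F) single bonds; no N-F or F-F bonds), and
    # '1200' gives the hydrogen count attached to each heavy atom in order.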
got = smu_utils_lib.create_bond_topology('CNFF', '111000', '1200')
expected_str = '''
atoms: ATOM_C
atoms: ATOM_N
atoms: ATOM_F
atoms: ATOM_F
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 5
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 6
bond_type: BOND_SINGLE
}
'''
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
def test_charged(self):
# This is actually C N N+O-
got = smu_utils_lib.create_bond_topology('CNNO', '200101', '2020')
expected_str = '''
atoms: ATOM_C
atoms: ATOM_N
atoms: ATOM_NPOS
atoms: ATOM_ONEG
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
bonds {
atom_a: 1
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
bonds {
atom_b: 5
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 6
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 7
bond_type: BOND_SINGLE
}
'''
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
def test_one_heavy(self):
got = smu_utils_lib.create_bond_topology('C', '', '4')
expected_str = '''
atoms: ATOM_C
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
'''
expected = str_to_bond_topology(expected_str)
self.assertEqual(str(expected), str(got))
class FromCSVTest(absltest.TestCase):
def test_basic(self):
infile = tempfile.NamedTemporaryFile(mode='w', delete=False)
infile.write(
'id,num_atoms,atoms_str,connectivity_matrix,hydrogens,smiles\n')
infile.write('68,3,C N+O-,310,010,[NH+]#C[O-]\n')
infile.write('134,4,N+O-F F ,111000,1000,[O-][NH+](F)F\n')
infile.close()
out = smu_utils_lib.generate_bond_topologies_from_csv(infile.name)
bt = next(out)
self.assertEqual(68, bt.bond_topology_id)
self.assertLen(bt.atoms, 4)
self.assertEqual(bt.smiles, '[NH+]#C[O-]')
bt = next(out)
self.assertEqual(134, bt.bond_topology_id)
self.assertLen(bt.atoms, 5)
self.assertEqual(bt.smiles, '[O-][NH+](F)F')
class ParseDuplicatesFileTest(absltest.TestCase):
def test_basic(self):
df = smu_utils_lib.parse_duplicates_file(
os.path.join(TESTDATA_PATH, 'small.equivalent_isomers.dat'))
pd.testing.assert_frame_equal(
pd.DataFrame(
columns=['name1', 'stoich1', 'btid1', 'shortconfid1', 'confid1',
'name2', 'stoich2', 'btid2', 'shortconfid2', 'confid2'],
data=[
['x07_c2n2o2fh3.224227.004',
'c2n2o2fh3', 224227, 4, 224227004,
'x07_c2n2o2fh3.224176.005',
'c2n2o2fh3', 224176, 5, 224176005],
['x07_c2n2o2fh3.260543.005',
'c2n2o2fh3', 260543, 5, 260543005,
'x07_c2n2o2fh3.224050.001',
'c2n2o2fh3', 224050, 1, 224050001],
]),
df,
check_like=True)
class BondTopologyToMoleculeTest(absltest.TestCase):
def test_o2(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
''')
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('O=O', Chem.MolToSmiles(got))
def test_methane(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_C
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
''')
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('[H]C([H])([H])[H]', Chem.MolToSmiles(got))
# This molecule is an N+ central atom, bonded to C (triply), O-, and F
def test_charged_molecule(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_C
atoms: ATOM_NPOS
atoms: ATOM_ONEG
atoms: ATOM_F
bonds {
atom_b: 1
bond_type: BOND_TRIPLE
}
bonds {
atom_a: 1
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 3
bond_type: BOND_SINGLE
}
''')
got = smu_utils_lib.bond_topology_to_molecule(bond_topology)
self.assertEqual('C#[N+]([O-])F', Chem.MolToSmiles(got))
class ConformerToMoleculeTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
# We'll make a new initial_geometry which is just the current one with all
# coordinates multiplied by 1000
self.conformer.initial_geometries.append(
self.conformer.initial_geometries[0])
new_geom = self.conformer.initial_geometries[1]
for atom_pos in new_geom.atom_positions:
atom_pos.x = atom_pos.x * 1000
atom_pos.y = atom_pos.y * 1000
atom_pos.z = atom_pos.z * 1000
# For the extra bond_topology, we'll just copy the existing one and change
# the id. Through the dumb luck of the molecule we picked there's not a
# simple way to make this a new bond topology and still have it look valid
# to RDKit
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[1].bond_topology_id = 99999
def test_all_outputs(self):
mols = list(smu_utils_lib.conformer_to_molecules(self.conformer))
self.assertLen(mols, 6) # 2 bond topologies * (1 opt geom + 2 init_geom)
self.assertEqual([m.GetProp('_Name') for m in mols], [
'SMU 618451001 bt=618451(0/2) geom=init(0/2)',
'SMU 618451001 bt=618451(0/2) geom=init(1/2)',
'SMU 618451001 bt=618451(0/2) geom=opt',
'SMU 618451001 bt=99999(1/2) geom=init(0/2)',
'SMU 618451001 bt=99999(1/2) geom=init(1/2)',
'SMU 618451001 bt=99999(1/2) geom=opt'
])
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[0], kekuleSmiles=True, isomericSmiles=False))
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[4], kekuleSmiles=True, isomericSmiles=False))
def test_initial_only(self):
mols = list(
smu_utils_lib.conformer_to_molecules(
self.conformer,
include_initial_geometries=True,
include_optimized_geometry=False,
include_all_bond_topologies=False))
self.assertLen(mols, 2)
self.assertEqual([m.GetProp('_Name') for m in mols], [
'SMU 618451001 bt=618451(0/2) geom=init(0/2)',
'SMU 618451001 bt=618451(0/2) geom=init(1/2)',
])
# This is just one random atom I picked from the .dat file and converted to
# angstroms instead of bohr.
self.assertEqual('C', mols[0].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([0.6643, -3.470301, 3.4766],
list(mols[0].GetConformer().GetAtomPosition(1)),
atol=1e-6)
self.assertEqual('C', mols[1].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([664.299998, -3470.300473, 3476.600215],
list(mols[1].GetConformer().GetAtomPosition(1)),
atol=1e-6)
def test_optimized_only(self):
mols = list(
smu_utils_lib.conformer_to_molecules(
self.conformer,
include_initial_geometries=False,
include_optimized_geometry=True,
include_all_bond_topologies=False))
self.assertLen(mols, 1)
self.assertEqual(
mols[0].GetProp('_Name'),
'SMU 618451001 bt=618451(0/2) geom=opt',
)
self.assertEqual(
'[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',
Chem.MolToSmiles(mols[0], kekuleSmiles=True, isomericSmiles=False))
# This is just two random atoms I picked from the .dat file and converted to
# angstroms instead of bohr.
self.assertEqual('C', mols[0].GetAtomWithIdx(1).GetSymbol())
np.testing.assert_allclose([0.540254, -3.465543, 3.456982],
list(mols[0].GetConformer().GetAtomPosition(1)),
atol=1e-6)
self.assertEqual('H', mols[0].GetAtomWithIdx(13).GetSymbol())
np.testing.assert_allclose([2.135153, -1.817366, 0.226376],
list(mols[0].GetConformer().GetAtomPosition(13)),
atol=1e-6)
class SmilesCompareTest(absltest.TestCase):
def test_string_format(self):
# for some simplicity later on, we use shorter names
self.assertEqual('MISSING', str(smu_utils_lib.SmilesCompareResult.MISSING))
self.assertEqual('MISMATCH',
str(smu_utils_lib.SmilesCompareResult.MISMATCH))
self.assertEqual('MATCH', str(smu_utils_lib.SmilesCompareResult.MATCH))
def test_missing(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
''')
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MISSING, result)
self.assertEqual('O=O', with_h)
self.assertEqual('O=O', without_h)
# Also directly test compute_smiles_for_bond_topology
self.assertEqual(
'O=O',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=True))
def test_mismatch(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
smiles: "BlahBlahBlah"
''')
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MISMATCH, result)
self.assertEqual('O=O', with_h)
self.assertEqual('O=O', without_h)
def test_matched_and_h_stripping(self):
bond_topology = str_to_bond_topology('''
atoms: ATOM_O
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
smiles: "O"
''')
result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(
bond_topology)
self.assertEqual(smu_utils_lib.SmilesCompareResult.MATCH, result)
self.assertEqual('[H]O[H]', with_h)
self.assertEqual('O', without_h)
# Also directly test compute_smiles_for_bond_topology
self.assertEqual(
'[H]O[H]',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=True))
self.assertEqual(
'O',
smu_utils_lib.compute_smiles_for_bond_topology(
bond_topology, include_hs=False))
def test_compute_smiles_from_molecule_no_hs(self):
mol = Chem.MolFromSmiles('FOC', sanitize=False)
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False), 'COF')
# This is expected. Even with include_hs=True, if there were no Hs in the
# molecule, they will not be in the smiles.
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=True), 'COF')
def test_compute_smiles_from_molecule_with_hs(self):
mol = Chem.MolFromSmiles('FOC', sanitize=False)
Chem.SanitizeMol(mol, Chem.rdmolops.SanitizeFlags.SANITIZE_ADJUSTHS)
mol = Chem.AddHs(mol)
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False), 'COF')
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=True),
'[H]C([H])([H])OF')
def test_compute_smiles_from_molecule_special_case(self):
mol = Chem.MolFromSmiles('C12=C3C4=C1C4=C23', sanitize=False)
# Double check that this really is the special case -- we get back the
# SMILES we put in even though it's not the one we want.
self.assertEqual('C12=C3C4=C1C4=C23',
Chem.MolToSmiles(mol, kekuleSmiles=True))
self.assertEqual(
smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False),
'C12=C3C1=C1C2=C31')
def test_compute_smiles_from_molecule_labeled_with_h(self):
mol = Chem.MolFromSmiles(
'[O-][N+]([H])([H])N([H])OC([H])([H])F', sanitize=False)
self.assertIsNotNone(mol)
self.assertEqual(
'[O-][N+:1]([H:2])([H:3])[N:4]([H:5])[O:6][C:7]([H:8])([H:9])[F:10]',
smu_utils_lib.compute_smiles_for_molecule(
mol, include_hs=True, labeled_atoms=True))
def test_compute_smiles_from_molecule_labeled_no_h(self):
mol = Chem.MolFromSmiles(
'[O-][N+]([H])([H])N([H])OC([H])([H])F', sanitize=False)
self.assertIsNotNone(mol)
self.assertEqual(
'[O-][NH2+:1][NH:2][O:3][CH2:4][F:5]',
smu_utils_lib.compute_smiles_for_molecule(
mol, include_hs=False, labeled_atoms=True))
class MergeConformersTest(absltest.TestCase):
def setUp(self):
super().setUp()
# We are relying on the fact that the first conformer in both x07_sample.dat
# and x07_stage1.dat is the same.
self.stage1_conformer = get_stage1_conformer()
self.stage2_conformer = get_stage2_conformer()
self.duplicate_conformer = dataset_pb2.Conformer()
self.duplicate_conformer.conformer_id = self.stage1_conformer.conformer_id
# A real duplicate conformer wouldn't have both of these fields filled in,
# but it's fine for the test to make sure everything is copied.
self.duplicate_conformer.duplicated_by = 123
self.duplicate_conformer.duplicate_of.extend([111, 222])
def test_two_stage2(self):
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer,
self.stage2_conformer)
def test_two_stage1(self):
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage1_conformer,
self.stage1_conformer)
def test_two_duplicates(self):
duplicate_conformer2 = copy.deepcopy(self.duplicate_conformer)
duplicate_conformer2.duplicate_of[:] = [333, 444]
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.duplicate_conformer, duplicate_conformer2)
self.assertIsNone(got_conflict)
self.assertEqual(123, got_conf.duplicated_by)
self.assertCountEqual([111, 222, 333, 444], got_conf.duplicate_of)
def test_stage2_stage1(self):
# Add a duplicate to stage1 to make sure it is copied
self.stage1_conformer.duplicate_of.append(999)
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.duplicate_of, [999])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_stage1_conflict_energy(self):
self.stage2_conformer.properties.initial_geometry_energy.value = -1.23
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertEqual(got_conflict, [
618451001,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True,
1, 1, 1, 1, -1.23, 0.052254, -406.522079, 2.5e-05, True, True
])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
# The stage2 value should be returned
self.assertEqual(got_conf.properties.initial_geometry_energy.value, -1.23)
def test_stage2_stage1_conflict_error_codes(self):
self.stage2_conformer.properties.errors.error_nstat1 = 999
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertEqual(got_conflict, [
618451001,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True,
999, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True
])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_stage1_conflict_missing_geometry(self):
self.stage2_conformer.ClearField('optimized_geometry')
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertEqual(got_conflict, [
618451001,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True,
1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, False
])
# Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_stage1_no_conflict_minus1(self):
# If stage2 contains a -1, we keep that (stricter error checking later on)
self.stage2_conformer.properties.initial_geometry_energy.value = -1.0
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.properties.initial_geometry_energy.value, -1.0)
def test_stage2_stage1_no_conflict_approx_equal(self):
self.stage2_conformer.properties.initial_geometry_energy.value += 1e-7
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.stage1_conformer)
self.assertIsNone(got_conflict)
# Just check a random field from stage2
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage2_duplicate(self):
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage2_conformer, self.duplicate_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.duplicate_of, [111, 222])
self.assertEqual(got_conf.duplicated_by, 123)
# Just check a random field from stage2
self.assertNotEmpty(got_conf.properties.normal_modes)
def test_stage1_duplicate(self):
got_conf, got_conflict = smu_utils_lib.merge_conformer(
self.stage1_conformer, self.duplicate_conformer)
self.assertIsNone(got_conflict)
self.assertEqual(got_conf.duplicate_of, [111, 222])
self.assertEqual(got_conf.duplicated_by, 123)
# Just check a random field from stage1
self.assertTrue(got_conf.properties.HasField('initial_geometry_energy'))
def test_multiple_initial_geometries(self):
bad_conformer = copy.deepcopy(self.stage1_conformer)
bad_conformer.initial_geometries.append(bad_conformer.initial_geometries[0])
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(bad_conformer, self.stage2_conformer)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer, bad_conformer)
def test_multiple_bond_topologies(self):
bad_conformer = copy.deepcopy(self.stage1_conformer)
bad_conformer.bond_topologies.append(bad_conformer.bond_topologies[0])
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(bad_conformer, self.stage2_conformer)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer, bad_conformer)
def test_different_bond_topologies(self):
self.stage1_conformer.bond_topologies[0].atoms[0] = (
dataset_pb2.BondTopology.ATOM_H)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage1_conformer,
self.stage2_conformer)
with self.assertRaises(ValueError):
smu_utils_lib.merge_conformer(self.stage2_conformer,
self.stage1_conformer)
class ConformerErrorTest(absltest.TestCase):
def test_stage1_no_error(self):
conformer = get_stage1_conformer()
self.assertFalse(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage1_error(self):
conformer = get_stage2_conformer()
conformer.properties.errors.error_frequencies = 123
self.assertTrue(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_no_error(self):
conformer = get_stage2_conformer()
self.assertFalse(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_error_in_1_expected_field(self):
conformer = get_stage2_conformer()
conformer.properties.errors.error_rotational_modes = 123
self.assertTrue(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_error_in_0_expected_field(self):
conformer = get_stage2_conformer()
# This field is 0 to indicate no error. Why the discrepancy? Who knows!
conformer.properties.errors.error_nsvg09 = 1
self.assertTrue(smu_utils_lib.conformer_has_calculation_errors(conformer))
def test_stage2_nstat1_is_3(self):
# This is the other bizarre case. nstat1 of 3 is still considered success.
conformer = get_stage2_conformer()
conformer.properties.errors.error_nstat1 = 3
self.assertFalse(smu_utils_lib.conformer_has_calculation_errors(conformer))
class FilterConformerByAvailabilityTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = dataset_pb2.Conformer()
properties = self.conformer.properties
# A STANDARD field
properties.single_point_energy_pbe0d3_6_311gd.value = 1.23
# A COMPLETE field
properties.homo_pbe0_aug_pc_1.value = 1.23
# An INTERNAL_ONLY field
properties.nuclear_repulsion_energy.value = 1.23
def test_standard(self):
smu_utils_lib.filter_conformer_by_availability(self.conformer,
[dataset_pb2.STANDARD])
self.assertTrue(
self.conformer.properties.HasField(
'single_point_energy_pbe0d3_6_311gd'))
self.assertFalse(self.conformer.properties.HasField('homo_pbe0_aug_pc_1'))
self.assertFalse(
self.conformer.properties.HasField('nuclear_repulsion_energy'))
def test_complete_and_internal_only(self):
smu_utils_lib.filter_conformer_by_availability(
self.conformer, [dataset_pb2.COMPLETE, dataset_pb2.INTERNAL_ONLY])
self.assertFalse(
self.conformer.properties.HasField(
'single_point_energy_pbe0d3_6_311gd'))
self.assertTrue(self.conformer.properties.HasField('homo_pbe0_aug_pc_1'))
self.assertTrue(
self.conformer.properties.HasField('nuclear_repulsion_energy'))
class ConformerToStandardTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
def test_field_filtering(self):
# Check that the field which should be filtered starts out set
self.assertTrue(self.conformer.properties.HasField(
'single_point_energy_hf_6_31gd'))
got = smu_utils_lib.conformer_to_standard(self.conformer)
# Check for a field that was originally in self.conformer and should be
# filtered and a field which should still be present.
self.assertTrue(got.properties.HasField(
'single_point_energy_pbe0d3_6_311gd'))
self.assertFalse(
got.properties.HasField('single_point_energy_hf_6_31gd'))
def test_remove_error_conformer(self):
self.conformer.properties.errors.error_frequencies = 123
self.assertIsNone(smu_utils_lib.conformer_to_standard(self.conformer))
def test_remove_duplicate(self):
self.conformer.duplicated_by = 123
self.assertIsNone(smu_utils_lib.conformer_to_standard(self.conformer))
class DetermineFateTest(parameterized.TestCase):
def test_duplicate_same_topology(self):
conformer = get_stage1_conformer()
# bond topology is conformer_id // 1000
conformer.duplicated_by = conformer.conformer_id + 1
self.assertEqual(dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY,
smu_utils_lib.determine_fate(conformer))
def test_duplicate_different_topology(self):
conformer = get_stage1_conformer()
# bond topology is conformer_id // 1000
conformer.duplicated_by = conformer.conformer_id + 1000
self.assertEqual(dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY,
smu_utils_lib.determine_fate(conformer))
@parameterized.parameters(
(2, dataset_pb2.Conformer.FATE_GEOMETRY_OPTIMIZATION_PROBLEM),
(5, dataset_pb2.Conformer.FATE_DISASSOCIATED),
(4, dataset_pb2.Conformer.FATE_FORCE_CONSTANT_FAILURE),
(6, dataset_pb2.Conformer.FATE_DISCARDED_OTHER))
def test_geometry_failures(self, nstat1, expected_fate):
conformer = get_stage1_conformer()
conformer.properties.errors.error_nstat1 = nstat1
self.assertEqual(expected_fate, smu_utils_lib.determine_fate(conformer))
def test_no_result(self):
conformer = get_stage1_conformer()
self.assertEqual(dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS,
smu_utils_lib.determine_fate(conformer))
def test_calculation_errors(self):
conformer = get_stage2_conformer()
# This is a random choice of an error to set. I just need some error.
conformer.properties.errors.error_atomic_analysis = 999
self.assertEqual(dataset_pb2.Conformer.FATE_CALCULATION_WITH_ERROR,
smu_utils_lib.determine_fate(conformer))
def test_success(self):
conformer = get_stage2_conformer()
self.assertEqual(dataset_pb2.Conformer.FATE_SUCCESS,
smu_utils_lib.determine_fate(conformer))
class ToBondTopologySummaryTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.conformer = get_stage2_conformer()
def test_dup_same(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].bond_topology.bond_topology_id,
self.conformer.bond_topologies[0].bond_topology_id)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_duplicates_same_topology, 1)
def test_dup_diff(self):
self.conformer.fate = (
dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY)
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_duplicates_different_topology, 1)
def test_geometry_failed(self):
self.conformer.fate = (dataset_pb2.Conformer.FATE_DISCARDED_OTHER)
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_failed_geometry_optimization, 1)
def test_missing_calculation(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 1)
self.assertEqual(got[0].count_attempted_conformers, 1)
self.assertEqual(got[0].count_kept_geometry, 1)
self.assertEqual(got[0].count_missing_calculation, 1)
def test_calculation_with_error(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_CALCULATION_WITH_ERROR
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[-1].bond_topology_id = 123
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 2)
# We don't actually care about the order, but this is what comes out right
# now.
self.assertEqual(got[0].bond_topology.bond_topology_id, 123)
self.assertEqual(got[0].count_attempted_conformers, 0)
self.assertEqual(got[0].count_kept_geometry, 0)
self.assertEqual(got[0].count_calculation_with_error, 0)
self.assertEqual(got[0].count_detected_match_with_error, 1)
self.assertEqual(got[1].bond_topology.bond_topology_id,
self.conformer.bond_topologies[0].bond_topology_id)
self.assertEqual(got[1].count_attempted_conformers, 1)
self.assertEqual(got[1].count_kept_geometry, 1)
self.assertEqual(got[1].count_calculation_with_error, 1)
self.assertEqual(got[1].count_detected_match_with_error, 0)
def test_calculation_success(self):
self.conformer.fate = dataset_pb2.Conformer.FATE_SUCCESS
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])
self.conformer.bond_topologies[-1].bond_topology_id = 123
got = list(
smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))
self.assertLen(got, 2)
# We don't actually care about the order, but this is what comes out right
# now.
self.assertEqual(got[0].bond_topology.bond_topology_id, 123)
self.assertEqual(got[0].count_attempted_conformers, 0)
self.assertEqual(got[0].count_kept_geometry, 0)
self.assertEqual(got[0].count_calculation_success, 0)
self.assertEqual(got[0].count_detected_match_success, 1)
self.assertEqual(got[1].bond_topology.bond_topology_id,
self.conformer.bond_topologies[0].bond_topology_id)
self.assertEqual(got[1].count_attempted_conformers, 1)
self.assertEqual(got[1].count_kept_geometry, 1)
self.assertEqual(got[1].count_calculation_success, 1)
self.assertEqual(got[1].count_detected_match_success, 0)
class LabeledSmilesTester(absltest.TestCase):
def test_atom_labels(self):
mol = Chem.MolFromSmiles('FCON[NH2+][O-]', sanitize=False)
self.assertIsNotNone(mol)
smiles_before = Chem.MolToSmiles(mol)
self.assertEqual(
smu_utils_lib.labeled_smiles(mol), 'F[CH2:1][O:2][NH:3][NH2+:4][O-:5]')
# Testing both the atom numbers and the smiles is redundant,
# but guards against possible future changes.
for atom in mol.GetAtoms():
self.assertEqual(atom.GetAtomMapNum(), 0)
self.assertEqual(Chem.MolToSmiles(mol), smiles_before)
if __name__ == '__main__':
absltest.main()
| apache-2.0 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/docstring.py | 1 | 2522 | from __future__ import print_function
import string
import inspect  # cleandoc comes from the standard library; importing it via matplotlib was fragile
class FormatDict(dict):
"""Adapted from http://stackoverflow.com/questions/11283961/partial-string-formatting"""
def __missing__(self, key):
return "{" + key + "}"
class DocReplacer(object):
"""Decorator object for replacing patterns in docstrings using string.format."""
def __init__(self, auto_dedent=True, allow_partial_formatting=False, **doc_dict):
'''
Parameters
-------------
auto_dedent : bool
Flag for automatically dedenting the docstring (via inspect.cleandoc) before replacement.
allow_partial_formatting : bool
Enables partial formatting (i.e., not all keys are available in the dictionary)
doc_dict : kwargs
Each key is a pattern in the docstring that will be replaced by the corresponding value.
Example
-------------
TODO: Update this documentation
@DocReplacer(p1='p1 : int\n\tFirst parameter')
def foo(p1):
"""
Some functions.
Params:
{p1}
"""
will result in foo's docstring being:
"""
Some functions.
Params:
p1 : int
First parameter
"""
'''
self.doc_dict = doc_dict
self.auto_dedent = auto_dedent
self.allow_partial_formatting = allow_partial_formatting
def __call__(self, func):
if func.__doc__:
doc = func.__doc__
if self.auto_dedent:
doc = inspect.cleandoc(doc)
func.__doc__ = self._format(doc)
return func
def replace(self):
"""Reformat values inside the self.doc_dict using self.doc_dict
TODO: Make support for partial_formatting
"""
doc_dict = self.doc_dict.copy()
for k, v in doc_dict.items():
if '{' in v and '}' in v:
self.doc_dict[k] = v.format(**doc_dict)
def update(self, *args, **kwargs):
"Assume self.params is a dict and update it with supplied args"
self.doc_dict.update(*args, **kwargs)
def _format(self, doc):
""" Formats the docstring using self.doc_dict """
if self.allow_partial_formatting:
mapping = FormatDict(self.doc_dict)
else:
mapping = self.doc_dict
formatter = string.Formatter()
return formatter.vformat(doc, (), mapping)
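# Illustrative usage sketch (not part of the original module): decorate a function
# whose docstring contains {placeholders}; DocReplacer substitutes them when the
# decorator runs. The parameter text below is invented purely for demonstration.
def _docreplacer_example():
    common_docs = DocReplacer(data="data : array-like\n    Input measurements.")

    @common_docs
    def summarize(data):
        """Summarize a sample.

        Parameters
        -------------
        {data}
        """
        return len(data)

    return summarize.__doc__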
| mit |
uberdugo/mlia | Ch05/EXTRAS/plot2D.py | 7 | 1276 | '''
Created on Oct 6, 2010
@author: Peter
'''
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import logRegres
dataMat,labelMat=logRegres.loadDataSet()
dataArr = array(dataMat)
weights = logRegres.stocGradAscent0(dataArr,labelMat)
n = shape(dataArr)[0] #number of points to create
xcord1 = []; ycord1 = []
xcord2 = []; ycord2 = []
markers =[]
colors =[]
for i in range(n):
if int(labelMat[i])== 1:
xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
else:
xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.scatter(xcord,ycord, c=colors, s=markers)
type1 = ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
type2 = ax.scatter(xcord2, ycord2, s=30, c='green')
x = arange(-3.0, 3.0, 0.1)
#weights = [-2.9, 0.72, 1.29]
#weights = [-5, 1.09, 1.42]
weights = [13.03822793, 1.32877317, -1.96702074]
weights = [4.12, 0.48, -0.6168]
y = (-weights[0]-weights[1]*x)/weights[2]
type3 = ax.plot(x, y)
#ax.legend([type1, type2, type3], ["Did Not Like", "Liked in Small Doses", "Liked in Large Doses"], loc=2)
#ax.axis([-5000,100000,-2,25])
plt.xlabel('X1')
plt.ylabel('X2')
plt.show() | gpl-3.0 |
chrisjdavie/shares | machine_learning/sklearn_dataset_format.py | 1 | 1160 | '''
Created on 2 Sep 2014
@author: chris
'''
'''Fields of an sklearn dataset bunch:
- data: list of length n_samples containing the unicode documents
- target: list of length n_samples with int indices into target_names
- target_names: category names that the target indices refer to
- filenames: names of the files storing the data (and probably the target too)
'''
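# Illustrative sketch of that layout using plain Python objects. The documents and
# file names below are invented for demonstration; only the category names come from
# the example in main() further down.
EXAMPLE_BUNCH = {
    'data': [u'first document text', u'second document text'],  # unicode documents
    'target': [0, 1],                                           # int indices into target_names
    'target_names': ['alt.atheism', 'comp.graphics'],           # category names
    'filenames': ['doc_00001.txt', 'doc_00002.txt'],            # files the data came from
}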
def main():
''' taken from the tutorials, I'm having a look at how they store datasets'''
from sklearn.datasets import fetch_20newsgroups
# import numpy as np
categories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics', 'sci.med']
twenty_train = fetch_20newsgroups(subset='train',
categories=categories,
shuffle=True,
random_state=42)
print dir(twenty_train)
print twenty_train.keys()
# print twenty_train.data[0]
print twenty_train.target[0]
print len(twenty_train.filenames)
print twenty_train.filenames[0]
print twenty_train.target_names
if __name__ == '__main__':
main() | mit |
tienjunhsu/trading-with-python | lib/widgets.py | 78 | 3012 | # -*- coding: utf-8 -*-
"""
A collection of widgets for gui building
Copyright: Jev Kuznetsov
License: BSD
"""
from __future__ import division
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
class MatplotlibWidget(QWidget):
def __init__(self,parent=None,grid=True):
QWidget.__init__(self,parent)
self.grid = grid
self.fig = Figure()
self.canvas =FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.mpl_connect('button_press_event', self.onPick) # bind pick event
#self.axes = self.fig.add_subplot(111)
margins = [0.05,0.1,0.9,0.8]
self.axes = self.fig.add_axes(margins)
self.toolbar = NavigationToolbar(self.canvas,self)
#self.initFigure()
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.setLayout(layout)
def onPick(self,event):
print 'Pick event'
print 'you pressed', event.button, event.xdata, event.ydata
def update(self):
self.canvas.draw()
def plot(self,*args,**kwargs):
self.axes.plot(*args,**kwargs)
self.axes.grid(self.grid)
self.update()
def clear(self):
self.axes.clear()
def initFigure(self):
self.axes.grid(True)
x = np.linspace(-1,1)
y = x**2
self.axes.plot(x,y,'o-')
class PlotWindow(QMainWindow):
''' a stand-alone window with embedded matplotlib widget '''
def __init__(self,parent=None):
super(PlotWindow,self).__init__(parent)
self.setAttribute(Qt.WA_DeleteOnClose)
self.mplWidget = MatplotlibWidget()
self.setCentralWidget(self.mplWidget)
def plot(self,dataFrame):
''' plot dataframe '''
dataFrame.plot(ax=self.mplWidget.axes)
def getAxes(self):
return self.mplWidget.axes
def getFigure(self):
return self.mplWidget.fig
def update(self):
self.mplWidget.update()
class MainForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Demo: PyQt with matplotlib')
self.plot = MatplotlibWidget()
self.setCentralWidget(self.plot)
self.plot.clear()
self.plot.plot(np.random.rand(10),'x-')
#---------------------
if __name__=='__main__':
app = QApplication(sys.argv)
form = MainForm()
form.show()
app.exec_() | bsd-3-clause |
tashaxe/Red-DiscordBot | lib/youtube_dl/extractor/wsj.py | 7 | 4311 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
unified_strdate,
)
class WSJIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
https?://video-api\.wsj\.com/api-video/player/iframe\.html\?.*?\bguid=|
https?://(?:www\.)?wsj\.com/video/[^/]+/|
wsj:
)
(?P<id>[a-fA-F0-9-]{36})
'''
IE_DESC = 'Wall Street Journal'
_TESTS = [{
'url': 'http://video-api.wsj.com/api-video/player/iframe.html?guid=1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
'md5': 'e230a5bb249075e40793b655a54a02e4',
'info_dict': {
'id': '1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
'ext': 'mp4',
'upload_date': '20150202',
'uploader_id': 'jdesai',
'creator': 'jdesai',
'categories': list, # a long list
'duration': 90,
'title': 'Bills Coach Rex Ryan Updates His Old Jets Tattoo',
},
}, {
'url': 'http://www.wsj.com/video/can-alphabet-build-a-smarter-city/359DDAA8-9AC1-489C-82E6-0429C1E430E0.html',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
info = self._download_json(
'http://video-api.wsj.com/api-video/find_all_videos.asp', video_id,
query={
'type': 'guid',
'count': 1,
'query': video_id,
'fields': ','.join((
'type', 'hls', 'videoMP4List', 'thumbnailList', 'author',
'description', 'name', 'duration', 'videoURL', 'titletag',
'formattedCreationDate', 'keywords', 'editor')),
})['items'][0]
title = info.get('name', info.get('titletag'))
formats = []
f4m_url = info.get('videoURL')
if f4m_url:
formats.extend(self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False))
m3u8_url = info.get('hls')
if m3u8_url:
formats.extend(self._extract_m3u8_formats(
info['hls'], video_id, ext='mp4',
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
for v in info.get('videoMP4List', []):
mp4_url = v.get('url')
if not mp4_url:
continue
tbr = int_or_none(v.get('bitrate'))
formats.append({
'url': mp4_url,
'format_id': 'http' + ('-%d' % tbr if tbr else ''),
'tbr': tbr,
'width': int_or_none(v.get('width')),
'height': int_or_none(v.get('height')),
'fps': float_or_none(v.get('fps')),
})
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
# Thumbnails are conveniently in the correct format already
'thumbnails': info.get('thumbnailList'),
'creator': info.get('author'),
'uploader_id': info.get('editor'),
'duration': int_or_none(info.get('duration')),
'upload_date': unified_strdate(info.get(
'formattedCreationDate'), day_first=False),
'title': title,
'categories': info.get('keywords'),
}
class WSJArticleIE(InfoExtractor):
_VALID_URL = r'(?i)https?://(?:www\.)?wsj\.com/articles/(?P<id>[^/?#&]+)'
_TEST = {
'url': 'https://www.wsj.com/articles/dont-like-china-no-pandas-for-you-1490366939?',
'info_dict': {
'id': '4B13FA62-1D8C-45DB-8EA1-4105CB20B362',
'ext': 'mp4',
'upload_date': '20170221',
'uploader_id': 'ralcaraz',
'title': 'Bao Bao the Panda Leaves for China',
}
}
def _real_extract(self, url):
article_id = self._match_id(url)
webpage = self._download_webpage(url, article_id)
video_id = self._search_regex(
r'data-src=["\']([a-fA-F0-9-]{36})', webpage, 'video id')
return self.url_result('wsj:%s' % video_id, WSJIE.ie_key(), video_id)
| gpl-3.0 |
TomAugspurger/pandas | pandas/tests/series/methods/test_rename_axis.py | 4 | 1503 | import pytest
from pandas import Index, MultiIndex, Series
import pandas._testing as tm
class TestSeriesRenameAxis:
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
ser = Series(list(range(len(mi))), index=mi)
result = ser.rename_axis(index={"ll": "foo"})
assert result.index.names == ["foo", "nn"]
result = ser.rename_axis(index=str.upper, axis=0)
assert result.index.names == ["LL", "NN"]
result = ser.rename_axis(index=["foo", "goo"])
assert result.index.names == ["foo", "goo"]
with pytest.raises(TypeError, match="unexpected"):
ser.rename_axis(columns="wrong")
def test_rename_axis_inplace(self, datetime_series):
# GH 15704
expected = datetime_series.rename_axis("foo")
result = datetime_series
no_return = result.rename_axis("foo", inplace=True)
assert no_return is None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("kwargs", [{"mapper": None}, {"index": None}, {}])
def test_rename_axis_none(self, kwargs):
# GH 25034
index = Index(list("abc"), name="foo")
ser = Series([1, 2, 3], index=index)
result = ser.rename_axis(**kwargs)
expected_index = index.rename(None) if kwargs else index
expected = Series([1, 2, 3], index=expected_index)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
hep-gc/panda-autopyfactory | bin/factory.py | 1 | 6335 | #! /usr/bin/env python
#
# Simple(ish) python condor_g factory for panda pilots
#
# $Id$
#
#
# Copyright (C) 2007,2008,2009 Graeme Andrew Stewart
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from optparse import OptionParser
import logging
import logging.handlers
import time
import os
import sys
import traceback
# Need to set PANDA_URL_MAP before the Client module is loaded (which happens
# when the Factory module is loaded). Unfortunately this means that logging
# is not yet available.
if not 'APF_NOSQUID' in os.environ:
if not 'PANDA_URL_MAP' in os.environ:
os.environ['PANDA_URL_MAP'] = 'CERN,http://pandaserver.cern.ch:25085/server/panda,https://pandaserver.cern.ch:25443/server/panda'
print >>sys.stderr, 'FACTORY DEBUG: Set PANDA_URL_MAP to %s' % os.environ['PANDA_URL_MAP']
else:
print >>sys.stderr, 'FACTORY DEBUG: Found PANDA_URL_MAP set to %s. Not changed.' % os.environ['PANDA_URL_MAP']
if not 'PANDA_URL' in os.environ:
os.environ['PANDA_URL'] = 'http://pandaserver.cern.ch:25085/server/panda'
print >>sys.stderr, 'FACTORY DEBUG: Set PANDA_URL to %s' % os.environ['PANDA_URL']
else:
print >>sys.stderr, 'FACTORY DEBUG: Found PANDA_URL set to %s. Not changed.' % os.environ['PANDA_URL']
else:
print >>sys.stderr, 'FACTORY DEBUG: Found APF_NOSQUID set. Not changing/setting panda client environment.'
from autopyfactory.Factory import factory
from autopyfactory.Exceptions import FactoryConfigurationFailure
def main():
parser = OptionParser(usage='''%prog [OPTIONS]
autopyfactory is an ATLAS pilot factory.
This program is licenced under the GPL, as set out in LICENSE file.
Author(s):
Graeme A Stewart <g.stewart@physics.gla.ac.uk>, Peter Love <p.love@lancaster.ac.uk>
''', version="%prog $Id$")
parser.add_option("--verbose", "--debug", dest="logLevel", default=logging.INFO,
action="store_const", const=logging.DEBUG, help="Set logging level to DEBUG [default INFO]")
parser.add_option("--quiet", dest="logLevel",
action="store_const", const=logging.WARNING, help="Set logging level to WARNING [default INFO]")
parser.add_option("--test", "--dry-run", dest="dryRun", default=False,
action="store_true", help="Dry run - supress job submission")
parser.add_option("--oneshot", "--one-shot", dest="cyclesToDo", default=0,
action="store_const", const=1, help="Run one cycle only")
parser.add_option("--cycles", dest="cyclesToDo",
action="store", type="int", metavar="CYCLES", help="Run CYCLES times, then exit [default infinite]")
parser.add_option("--sleep", dest="sleepTime", default=120,
action="store", type="int", metavar="TIME", help="Sleep TIME seconds between cycles [default %default]")
parser.add_option("--conf", dest="confFiles", default="factory.conf",
action="store", metavar="FILE1[,FILE2,FILE3]", help="Load configuration from FILEs (comma separated list)")
parser.add_option("--log", dest="logfile", default="syslog", metavar="LOGFILE", action="store",
help="Send logging output to LOGFILE or SYSLOG or stdout [default <syslog>]")
(options, args) = parser.parse_args()
options.confFiles = options.confFiles.split(',')
# Setup logging
factoryLogger = logging.getLogger('main')
if options.logfile == "stdout":
logStream = logging.StreamHandler()
elif options.logfile == 'syslog':
logStream = logging.handlers.SysLogHandler('/dev/log')
else:
logStream = logging.handlers.RotatingFileHandler(filename=options.logfile, maxBytes=10000000, backupCount=5)
formatter = logging.Formatter('%(asctime)s - %(name)s: %(levelname)s %(message)s')
logStream.setFormatter(formatter)
factoryLogger.addHandler(logStream)
factoryLogger.setLevel(options.logLevel)
factoryLogger.debug('logging initialised')
# Main loop
try:
f = factory(factoryLogger, options.dryRun, options.confFiles)
cyclesDone = 0
while True:
factoryLogger.info('\nStarting factory cycle %d at %s', cyclesDone, time.asctime(time.localtime()))
f.factorySubmitCycle(cyclesDone)
factoryLogger.info('Factory cycle %d done' % cyclesDone)
cyclesDone += 1
if cyclesDone == options.cyclesToDo:
break
factoryLogger.info('Sleeping %ds' % options.sleepTime)
time.sleep(options.sleepTime)
f.updateConfig(cyclesDone)
except KeyboardInterrupt:
factoryLogger.info('Caught keyboard interrupt - exiting')
except FactoryConfigurationFailure, errMsg:
factoryLogger.error('Factory configuration failure: %s', errMsg)
except ImportError, errorMsg:
factoryLogger.error('Failed to import necessary python module: %s' % errorMsg)
except:
# TODO - make this a logger.exception() call
factoryLogger.error('''Unexpected exception! There was an exception
raised which the factory was not expecting and did not know how to
handle. You may have discovered a new bug or an unforeseen error
condition. Please report this exception to Graeme
<g.stewart@physics.gla.ac.uk>. The factory will now re-raise this
exception so that the python stack trace is printed, which will allow
it to be debugged - please send output from this message
onwards. Exploding in 5...4...3...2...1... Have a nice day!''')
# The following line prints the exception to the logging module
factoryLogger.error(traceback.format_exc(None))
raise
if __name__ == "__main__":
main()
| gpl-3.0 |
jrbourbeau/cr-composition | processing/legacy/anisotropy/random_trials/process_kstest.py | 2 | 7627 | #!/usr/bin/env python
import os
import argparse
import numpy as np
import pandas as pd
import pycondor
import comptools as comp
if __name__ == "__main__":
p = argparse.ArgumentParser(
description='Builds a pycondor Dagman that makes randomly split sky maps and saves KS-test p-values')
p.add_argument('-c', '--config', dest='config',
default='IC86.2012',
choices=['IC79', 'IC86.2012', 'IC86.2013', 'IC86.2014', 'IC86.2015'],
help='Detector configuration')
p.add_argument('--low_energy', dest='low_energy',
default=False, action='store_true',
help='Only use events with energy < 10**6.75 GeV')
p.add_argument('--n_side', dest='n_side', type=int,
default=64,
help='Number of times to split the DataFrame')
p.add_argument('--chunksize', dest='chunksize', type=int,
default=1000,
help='Number of lines used when reading in DataFrame')
p.add_argument('--n_batches', dest='n_batches', type=int,
default=50,
help='Number of batches running in parallel for each ks-test trial')
p.add_argument('--ks_trials', dest='ks_trials', type=int,
default=100,
help='Number of random maps to generate')
p.add_argument('--overwrite', dest='overwrite',
default=False, action='store_true',
help='Option to overwrite reference map file, '
'if it already exists')
p.add_argument('--test', dest='test',
default=False, action='store_true',
help='Option to run small test version')
args = p.parse_args()
if args.test:
args.ks_trials = 20
args.n_batches = 10000
args.chunksize = 100
# Define output directories
error = comp.paths.condor_data_dir + '/ks_test_{}/error'.format(args.config)
output = comp.paths.condor_data_dir + '/ks_test_{}/output'.format(args.config)
log = comp.paths.condor_scratch_dir + '/ks_test_{}/log'.format(args.config)
submit = comp.paths.condor_scratch_dir + '/ks_test_{}/submit'.format(args.config)
# Define path to executables
make_maps_ex = os.path.join(comp.paths.project_home,
'processing/anisotropy/ks_test_multipart',
'make_maps.py')
merge_maps_ex = os.path.join(comp.paths.project_home,
'processing/anisotropy/ks_test_multipart',
'merge_maps.py')
save_pvals_ex = os.path.join(comp.paths.project_home,
'processing/anisotropy/ks_test_multipart',
'save_pvals.py')
# Create Dagman instance
dag_name = 'anisotropy_kstest_{}'.format(args.config)
if args.test:
dag_name += '_test'
dagman = pycondor.Dagman(dag_name, submit=submit, verbose=1)
# Create Job for saving ks-test p-values for each trial
save_pvals_name = 'save_pvals_{}'.format(args.config)
if args.low_energy:
save_pvals_name += '_lowenergy'
save_pvals_job = pycondor.Job(save_pvals_name, save_pvals_ex,
error=error, output=output,
log=log, submit=submit,
verbose=1)
save_pvals_infiles_0 = []
save_pvals_infiles_1 = []
dagman.add_job(save_pvals_job)
outdir = os.path.join(comp.paths.comp_data_dir, args.config + '_data',
'anisotropy', 'random_splits')
if args.test:
outdir = os.path.join(outdir, 'test')
for trial_num in range(args.ks_trials):
# Create map_maps jobs for this ks_trial
make_maps_name = 'make_maps_{}_trial-{}'.format(args.config, trial_num)
if args.low_energy:
make_maps_name += '_lowenergy'
make_maps_job = pycondor.Job(make_maps_name, make_maps_ex,
error=error, output=output,
log=log, submit=submit,
verbose=1)
dagman.add_job(make_maps_job)
merge_maps_infiles_0 = []
merge_maps_infiles_1 = []
for batch_idx in range(args.n_batches):
if args.test and batch_idx > 2:
break
outfile_sample_1 = os.path.join(outdir,
'random_split_1_trial-{}_batch-{}.fits'.format(trial_num, batch_idx))
outfile_sample_0 = os.path.join(outdir,
'random_split_0_trial-{}_batch-{}.fits'.format(trial_num, batch_idx))
make_maps_arg_list = []
make_maps_arg_list.append('--config {}'.format(args.config))
make_maps_arg_list.append('--n_side {}'.format(args.n_side))
make_maps_arg_list.append('--chunksize {}'.format(args.chunksize))
make_maps_arg_list.append('--n_batches {}'.format(args.n_batches))
make_maps_arg_list.append('--batch_idx {}'.format(batch_idx))
make_maps_arg_list.append('--outfile_sample_0 {}'.format(outfile_sample_0))
make_maps_arg_list.append('--outfile_sample_1 {}'.format(outfile_sample_1))
make_maps_arg = ' '.join(make_maps_arg_list)
if args.low_energy:
make_maps_arg += ' --low_energy'
make_maps_job.add_arg(make_maps_arg)
# Add this outfile to the list of infiles for merge_maps_job
merge_maps_infiles_0.append(outfile_sample_0)
merge_maps_infiles_1.append(outfile_sample_1)
for sample_idx, input_file_list in enumerate([merge_maps_infiles_0,
merge_maps_infiles_1]):
merge_maps_name = 'merge_maps_{}_trial-{}_split-{}'.format(args.config, trial_num, sample_idx)
if args.low_energy:
merge_maps_name += '_lowenergy'
merge_maps_job = pycondor.Job(merge_maps_name, merge_maps_ex,
error=error, output=output,
log=log, submit=submit,
verbose=1)
# Ensure that make_maps_job completes before merge_maps_job begins
make_maps_job.add_child(merge_maps_job)
merge_maps_job.add_child(save_pvals_job)
dagman.add_job(merge_maps_job)
merge_infiles_str = ' '.join(input_file_list)
# Assemble merged output file path
merge_outfile = os.path.join(outdir, 'random_split_{}_trial-{}.fits'.format(sample_idx, trial_num))
merge_maps_arg = '--infiles {} --outfile {}'.format(merge_infiles_str, merge_outfile)
merge_maps_job.add_arg(merge_maps_arg)
if sample_idx == 0:
save_pvals_infiles_0.append(merge_outfile)
else:
save_pvals_infiles_1.append(merge_outfile)
save_pvals_infiles_0_str = ' '.join(save_pvals_infiles_0)
save_pvals_infiles_1_str = ' '.join(save_pvals_infiles_1)
if args.low_energy:
outfile_basename = 'ks_test_dataframe_lowenergy.hdf'
else:
outfile_basename = 'ks_test_dataframe.hdf'
outfile = os.path.join(outdir, outfile_basename)
save_pvals_arg = '--infiles_sample_0 {} --infiles_sample_1 {} ' \
'--outfile {}'.format(save_pvals_infiles_0_str, save_pvals_infiles_1_str, outfile)
save_pvals_job.add_arg(save_pvals_arg)
dagman.build_submit(fancyname=True)
| mit |
zseder/hunmisc | hunmisc/utils/plotting/matplotlib_simple_xy.py | 1 | 1535 | """
Copyright 2011-13 Attila Zseder
Email: zseder@gmail.com
This file is part of hunmisc project
url: https://github.com/zseder/hunmisc
hunmisc is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
import sys
import matplotlib.pyplot as plt
from matplotlib import rc
def read_data(istream):
r = [[],[],[],[],[]]
for l in istream:
le = l.strip().split()
[r[i].append(le[i]) for i in xrange(len(le))]
return r
def main():
d = read_data(open(sys.argv[1]))
rc('font', size=14)
ax = plt.subplot(111)
ax.plot(d[0], d[1], label="$M$", linewidth=2)
ax.plot(d[0], d[2], label="$l KL$", linewidth=2)
ax.plot(d[0], d[3], label="$l (H_q+KL)$", linewidth=2)
ax.plot(d[0], d[4], label="$M + l (H_q+KL)$", linewidth=2)
plt.xlabel("Bits")
ax.legend(loc=7)
plt.show()
#plt.savefig("fig.png")
if __name__ == "__main__":
main()
| gpl-3.0 |
hanteng/babel | scripts/geoname_cldr.py | 1 | 2479 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#歧視無邊,回頭是岸。鍵起鍵落,情真情幻。
# url_target="https://raw.githubusercontent.com/datasets/country-codes/master/data/country-codes.csv"
import csv
import pandas as pd
import codecs
def export_to_csv(df, ex_filename, sep=','):
if sep==',':
df.to_csv(ex_filename, sep=sep, quoting=csv.QUOTE_ALL, na_rep='{na}', encoding='utf-8') #+'.csv'
if sep=='\t':
df.to_csv(ex_filename, sep=sep, quoting=csv.QUOTE_NONE, na_rep='{na}', encoding='utf-8') #+'.tsv' , escapechar="'", quotechar=""
def import_from_babel_cldr():
from babel import Locale
# starting from the en-US locale to retrieve keys
locale = Locale('en', 'US')
completelist_territories = locale.territories.keys()
completelist_languages = locale.languages.keys()
# initiate the output DataFrame from this
df_cldr=pd.DataFrame.from_dict(locale.territories, orient="index")
df_cldr.index.name='geocode'
df_cldr.columns = ['name_en']
df_cldr.sort_index(inplace=True)
for i_lang in completelist_languages:
#print(i_lang)
try:
locale = Locale.parse(i_lang)
df=pd.DataFrame.from_dict(locale.territories, orient="index")
df.columns = ['name_{0}'.format(i_lang)]
df.sort_index(inplace=True)
df_cldr=df_cldr.join(df)
except:
pass
return df_cldr
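# Minimal usage sketch (added for illustration; assumes babel's bundled CLDR data
# is available and that the frame returned above is indexed by territory code).
def _example_lookup():
    df = import_from_babel_cldr()
    return df.loc['US', 'name_en']  # expected to be 'United States'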
###################### MAIN ########################
import os
path_script=os.path.dirname(os.path.abspath(__file__))
#print path_script
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""Fetch and generate the country and territory names in languages that are supported by the Unicode CLDR 25.""")
parser.add_argument("-o", "--output", dest="outputpath", default="geoname_CLDR25_babel.csv",
help="write data to a csv file or a tsv file", metavar="OUTPUTPATH")
args = parser.parse_args()
fn = args.outputpath
#print fn
df_cldr=import_from_babel_cldr()
if fn[-3:]=='csv':
print ("Outputing to {}".format(fn))
export_to_csv(df_cldr, ex_filename=os.path.join(path_script, fn), sep=',')
elif fn[-3:]=='tsv':
print ("Outputing to {}".format(fn))
export_to_csv(df_cldr, ex_filename=os.path.join(path_script, fn), sep='\t')
else:
print ("Only csv and tsv formats can be generated. Sorry.")
| bsd-3-clause |
nagordon/mechpy | mechpy/composites.py | 1 | 71681 | # coding: utf-8
'''
Module for composite material analysis
Hyer-Stress Analysis of Fiber-Reinforced Composite Materials
Herakovich-Mechanics of Fibrous Composites
Daniel-Engineering Mechanics of Composite Materials
Kollar-Mechanics of COmposite Structures
NASA- Basic Mechancis of Lamianted Composites
https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19950009349.pdf
TODO:
* transverse shear stress reddy pg 136 or daniel pg 139
* include line loads (Qx,Qy) for combined loading
* calculate capability of panel based on margin
'''
#==============================================================================
# Import Modules
#==============================================================================
from __future__ import print_function, division
__author__ = 'Neal Gordon <nealagordon@gmail.com>'
__date__ = '2016-12-02'
__version__ = 0.1
from copy import copy
from numpy import pi, zeros, ones, linspace, arange, array, sin, cos, sqrt
from numpy.linalg import solve, inv
#from scipy import linalg
import numpy as np
#np.set_printoptions(suppress=False,precision=2) # suppress scientific notation
np.set_printoptions(precision=3, linewidth=200)#, threshold=np.inf)
import scipy
from scipy.spatial import ConvexHull
#np.set_printoptions(formatter={'float': lambda x: "{:.2f}".format(x)})
import pandas as pd
import sympy as sp
from sympy import Function, dsolve, Eq, Derivative, symbols, pprint
from sympy.plotting import plot3d
#from sympy import cos, sin
#sp.init_printing(use_latex='mathjax')
#sp.init_printing(wrap_line=False, pretty_print=True)
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (8,5)
mpl.rcParams['font.size'] = 12
mpl.rcParams['legend.fontsize'] = 14
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot,figure,xlim,ylim,title,legend, \
grid, show, xlabel,ylabel, tight_layout
from mpl_toolkits.mplot3d import axes3d
# if using ipython console, turn off inline plotting
#mpl.use('Qt5Agg')
# inline plotting
from IPython import get_ipython
#get_ipython().magic('matplotlib inline')
###disable inline plotting
try:
get_ipython().magic('matplotlib')
except:
pass
from IPython.display import display
import os
plt.close('all')
#==============================================================================
# Functions
#==============================================================================
def import_matprops(mymaterial=['T300_5208','AL_7075']):
'''
import material properties
'''
matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0)
if mymaterial==[] or mymaterial=='':
print(matprops.columns.tolist())
mat = matprops[mymaterial]
#mat.applymap(lambda x:np.float(x))
mat = mat.applymap(lambda x:pd.to_numeric(x, errors='ignore'))
return mat
def Sf(E1,E2,nu12,G12):
'''transversely isotropic compliance matrix. pg 58 herakovich'''
nu21 = E2*nu12/E1
S = array([[1/E1, -nu21/E2, 0],
[-nu12/E1, 1/E2, 0],
[0, 0, 1/G12]])
return S
def S6f(E1,E2,E3,nu12,nu13,nu23,G12,G13,G23):
'''
daniel pg 74
transversely isotropic compliance matrix.
For transversly isotropic
E2=E3, nu12=nu13,G12=G13,G23=E2/(2(1+nu23))
'''
S6 = array( [[ 1/E1, -nu12/E1, -nu12/E1, 0, 0, 0],
[-nu12/E1, 1/E2, -nu23/E2, 0, 0, 0],
[-nu12/E1, -nu23/E2, 1/E2, 0, 0, 0],
[ 0, 0, 0, 1/G23, 0, 0],
[ 0, 0, 0, 0, 1/G13, 0],
[ 0, 0, 0, 0, 0, 1/G12]])
return S6
def C6f(E1,E2,E3,nu12,nu13,nu23,G12,G13,G23):
'''
daniel pg 74
transversely isotropic stiffness matrix.
'''
C6 = inv(S6f(E1,E2,E3,nu12,nu13,nu23,G12,G13,G23))
return C6
def Qf(E1,E2,nu12,G12):
'''transversely isotropic reduced stiffness matrix. pg 58 herakovich
G12 = E1/(2*(1+nu12)) if isotropic'''
nu21 = E2*nu12/E1
Q = array([[E1/(1-nu12*nu21), E2*nu12/(1-nu12*nu21), 0],
[ E2*nu12/(1-nu12*nu21), E2/(1-nu12*nu21), 0],
[0, 0, G12]])
return Q
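# Quick sanity-check sketch (added for illustration; the material values below are
# assumed, roughly carbon/epoxy-like, and are not taken from any datasheet): the
# reduced stiffness matrix should be the inverse of the compliance matrix.
def _stiffness_compliance_check():
    E1, E2, nu12, G12 = 19.2e6, 1.56e6, 0.24, 0.82e6  # psi, illustrative only
    Q = Qf(E1, E2, nu12, G12)
    S = Sf(E1, E2, nu12, G12)
    assert np.allclose(Q @ S, np.eye(3))
    return Q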
def T61(th):
'''Stress
th=ply angle in degrees
Voigt notation for stress transform. sigma1 = T1 @ sigmax
reddy pg 91'''
n = sin(th*pi/180)
m = cos(th*pi/180)
T1 = array( [[m**2, n**2, 0, 0, 0, 2*m*n],
[n**2, m**2, 0, 0, 0,-2*m*n],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, m,-n, 0],
[0, 0, 0, n, m, 0],
[-m*n, m*n, 0, 0, 0,(m**2-n**2)]])
return T1
def T62(th):
'''Strain
Voigt notation for strain transform. epsilon1 = T2 @ epsilonx
th=ply angle in degrees
reddy pg 91
'''
n = sin(th*pi/180)
m = cos(th*pi/180)
T2 = array( [[m**2, n**2, 0, 0, 0, m*n],
[n**2, m**2, 0, 0, 0,-m*n],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, m,-n, 0],
[0, 0, 0, n, m, 0],
[-2*m*n, 2*m*n, 0, 0, 0,(m**2-n**2)]])
return T2
def T1(th):
'''Stress Transform for Plane Stress
th=ply angle in degrees
Voigt notation for stress transform. sigma1 = T1 @ sigmax
recall T1(th)**-1 == T1(-th)'''
n = sin(th*pi/180)
m = cos(th*pi/180)
T1 = array( [[m**2, n**2, 2*m*n],
[n**2, m**2,-2*m*n],
[-m*n, m*n,(m**2-n**2)]])
return T1
def T2(th):
'''Strain Transform for Plane Stress
th=ply angle in degrees
Voigt notation for strain transform. epsilon1 = T2 @ epsilonx'''
n = sin(th*pi/180)
m = cos(th*pi/180)
T2 = array( [[m**2, n**2, m*n],
[n**2, m**2,-m*n],
[-2*m*n, 2*m*n, (m**2-n**2)]])
return T2
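# Small verification sketch (added for illustration) of the identity noted in the
# T1 docstring: rotating by -theta inverts the transform, for both the stress and
# strain forms. The 30 degree angle is an arbitrary choice.
def _transform_inverse_check(th=30.0):
    assert np.allclose(inv(T1(th)), T1(-th))
    assert np.allclose(inv(T2(th)), T2(-th))
    return True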
def T1s(th):
'''Symbolic Stress Transform for Plane Stress
th=ply angle in degrees
Voigt notation for stress transform. sigma1 = T1 @ sigmax
recall T1(th)**-1 == T1(-th)'''
n = sp.sin(th*sp.pi/180)
m = sp.cos(th*sp.pi/180)
T1 = sp.Matrix( [[m**2, n**2, 2*m*n],
[n**2, m**2,-2*m*n],
[-m*n, m*n,(m**2-n**2)]])
return T1
def T2s(th):
'''Symbolic Strain Transform for Plane Stress
th=ply angle in degrees
Voigt notation for strain transform. epsilon1 = T2 @ epsilonx'''
n = sp.sin(th*sp.pi/180)
m = sp.cos(th*sp.pi/180)
T2 = sp.Matrix( [[m**2, n**2, m*n],
[n**2, m**2,-m*n],
[-2*m*n, 2*m*n, (m**2-n**2)]])
return T2
def failure_envelope():
# failure envelopes
# max stress criteria
# 1 direction in first row
# 2 direction in second row
# failure strength in compression
#Fc = matrix([[-1250.0, -600.0],
# [-200.0, -120.0]]) # ksi
#
##failure strength in tension
#Ft = matrix([[1500, 1000]
# [50, 30]]) # ksi
#
##Failure strength in shear
#Fs = matrix( [100, 70] ) # Shear
Fc1 = [-1250, -600] # Compression 1 direction
Fc2 = [-200, -120] # Compression 2 direction
Ft1 = [1500, 1000] # Tension 1 direction
Ft2 = [50, 30] # Tension 2 direction
Fs = [100, 70] # Shear
# F1 = Ft(1);
# F2 = Ft(1);
# F6 = Fs(1);
for c in range(2):# mattype
factor = 1.25
# right
plot( [Ft1[c], Ft1[c]], [Fc2[c], Ft2[c]])
# left
plot( [Fc1[c], Fc1[c]] , [Fc2[c], Ft2[c]])
# top
plot( [Fc1[c], Ft1[c]] , [Ft2[c], Ft2[c]])
# bottom
plot( [Fc1[c], Ft1[c]] , [Fc2[c], Fc2[c]])
# center horizontal
plot( [Fc1[c], Ft1[c]] , [0, 0])
# center vertical
plot( [0, 0] , [Fc2[c], Ft2[c]])
#xlim([min(Fc1) max(Ft1)]*factor)
#ylim([min(Fc2) max(Ft2)]*factor)
xlabel('$\sigma_1,ksi$')
ylabel('$\sigma_2,ksi$')
title('failure envelope with Max-Stress Criteria')
def material_plots(materials = ['Carbon_cloth_AGP3705H']):
'''
plotting composite properties
Sf(E1,E2,nu12,G12)
'''
# plt.rcParams['figure.figsize'] = (10, 8)
# plt.rcParams['font.size'] = 14
# plt.rcParams['legend.fontsize'] = 14
plyangle = arange(-45, 45.1, 0.1)
h = 1 # lamina thickness
layupname='[0]'
mat = import_matprops(materials)
Ex = mat[materials[0]].E1
Ey = mat[materials[0]].E2
nuxy = mat[materials[0]].nu12
Gxy = mat[materials[0]].G12
# layupname = '[0, 45, 45, 0]'
# Ex= 2890983.38
# Ey= 2844063.06
# nuxy= 0.27
# Gxy= 1129326.25
# h = 0.0600
plt.close('all')
S = Sf(Ex,Ey,nuxy,Gxy)
C = inv(S)
C11 = [(inv(T1(th)) @ C @ T2(th))[0,0] for th in plyangle]
C22 = [(inv(T1(th)) @ C @ T2(th))[1,1] for th in plyangle]
C33 = [(inv(T1(th)) @ C @ T2(th))[2,2] for th in plyangle]
C12 = [(inv(T1(th)) @ C @ T2(th))[0,1] for th in plyangle]
Exbar = zeros(len(plyangle))
Eybar = zeros(len(plyangle))
Gxybar = zeros(len(plyangle))
Q = Qf(Ex,Ey,nuxy,Gxy)
Qbar = zeros((len(plyangle),3,3))
for i,th in enumerate(plyangle):
Qbar[i] = solve(T1(th), Q) @ T2(th)
#Qbar = [solve(T1(th),Q) @ T2(th) for th in plyangle]
Qbar11 = Qbar[:,0,0]
Qbar22 = Qbar[:,1,1]
Qbar66 = Qbar[:,2,2]
Qbar12 = Qbar[:,0,1]
Qbar16 = Qbar[:,0,2]
Qbar26 = Qbar[:,1,2]
Aij = Qbar*h
# laminate Stiffness
# | Exbar Eybar Gxybar |
# A = | vxybar vyxbar etasxbar |
# | etaxsbar etaysbar etasybar |
# laminate Comnpliance
aij = zeros((len(plyangle),3,3))
for i, _Aij in enumerate(Aij):
aij[i] = inv(_Aij)
# material properties for whole laminate (Daniel, pg183)
Exbar = [1/(h*_aij[0,0]) for _aij in aij]
Eybar = [1/(h*_aij[1,1]) for _aij in aij]
Gxybar = [1/(h*_aij[2,2]) for _aij in aij]
# Global Stress
s_xy = array([[100],
[10],
[5]])
# local ply stress
s_12 = np.zeros((3,len(plyangle)))
for i,th in enumerate(plyangle):
#s_12[:,i] = np.transpose(T1(th) @ s_xy)[0] # local stresses
s_12[:,[i]] = T1(th) @ s_xy
# Plotting
figure()#, figsize=(10,8))
plot(plyangle, C11, plyangle, C22, plyangle, C33, plyangle, C12)
legend(['$\overline{C}_{11}$','$\overline{C}_{22}$', '$\overline{C}_{44}$', '$\overline{C}_{66}$'])
title('Transversly Isotropic Stiffness properties of carbon fiber T300_5208')
xlabel("$\Theta$")
ylabel('$\overline{C}_{ii}$, ksi')
grid()
figure()#, figsize=(10,8))
plot(plyangle, Exbar, label = r"Modulus: $E_x$")
plot(plyangle, Eybar, label = r"Modulus: $E_y$")
plot(plyangle, Gxybar, label = r"Modulus: $G_{xy}$")
title("Constitutive Properties in various angles")
xlabel("$\Theta$")
ylabel("modulus, psi")
legend()
grid()
figure()#,figsize=(10,8))
plot(plyangle, s_12[0,:], label = '$\sigma_{11},ksi$' )
plot(plyangle, s_12[1,:], label = '$\sigma_{22},ksi$' )
plot(plyangle, s_12[2,:], label = '$\sigma_{12},ksi$' )
legend(loc='lower left')
xlabel("$\Theta$")
ylabel("Stress, ksi")
grid()
# plot plyangle as a function of time
figure()#,figsize=(10,8))
plot(plyangle,Qbar11, label = "Qbar11")
plot(plyangle,Qbar22, label = "Qbar22")
plot(plyangle,Qbar66, label = "Qbar66")
legend(loc='lower left')
xlabel("$\Theta$")
ylabel('Q')
grid()
# plot plyangle as a function of time
figure()#,figsize=(10,8))
plot(plyangle,Qbar12, label = "Qbar12")
plot(plyangle,Qbar16, label = "Qbar16")
plot(plyangle,Qbar26, label = "Qbar26")
legend(loc='lower left')
xlabel("$\Theta$")
ylabel('Q')
grid()
titlename = 'Laminate Properties varying angle for {} {}'.format(materials[0], layupname)
#df = pd.DataFrame({'plyangle':plyangle, 'Exbar':Exbar, 'Eybar':Eybar,'Gxybar':Gxybar})
#print(df)
#df.to_csv(titlename+'.csv')
plt.figure(figsize=(9,6))
plot(plyangle, Exbar, label = r"Modulus: $E_x$")
plot(plyangle, Eybar, label = r"Modulus: $E_y$")
plot(plyangle, Gxybar, label = r"Modulus: $G_{xy}$")
title(titlename)
xlabel("$\Theta$")
ylabel("modulus, psi")
legend(loc='best')
grid()
#plt.savefig(titlename+'.png')
show()
def laminate_gen(lamthk=1.5, symang=[45,0,90], plyratio=2.0, matrixlayers=False, balancedsymmetric=True):
'''
## function created to quickly create laminates based on given parameters
lamthk=1.5 # total thickness of laminate
symang = [45,0,90, 30] # symmetric ply angles
plyratio=2.0 # lamina/matrix thickness ratio
matrixlayers=False # add matrix layers between lamina plies
balancedsymmetric=True # build a balanced symmetric laminate
mat = material type, as in different plies, matrix layer, uni tapes, etc
#ply ratio can be used to vary the ratio of thickness between a matrix ply
and lamina ply. if the same thickness is desired, plyratio = 1,
if lamina is 2x as thick as matrix plyratio = 2
'''
if matrixlayers:
nply = (len(symang)*2+1)*2
nm = nply-len(symang)*2
nf = len(symang)*2
tm = lamthk / (plyratio*nf + nm)
tf = tm*plyratio
plyangle = zeros(nply//2)
mat = 2*ones(nply//2) # orthotropic fiber and matrix = 1, isotropic matrix=2,
mat[1:-1:2] = 1 # [2 if x%2 else 1 for x in range(nply//2) ]
plyangle[1:-1:2] = symang[:] # make a copy
thk = tm*ones(nply//2)
thk[1:-1:2] = tf  # fiber plies (odd indices) get the lamina thickness
lamang = list(symang) + list(symang[::-1])
plyangle = list(plyangle) + list(plyangle[::-1])
mat = list(mat) + list(mat[::-1])
thk = list(thk) + list(thk[::-1])
else: # no matrix layers, ignore ratio
if balancedsymmetric:
nply = len(symang)*2
mat = list(3*np.ones(nply))
thk = list(lamthk/nply*np.ones(nply))
lamang = list(symang) + list(symang[::-1])
plyangle = list(symang) + list(symang[::-1])
else:
nply = len(symang)
mat =[1]*nply
thk = list(lamthk/nply*np.ones(nply))
lamang = symang[:]
plyangle = symang[:]
return thk,plyangle,mat,lamang
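# Example usage sketch (added for illustration): a 1.5-unit-thick balanced
# symmetric [45/0/90]s laminate built with the defaults above.
def _laminate_gen_example():
    thk, plyangle, mat, lamang = laminate_gen(lamthk=1.5, symang=[45, 0, 90])
    assert plyangle == [45, 0, 90, 90, 0, 45]
    assert abs(sum(thk) - 1.5) < 1e-12
    return thk, plyangle, mat, lamang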
def make_quasi(n0=4,n45=4):
#n0 = 4
#n45 = 13
#
#ply0 = [0]*n0
#ply45 = [45]*n45
#plyangle = []
#from itertools import zip_longest
#for x,y in zip_longest(ply0,ply45):
# if len(plyangle)<min(len(ply0),len(ply45))*2:
# plyangle.append(x)
# plyangle.append(y)
# else:
# plyangle.append(x)
# plyangle.reverse()
# plyangle.append(y)
#plyangle = [x for x in plyangle if x is not None]
#plyangle
ntot = n45+n0
plyangle = [45]*int(n45)
for p in [0]*int(n0):
plyangle.append(p)
plyangle.reverse()
return plyangle
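# Example sketch (added for illustration): make_quasi returns the 0-degree plies
# first, followed by the 45-degree plies.
def _make_quasi_example():
    stack = make_quasi(n0=4, n45=4)
    assert stack == [0, 0, 0, 0, 45, 45, 45, 45]
    return stack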
#@xw.func
def laminate_calcs(NM,ek,q0,plyangle,plymatindex,materials,platedim, zoffset,SF,plots,prints):
'''
code to compute composite properties, applied mechanical and thermal loads
and stress and strain
inputs
NM # force/moments lbs/in
ek # strain, curvature in/in
q0 = pressure
plyangle # angle for each ply
plymatindex # material for each ply
materials # list materials used,
general outline for computing elastic properties of composites
1) Determine engineering properties of unidirectional laminate. E1, E2, nu12, G12
2) Calculate ply stiffnesses Q11, Q22, Q12, Q66 in the principal/local coordinate system
3) Determine Fiber orientation of each ply
4) Calculate the transformed stiffness Qxy in the global coordinate system
5) Determine the through-thicknesses of each ply
6) Determine the laminate stiffness Matrix (ABD)
7) Calculate the laminate compliance matrix by inverting the ABD matrix
8) Calculate the laminate engineering properties
# Stress Strain Relationship for a laminate, with Q=reduced stiffness matrix
|sx | |Qbar11 Qbar12 Qbar16| |ex +z*kx |
|sy |=|Qbar12 Qbar22 Qbar26|=|ey +z*ky |
|sxy| |Qbar16 Qbar26 Qbar66| |exy+z*kxy|
# Herakovich pg 84
Qbar = inv(T1) @ Q @ T2 == solve(T1, Q) @ T2
transformation reminders - see Herakovich for details
sig1 = T1*sigx
sigx = inv(T1)*sig1
eps1 = T2*epsx
epsx = inv(T2)*epsx
sigx = inv(T1)*Q*T2*epsx
Qbar = inv(T1)*Q*T2
Sbar = inv(T2)*inv(Q)*T2
Note: core transverse direction is G13, ribbon direction is G23
a_width = 50 # plate width (inches or meters)
b_length = 50 # laminate length, inches or meters
'''
#==========================================================================
# Initialize python settings
#==========================================================================
#get_ipython().magic('matplotlib')
plt.close('all')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams['font.size'] = 13
#plt.rcParams['legend.fontsize'] = 14
#==========================================================================
# Define composite properties
#==========================================================================
assert(len(plyangle)==len(plymatindex))
a_width, b_length = platedim
# either apply strains or loads , lb/in
Nx_, Ny_, Nxy_, Mx_, My_, Mxy_ = NM
NMbarapp = array([[Nx_],[Ny_],[Nxy_],[Mx_],[My_],[Mxy_]])
ex_, ey_, exy_, kx_, ky_, kxy_ = ek
epsilonbarapp = array([[ex_],[ey_],[exy_],[kx_],[ky_],[kxy_]])
Ti = 0 # initial temperature (C)
Tf = 0 # final temperature (C)
#SF = 1.0 # safety factor
#==========================================================================
# Import Material Properties
#==========================================================================
mat = import_matprops(materials)
#mat = import_matprops(['E-Glass Epoxy cloth','rohacell2lb']) # Herakovich
alphaf = lambda mat: array([[mat.alpha1], [mat.alpha2], [0]])
''' to get ply material info, use as follows
alpha = alphaf(mat[materials[plymatindex[i]]])
mat[materials[1]].E2
'''
laminatethk = array([mat[materials[i]].plythk for i in plymatindex ])
nply = len(laminatethk) # number of plies
H = np.sum(laminatethk) # plate thickness
# area = a_width*H
z = zeros(nply+1)
zmid = zeros(nply)
z[0] = -H/2
for i in range(nply):
z[i+1] = z[i] + laminatethk[i]
zmid[i] = z[i] + laminatethk[i]/2
#==========================================================================
# ABD Matrix Compute
#==========================================================================
# Reduced stiffness matrix for a plane stress ply in principal coordinates
    # calculating Q from the compliance matrix may cause cancellation errors
A = zeros((3,3)); B = zeros((3,3)); D = zeros((3,3))
for i in range(nply): # = nply
Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 )
Qbar = solve(T1(plyangle[i]), Q) @ T2(plyangle[i]) # inv(T1(plyangle[i])) @ Q @ T2(plyangle[i])
A += Qbar*(z[i+1]-z[i])
# coupling stiffness
B += (1/2)*Qbar*(z[i+1]**2-z[i]**2)
# bending or flexural laminate stiffness relating moments to curvatures
D += (1/3)*Qbar*(z[i+1]**3-z[i]**3)
#Cbar6 = T61 @ C6 @ np.transpose(T61)
# laminate stiffness matrix
ABD = zeros((6,6))
ABD[0:3,0:3] = A
ABD[0:3,3:6] = B + zoffset*A
ABD[3:6,0:3] = B + zoffset*A
ABD[3:6,3:6] = D + 2*zoffset*B + zoffset**2*A
    # laminate compliance matrix
abcd = inv(ABD)
a = abcd[0:3,0:3]
#==========================================================================
# Laminate Properties
#==========================================================================
# effective laminate shear coupling coefficients
etasxbar = a[0,2]/a[2,2]
etasybar = a[1,2]/a[2,2]
etaxsbar = a[2,0]/a[0,0]
etaysbar = a[2,1]/a[1,1]
    # laminate engineering properties
Exbar = 1 / (H*a[0,0])
Eybar = 1 / (H*a[1,1])
Gxybar = 1 / (H*a[2,2])
nuxybar = -a[0,1]/a[0,0]
nuyxbar = -a[0,1]/a[1,1]
# TODO: validate results, does not appear to be correct
    # strain centers, pg 72, NASA - Basic mechanics of laminated composites
# added divide by zero epsilon
z_eps0_x = -B[0,0] / (D[0,0] + 1e-16)
z_eps0_y = -B[0,1] / (D[0,1] + 1e-16)
z_eps0_xy = -B[0,2] / (D[0,2] + 1e-16)
z_sc = -B[2,2] / (D[2,2] +1e-16) # shear center
# --------------------- Double Check ---------------------
# # Laminate compliance matrix
# LamComp = array([ [1/Exbar, -nuyxbar/Eybar, etasxbar/Gxybar],
# [-nuxybar/Exbar, 1/Eybar , etasybar/Gxybar],
# [etaxsbar/Exbar, etaysbar/Eybar, 1/Gxybar]] )
# # Daniel pg 183
# # combines applied loads and applied strains
# strain_laminate = LamComp @ Nxyzapplied[:3]/H + strainxyzapplied[:3]
# Nxyz = A @ strain_laminate
# stress_laminate = Nxyz/H
# --------------------------------------------------------
#==========================================================================
# Pressure Load
#==========================================================================
#==========================================================================
# pressure displacement and moments
#==========================================================================
D11,D12,D22,D66 = D[0,0], D[0,1], D[1,1], D[2,2]
B11 = B[0,0]
A11, A12 = A[0,0], A[0,1]
# reddy pg 247 Navier displacement solution for a simply supported plate
s = b_length/a_width
x = a_width/2
y = b_length/2
# 5.2.8, reddy, or hyer 13.123
terms = 5
w0 = 0
for m in range(1,terms,2):
for n in range(1,terms,2):
dmn = pi**4/b_length**4 * (D11*m**4*s**4 + 2*(D12 + 2*D66)*m**2*n**2*s**2 + D22*n**4)
alpha = m*pi/a_width
beta = n*pi/b_length
# for uniformly distributed loads, m,n = 1,3,5,...
Qmn = 16*q0/(pi**2*m*n)
Wmn = Qmn/dmn
w0 += Wmn * sin(alpha*x) * sin(beta*y)
w0_simplesupport = w0
# 5.2.12a, reddy
# mid span moments
Mxq=Myq=Mxyq=0
for m in range(1,terms,2):
for n in range(1,terms,2):
dmn = pi**4/b_length**4 * (D11*m**4*s**4 + 2*(D12 + 2*D66)*m**2*n**2*s**2 + D22*n**4)
alpha = m*pi/a_width
beta = n*pi/b_length
# for uniformly distributed loads, m,n = 1,3,5,...
Qmn = 16*q0/(pi**2*m*n)
Wmn = Qmn/dmn
Mxq += (D11*alpha**2 + D12*beta**2 ) * Wmn * sin(m*pi*x/a_width) * sin(n*pi*y/b_length)
Myq += (D12*alpha**2 + D22*beta**2 ) * Wmn * sin(m*pi*x/a_width) * sin(n*pi*y/b_length)
Mxyq += alpha*beta*D66 * Wmn * cos(m*pi*x/a_width) * cos(n*pi*y/b_length)
Mxyq = -2*Mxyq
NMq = [[0],[0],[0],[Mxq],[Myq],[Mxyq]]
# hyer, x-pin-pin, y-free-free plate reaction forces, pg 619
# Forces and Moments across the width of the plate
A11R = A11*(1-B11**2/(A11*D11))
D11R = D11*(1-B11**2/(A11*D11))
Nxq0 = lambda x: B11/D11 * q0 * a_width**2 /12
Nyq0 = lambda x: B11 * A12*q0 * a_width**2 / (D11*A11R*12) * (6*(x/a_width)**2-1/2)
Nxyq0 = lambda x: 0
Mxq0 = lambda x: q0 * a_width**2/8 * (1-4*(x/a_width)**2)
Myq0 = lambda x: D12 * q0 * a_width**2 / (D11R*8) * ((1-2*B11**2/(3*A11*D11))-(4*(x/a_width)**2))
Mxyq0 = lambda x: 0
# clamped plate 5.4.11, reddy
#w0_clamped = ( 49 * q0*a_width**4 * (x/a_width - (x/a_width)**2 )**2 * (y/b_length - (y/b_length)**2)**2) / (8 * (7*D11+4*(D12 + 2*D66)*s**2 + 7*D22*s**4) )
# reddy, 5.4.12
w0_clamped = 0.00342 * (q0*a_width**4) / (D11+0.5714*(D12+2*D66)*s**2+D22*s**4)
# reddy, 5.4.15
#w0_clamped = 0.00348 * (q0*a_width**4) / (D11*b_length**4+0.6047*(D12+2*D66)*s**2+D22*s**4)
# reddy 5.4.15, for isotropic D11=D
w0_clamped_isotropic = 0.00134*q0*a_width**4/D11
#==========================================================================
# Applied Loads and pressure loads
#==========================================================================
NMbarapptotal = NMbarapp + NMq + ABD @ epsilonbarapp
#==========================================================================
# Thermal Loads
#==========================================================================
'''
if the material is isotropic and unconstrained, then no thermal stresses
will be experienced. If there are constraints, then the material will experience
thermally induced stresses. As with orthotropic materials, various directions will have
different stresses, and when stacked in various orientations, stresses can be
unintuitive and complicated. Global Thermal strains are subtracted from applied strains
# 1) determine the free unrestrained thermal strains in each layer, alphabar
'''
dT = Tf-Ti
Nhatth= zeros((3,1)) # unit thermal force in global CS
Mhatth = zeros((3,1)) # unit thermal moment in global CS
alphabar = zeros((3,nply)) # global ply CTE
for i in range(nply): # = nply
Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 )
alpha = alphaf(mat[materials[plymatindex[i]]])
Qbar = inv(T1(plyangle[i])) @ Q @ T2(plyangle[i])
alphabar[:,[i]] = solve(T2(plyangle[i]), alpha)
#alphabar[:,[i]] = inv(T2(plyangle[i])) @ alpha # Convert to global CS
Nhatth += Qbar @ (alphabar[:,[i]])*(z[i+1] - z[i]) # Hyer method for calculating thermal unit loads
Mhatth += 0.5*Qbar@(alphabar[:,[i]])*(z[i+1]**2-z[i]**2)
NMhatth = np.vstack((Nhatth,Mhatth))
NMbarth = NMhatth*dT # resultant thermal loads
# Laminate CTE
epsilonhatth = abcd@NMhatth # laminate CTE
# applied loads and thermal loads
epsilonbarapp = abcd @ NMbarapptotal
epsilonbarth = abcd @ NMbarth # resultant thermal strains
epsilonbartotal = epsilonbarapp + epsilonbarth
    # Composite response from applied mechanical loads and strains. Average
# properties only. Used to compare results from tensile test.
#epsilon_laminate = abcd@NMbarapptotal
#sigma_laminate = ABD@epsilon_laminate/H
epsilon_laminate = epsilonbartotal[:]
sigma_laminate = ABD@epsilonbartotal/H
alpha_laminate = a@Nhatth
# determine thermal load and applied loads or strains Hyer pg 435,452
Nx = NMbarapptotal[0,0]*a_width # units kiloNewtons, total load as would be applied in a tensile test
Ny = NMbarapptotal[1,0]*b_length # units kN
#==========================================================================
# Thermal and mechanical local and global stresses at the ply interface
#==========================================================================
# Declare variables for plotting
epsilon_app = zeros((3,2*nply))
sigma_app = zeros((3,2*nply))
epsilonbar_app = zeros((3,2*nply))
sigmabar_app = zeros((3,2*nply))
epsilon_th = zeros((3,2*nply))
sigma_th = zeros((3,2*nply))
epsilonbar_th = zeros((3,2*nply))
sigmabar_th = zeros((3,2*nply))
epsilon = zeros((3,2*nply))
epsilonbar = zeros((3,2*nply))
sigma = zeros((3,2*nply))
sigmabar = zeros((3,2*nply))
for i,k in enumerate(range(0,2*nply,2)):
        # stress is calculated at the top and bottom of each ply
Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 )
Qbar = inv(T1(plyangle[i])) @ Q @ T2(plyangle[i])
### transverse shear, herakovich pg 254
#Q44 = mat[materials[plymatindex[i]]].G23
#Q55 = mat[materials[plymatindex[i]]].G13
#Qbar44 = Q44*cos(plyangle[i])**2+Q55*sin(plyangle[i])**2
#Qbar55 = Q55*cos(plyangle[i])**2 + Q44*sin(plyangle[i])**2
#Qbar45 = (Q55-Q44)*cos(plyangle[i])*sin(plyangle[i])
#epsilontransverse = array([[gammayz],[gammaxz]])
#sigmatransverse = array([[Qbar44, Qbar45],[Qbar45, Qbar55]]) @ epsilontransverse
# Global stresses and strains, applied load only
epsbarapp1 = epsilonbarapp[0:3] + z[i]*epsilonbarapp[3:7]
epsbarapp2 = epsilonbarapp[0:3] + z[i+1]*epsilonbarapp[3:7]
sigbarapp1 = Qbar @ epsbarapp1
sigbarapp2 = Qbar @ epsbarapp2
        # Local stresses and strains, applied load only
epsapp1 = T2(plyangle[i]) @ epsbarapp1
epsapp2 = T2(plyangle[i]) @ epsbarapp2
sigapp1 = Q @ epsapp1
sigapp2 = Q @ epsapp2
# Interface Stresses and Strains
epsilon_app[:,k:k+2] = np.column_stack((epsapp1,epsapp2))
epsilonbar_app[:,k:k+2] = np.column_stack((epsbarapp1,epsbarapp2))
sigma_app[:,k:k+2] = np.column_stack((sigapp1,sigapp2))
sigmabar_app[:,k:k+2] = np.column_stack((sigbarapp1,sigbarapp2))
# Global stress and strains, thermal loading only
epsbarth1 = epsilonbarth[0:3] + z[i]*epsilonbarth[3:7] - dT*alphabar[:,[i]]
epsbarth2 = epsilonbarth[0:3] + z[i+1]*epsilonbarth[3:7] - dT*alphabar[:,[i]]
sigbarth1 = Qbar @ epsbarth1
sigbarth2 = Qbar @ epsbarth2
# Local stress and strains, thermal loading only
epsth1 = T2(plyangle[i]) @ epsbarth1
epsth2 = T2(plyangle[i]) @ epsbarth2
sigth1 = Q @ epsth1
sigth2 = Q @ epsth2
# Interface Stresses and Strains
epsilon_th[:,k:k+2] = np.column_stack((epsth1,epsth2))
epsilonbar_th[:,k:k+2] = np.column_stack((epsbarth1+dT*alphabar[:,[i]],epsbarth2+dT*alphabar[:,[i]])) # remove the local thermal loads for plotting. only use local thermal strains for calculating stress
sigma_th[:,k:k+2] = np.column_stack((sigth1,sigth2))
sigmabar_th[:,k:k+2] = np.column_stack((sigbarth1,sigbarth2))
# TOTAL global stresses and strains, applied and thermal
epsbar1 = epsbarapp1 + epsbarth1
epsbar2 = epsbarapp2 + epsbarth2
sigbar1 = Qbar @ epsbar1
sigbar2 = Qbar @ epsbar2
# TOTAL local stresses and strains , applied and thermal
eps1 = T2(plyangle[i]) @ epsbar1
eps2 = T2(plyangle[i]) @ epsbar2
sig1 = Q @ eps1
sig2 = Q @ eps2
# Interface Stresses and Strains
epsilon[:,k:k+2] = np.column_stack((eps1,eps2))
epsilonbar[:,k:k+2] = np.column_stack((epsbar1+dT*alphabar[:,[i]],epsbar2+dT*alphabar[:,[i]])) # remove the local thermal loads for plotting. only use local thermal strains for calculating stress
sigma[:,k:k+2] = np.column_stack((sig1,sig2))
sigmabar[:,k:k+2] = np.column_stack((sigbar1,sigbar2))
#==========================================================================
# Strength Failure Calculations
#==========================================================================
# Strength Ratio
STRENGTHRATIO_MAXSTRESS = zeros((3,2*nply))
# Failure Index
FAILUREINDEX_MAXSTRESS = zeros((3,2*nply))
STRENGTHRATIO_TSAIWU = zeros((nply))
for i,k in enumerate(range(0,2*nply,2)):
# stress
s1 = sigma[0,k]
s2 = sigma[1,k]
s12 = np.abs(sigma[2,k])
# strength
F1 = mat[materials[plymatindex[i]]].F1t if s1 > 0 else mat[materials[plymatindex[i]]].F1c
F2 = mat[materials[plymatindex[i]]].F2t if s2 > 0 else mat[materials[plymatindex[i]]].F2c
F12 = mat[materials[plymatindex[i]]].F12
# Max Stress failure index ,failure if > 1, then fail, FI = 1/SR
FAILUREINDEX_MAXSTRESS[0,k:k+2] = s1 / F1
FAILUREINDEX_MAXSTRESS[1,k:k+2] = s2 / F2
FAILUREINDEX_MAXSTRESS[2,k:k+2] = s12 / F12
        # Tsai-Wu, failure occurs when > 1
F1t = mat[materials[plymatindex[i]]].F1t
F1c = mat[materials[plymatindex[i]]].F1c
F2t = mat[materials[plymatindex[i]]].F2t
F2c = mat[materials[plymatindex[i]]].F2c
F12 = mat[materials[plymatindex[i]]].F12
# inhomogeneous Tsai-Wu criterion # from Daniel
# http://www2.mae.ufl.edu/haftka/composites/mcdaniel-nonhomogenous.pdf
f1 = 1/F1t + 1/F1c
f2 = 1/F2t + 1/F2c
f11 = -1/(F1t*F1c)
f22 = -1/(F2t*F2c)
f66 = 1/F12**2
f12 = -0.5*sqrt(f11*f22)
#TW = f1*s1 + f2*s2 + f11*s1**2 + f22*s2**2 + f66*s12**2 + 2*f12*s1*s2
# polynomial to solve. Added a machine epsilon to avoid divide by zero errors
lam1 = f11*s1**2 + f22*s2**2 + f66*s12**2 + 2*f12*s1*s2 + 1e-16
lam2 = f1*s1 + f2*s2 + 1e-16
lam3 = -1
# smallest positive root
roots = array([(-lam2+sqrt(lam2**2-4*lam1*lam3)) / (2*lam1) ,
(-lam2-sqrt(lam2**2-4*lam1*lam3)) / (2*lam1)] )
STRENGTHRATIO_TSAIWU[i] = roots[roots>=0].min() # strength ratio
# f1 = 1/F1t - 1/F1c
# f2 = 1/F2t - 1/F2c
# f11 = 1/(F1t*F1c)
# f22 = 1/(F2t*F2c)
# f66 = 1/F12**2
# STRENGTHRATIO_TSAIWU[i] = 2 / (f1*s2 + f2*s2 + sqrt((f1*s1+f2*s2)**2+4*(f11*s1**2+f22*s2**2+f66*s12**2)))
### Apply safety factors
FAILUREINDEX_MAXSTRESS = FAILUREINDEX_MAXSTRESS * SF
STRENGTHRATIO_TSAIWU = STRENGTHRATIO_TSAIWU / SF
###
MARGINSAFETY_TSAIWU = STRENGTHRATIO_TSAIWU-1 # margin of safety
# strength ratio for max stress, if < 1, then fail, SR = 1/FI
STRENGTHRATIO_MAXSTRESS = 1/(FAILUREINDEX_MAXSTRESS+1e-16)
# margin of safety based on max stress criteria
MARGINSAFETY_MAXSTRESS = STRENGTHRATIO_MAXSTRESS-1
# minimum margin of safety for Max stress failure
MARGINSAFETY_MAXSTRESS_min = MARGINSAFETY_MAXSTRESS.min().min()
FAILUREINDEX_MAXSTRESS_max = FAILUREINDEX_MAXSTRESS.max().max()
# minimum margin of safety of both Tsai-Wu and Max Stress
#MARGINSAFETY_MAXSTRESS_min = np.minimum(MARGINSAFETY_MAXSTRESS.min().min(), MARGINSAFETY_TSAIWU.min() )
    # find critical values for all failure criteria
#MARGINSAFETY_MAXSTRESS = MARGINSAFETY_MAXSTRESS[~np.isinf(MARGINSAFETY_MAXSTRESS)] # remove inf
#MARGINSAFETY_TSAIWU = MARGINSAFETY_TSAIWU[~np.isinf(MARGINSAFETY_TSAIWU)] # remove inf
#==========================================================================
# Buckling Failure Calculations
#==========================================================================
''' Buckling of Clamped plates under shear load, reddy, 5.6.17'''
k11 = 537.181*D11/a_width**4 + 324.829*(D12+2*D66)/(a_width**2*b_length**2) + 537.181*D22/b_length**4
k12 = 23.107/(a_width*b_length)
k22 = 3791.532*D11/a_width**4 + 4227.255*(D12+2*D66)/(a_width**2*b_length**2) + 3791.532*D22/b_length**4
Nxycrit0 = 1/k12*np.sqrt(k11*k22)
FI_clamped_shear_buckling = (abs(Nxy_)*SF) / Nxycrit0 # failure if > 1
MS_clamped_shear_buckling = 1/(FI_clamped_shear_buckling+1e-16)-1
'''Kassapoglous pg 126,137
simply supported plate buckling, assumes Nx>0 is compression
Nxcrit0 is the axial load that causes buckling
Nxycrit0 is the shear load that cause buckling
Nxcrit is the axial load part of a combined load that causes buckling
Nxycrit is the shear load part of a combined load that causes buckling
'''
# no buckling issues if Nx is positive
    # buckling calculations assume Nx compression is positive.
Nx__ = abs(Nx_) if Nx_ < 0 else np.float64(0)
Nxy__ = np.float64(0) if Nxy_ == 0 else abs(Nxy_) # assume shear in 1 direction although both directions are ok
# Nxy=0
Nxcrit0 = pi**2/a_width**2 * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 + D22*a_width**4/b_length**4)
# Nx=0
Nxycrit0 = 9*pi**4*b_length / (32*a_width**3) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 + D22*a_width**4/b_length**4)
FI_Nxy0_buckling, FI_Nx0_buckling, FI_Nx_buckling, FI_Nxy_buckling = 0,0,0,0
if Nx__ == 0 or Nxy__ == 0:
FI_Nxy0_buckling = (Nxy__*SF)/Nxycrit0
FI_Nx0_buckling = (Nx__*SF)/Nxcrit0
else:
# interaction term
k = Nxy__ / Nx__
Nxcrit = min( abs((pi**2/a_width**2) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 +D22*a_width**4/b_length**4 ) / (2-8192*a_width**2*k**2/(81*b_length**2*pi**4)) * (5 + sqrt(9 + 65536*a_width**2*k**2/(81*pi**4*b_length**2)))) ,
abs((pi**2/a_width**2) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 +D22*a_width**4/b_length**4 ) / (2-8192*a_width**2*k**2/(81*b_length**2*pi**4)) * (5 - sqrt(9 + 65536*a_width**2*k**2/(81*pi**4*b_length**2)))) )
Nxycrit = Nxycrit0*sqrt(1-Nxcrit/Nxcrit0)
# interactive calc
FI_Nx_buckling = (Nx__ *SF)/Nxcrit
FI_Nxy_buckling = (Nxy__*SF)/Nxycrit
FI_combinedload_simplesupport_buckle = max([FI_Nxy0_buckling,
FI_Nx0_buckling,
FI_Nx_buckling,
FI_Nxy_buckling] )
MS_min_buckling = 1/(FI_combinedload_simplesupport_buckle+1e-16)-1
#==========================================================================
# Facesheet Wrinkling
#==========================================================================
#==========================================================================
    # principal laminate stresses
#==========================================================================
sigma_principal_laminate = np.linalg.eig(array([[sigma_laminate[0,0],sigma_laminate[2,0],0],
[sigma_laminate[2,0],sigma_laminate[1,0],0],
[0,0,0]]))[0]
tauxy_p = sigma_laminate[2,0]
sigmax_p = sigma_laminate[0,0]
sigmay_p = sigma_laminate[1,0]
thetap = 0.5 * np.arctan( 2*tauxy_p / ((sigmax_p-sigmay_p+1e-16))) * 180/np.pi
#==========================================================================
# Printing Results
#==========================================================================
if prints:
print('--------------- laminate1 Stress analysis of fibers----------')
print('(z-) plyangles (z+)'); print(plyangle)
print('(z-) plymatindex (z+)'); print(plymatindex)
print('ply layers') ; print(z)
        print('laminate thickness, H = {:.4f}'.format(H))
#print('x- zero strain laminate center, z_eps0_x = {:.4f}'.format(z_eps0_x))
#print('y- zero strain laminate center, z_eps0_y = {:.4f}'.format(z_eps0_y))
#print('xy-zero strain laminate center, z_eps0_xy = {:.4f}'.format(z_eps0_xy))
#print('shear center laminate center, z_sc = {:.4f}'.format(z_sc))
print('Applied Loads'); print(NM)
print('ABD=');print(ABD)
print('Ex= {:.2f}'.format(Exbar) )
print('Ey= {:.2f}'.format(Eybar) )
print('nuxy= {:.2f}'.format(nuxybar) )
print('Gxy= {:.2f}'.format(Gxybar) )
print('epsilon_laminate') ; print(epsilon_laminate)
print('sigma_laminate') ; print(sigma_laminate)
print('sigma_principal_laminate') ; print(sigma_principal_laminate)
print('principal_angle = {:.2f} deg'.format(thetap))
print('NMbarapp') ; print(NMbarapp)
print('sigma') ; print(sigma)
print('\nMax Stress Percent Margin of Safety, failure < 0, minimum = {:.4f}'.format( MARGINSAFETY_MAXSTRESS_min ) )
print(MARGINSAFETY_MAXSTRESS)
print('\nTsai-Wu Percent Margin of Safety, failure < 0, minimum = {:.4f}'.format(MARGINSAFETY_TSAIWU.min()))
print(MARGINSAFETY_TSAIWU)
print('\nmaximum failure index = {:.4f}'.format( FAILUREINDEX_MAXSTRESS_max ))
print(FAILUREINDEX_MAXSTRESS)
print('\nBuckling MS for Nxy only for clamped edges = {:.4f}\n'.format(MS_clamped_shear_buckling))
# print('---- Individual Buckling Failure Index (fail>1) combined loads and simple support -----')
# print('FI_Nxy0 = {:.2f}'.format(FI_Nxy0_buckling) )
# print('FI_Nx0 = {:.2f}'.format(FI_Nx0_buckling) )
# print('---- Interactive Buckling Failure Index (fail>1) combined loads and simple support -----')
# print('FI_Nx = {:.2f}'.format(FI_Nx_buckling) )
# print('FI_Nxy = {:.2f}'.format(FI_Nxy_buckling) )
# print('---- Buckling Failure Index (fail>1) combined loads and simple support -----')
# print(FI_combinedload_simplesupport_buckle)
print('buckling combined loads and simple support MS = {:.4f}\n'.format((MS_min_buckling)))
print('Mx_midspan = {:.2f}'.format(Mxq) )
print('My_midspan = {:.2f}'.format(Myq) )
print('Mxy_midspan = {:.2f}'.format(Mxyq) )
print('w0_simplesupport = {:.6f}'.format(w0_simplesupport) )
print('w0_clamped = {:.6f}'.format(w0_clamped) )
print('w0_clamped_isotropic= {:.6f}'.format(w0_clamped_isotropic) )
#display(sp.Matrix(sigmabar))
#==========================================================================
# Plotting
#==========================================================================
if plots:
windowwidth = 800
windowheight = 450
zplot = zeros(2*nply)
for i,k in enumerate(range(0,2*nply,2)): # = nply
zplot[k:k+2] = z[i:i+2]
#legendlab = ['total','thermal','applied','laminate']
# global stresses and strains
mylw = 1.5 #linewidth
# Global Stresses and Strains
f1, ((ax1,ax2,ax3), (ax4,ax5,ax6)) = plt.subplots(2,3, sharex='row', sharey=True)
f1.canvas.set_window_title('Global Stress and Strain of %s laminate' % (plyangle))
stresslabel = ['$\sigma_x$','$\sigma_y$','$\\tau_{xy}$']
strainlabel = ['$\epsilon_x$','$\epsilon_y$','$\gamma_{xy}$']
for i,ax in enumerate([ax1,ax2,ax3]):
## the top axes
ax.set_ylabel('thickness,z')
ax.set_xlabel(strainlabel[i])
ax.set_title(' Ply Strain '+strainlabel[i])
ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4)) # scilimits=(-2,2))
ax.plot(epsilonbar[i,:], zplot, color='blue', lw=mylw, label='total')
ax.plot(epsilonbar_th[i,:], zplot, color='red', lw=mylw, alpha=0.75, linestyle='--', label='thermal')
ax.plot(epsilonbar_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
ax.plot([epsilon_laminate[i], epsilon_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
ax.grid(True)
#ax.set_xticks(linspace( min(ax.get_xticks()) , max(ax.get_xticks()) ,6))
for i,ax in enumerate([ax4,ax5,ax6]):
ax.set_ylabel('thickness,z')
ax.set_xlabel(stresslabel[i])
ax.set_title(' Ply Stress '+stresslabel[i])
ax.ticklabel_format(axis='x', style='sci', scilimits=(-3,3)) # scilimits=(-2,2))
ax.plot(sigmabar[i,:], zplot, color='blue', lw=mylw, label='total')
ax.plot(sigmabar_th[i,:], zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal')
ax.plot(sigmabar_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
ax.plot([sigma_laminate[i], sigma_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
ax.grid(True)
leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3)
tight_layout()
try:
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(25,50,windowwidth,windowheight)
except:
pass
f1.show()
#plt.savefig('global-stresses-strains.png')
### Local Stresses and Strains
f2, ((ax1,ax2,ax3), (ax4,ax5,ax6)) = plt.subplots(2,3, sharex='row', sharey=True)
f2.canvas.set_window_title('Local Stress and Strain of %s laminate' % (plyangle))
stresslabel = ['$\sigma_1$','$\sigma_2$','$\\tau_{12}$']
strainlabel = ['$\epsilon_1$','$\epsilon_2$','$\gamma_{12}$']
strengthplot = [ [ [F1t,F1t],[zplot.min(), zplot.max()], [F1c, F1c],[zplot.min(), zplot.max()] ] ,
[ [F2t,F2t],[zplot.min(), zplot.max()], [F2c, F2c],[zplot.min(), zplot.max()] ] ,
[ [F12,F12],[zplot.min(), zplot.max()], [-F12,-F12],[zplot.min(), zplot.max()] ] ]
for i,ax in enumerate([ax1,ax2,ax3]):
## the top axes
ax.set_ylabel('thickness,z')
ax.set_xlabel(strainlabel[i])
ax.set_title(' Ply Strain '+strainlabel[i])
ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4)) # scilimits=(-2,2))
ax.plot(epsilon[i,:], zplot, color='blue', lw=mylw, label='total')
ax.plot(epsilon_th[i,:], zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal')
ax.plot(epsilon_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
ax.plot([epsilon_laminate[i], epsilon_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
ax.grid(True)
for i,ax in enumerate([ax4,ax5,ax6]):
ax.set_ylabel('thickness,z')
ax.set_xlabel(stresslabel[i])
ax.set_title(' Ply Stress '+stresslabel[i])
ax.ticklabel_format(axis='x', style='sci', scilimits=(-3,3)) # scilimits=(-2,2))
ax.plot(sigma[i,:], zplot, color='blue', lw=mylw, label='total')
ax.plot(sigma_th[i,:], zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal')
ax.plot(sigma_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
ax.plot([sigma_laminate[i], sigma_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
### plots strengths
#ax.plot(strengthplot[i][0],strengthplot[i][1], color='yellow', lw=mylw)
ax.grid(True)
leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3)
tight_layout()
try:
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(windowwidth+50,50,windowwidth,windowheight)
except:
pass
f2.show()
#plt.savefig('local-stresses-strains.png')
### Failure
f3, ((ax1,ax2,ax3)) = plt.subplots(1,3, sharex=True, sharey=True)
f3.canvas.set_window_title('Failure Index(failure if > 1), %s laminate' % (plyangle))
stresslabel = ['$\sigma_1/F_1$','$\sigma_2/F_2$','$\\tau_{12}/F_{12}$']
for i,ax in enumerate([ax1,ax2,ax3]):
## the top axes
ax.set_ylabel('thickness,z')
ax.set_xlabel(stresslabel[i])
#ax.set_title(' Ply Strain at $\epsilon=%f$' % (epsxapp*100))
ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4)) # scilimits=(-2,2))
ax.plot(FAILUREINDEX_MAXSTRESS[i,:], zplot, color='blue', lw=mylw, label='total')
ax.grid(True)
ax.set_title('Failure Index, fail if > 1')
#leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3)
tight_layout()
try:
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(25,windowheight+100,windowwidth,windowheight)
except:
pass
        f3.show()
#plt.savefig('local-stresses-strains.png')
### warpage
res = 100
Xplt,Yplt = np.meshgrid(np.linspace(-a_width/2,a_width/2,res), np.linspace(-b_length/2,b_length/2,res))
epsx = epsilon_laminate[0,0]
epsy = epsilon_laminate[1,0]
epsxy = epsilon_laminate[2,0]
kapx = epsilon_laminate[3,0]
kapy = epsilon_laminate[4,0]
kapxy = epsilon_laminate[5,0]
        ### displacement
w = -0.5*(kapx*Xplt**2 + kapy*Yplt**2 + kapxy*Xplt*Yplt)
u = epsx*Xplt # pg 451 hyer
fig = plt.figure('plate-warpage')
ax = fig.gca(projection='3d')
ax.plot_surface(Xplt, Yplt, w+zmid[0], cmap=mpl.cm.jet, alpha=0.3)
###ax.auto_scale_xyz([-(a_width/2)*1.1, (a_width/2)*1.1], [(b_length/2)*1.1, (b_length/2)*1.1], [-1e10, 1e10])
ax.set_xlabel('plate width,y-direction,in')
ax.set_ylabel('plate length,x-direction, in')
ax.set_zlabel('warpage,in')
#ax.set_zlim(-0.01, 0.04)
#mngr = plt.get_current_fig_manager() ; mngr.window.setGeometry(450,550,600, 450)
try:
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(windowwidth+50,windowheight+100,windowwidth,windowheight)
except:
pass
plt.show()
#plt.savefig('plate-warpage')
return MARGINSAFETY_MAXSTRESS_min, FAILUREINDEX_MAXSTRESS_max
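# Hedged illustration (added): a standalone sketch of the Tsai-Wu strength-ratio root
# solved inside laminate_calcs above. s1, s2, s12 are ply-axis stresses; the F* strengths
# follow the sign convention of the material database (compressive strengths negative).
# Relies on the module-level numpy names (array, sqrt) already used throughout this file;
# the function name is illustrative only.
def _tsai_wu_strength_ratio_sketch(s1, s2, s12, F1t, F1c, F2t, F2c, F12):
    f1 = 1/F1t + 1/F1c
    f2 = 1/F2t + 1/F2c
    f11 = -1/(F1t*F1c)
    f22 = -1/(F2t*F2c)
    f66 = 1/F12**2
    f12 = -0.5*sqrt(f11*f22)
    # quadratic in the strength ratio R: lam1*R**2 + lam2*R - 1 = 0
    lam1 = f11*s1**2 + f22*s2**2 + f66*s12**2 + 2*f12*s1*s2 + 1e-16
    lam2 = f1*s1 + f2*s2 + 1e-16
    roots = array([(-lam2 + sqrt(lam2**2 + 4*lam1)) / (2*lam1),
                   (-lam2 - sqrt(lam2**2 + 4*lam1)) / (2*lam1)])
    return roots[roots >= 0].min()   # smallest positive root is the strength ratio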
def plate():
'''
composite plate mechanics
    TODO - results need to be vetted
'''
#==========================================================================
# Initialize
#==========================================================================
get_ipython().magic('matplotlib')
plt.close('all')
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams['font.size'] = 13
#plt.rcParams['legend.fontsize'] = 14
#==========================================================================
# Import Material Properties
#==========================================================================
plythk = 0.0025
plyangle = array([0,90,-45,45,0]) * np.pi/180 # angle for each ply
nply = len(plyangle) # number of plies
laminatethk = np.zeros(nply) + plythk
H = sum(laminatethk) # plate thickness
# Create z dimensions of laminate
z_ = np.linspace(-H/2, H/2, nply+1)
a = 20 # plate width;
b = 10 # plate height
q0_ = 5.7 # plate load;
    # Transversely isotropic material properties
E1 = 150e9
E2 = 12.1e9
nu12 = 0.248
G12 = 4.4e9
nu23 = 0.458
G23 = E2 / (2*(1+nu23))
# Failure Strengths
F1t = 1500e6
F1c = -1250e6
F2t = 50e6
F2c = -200e6
F12t = 100e6
F12c = -100e6
Strength = np.array([[F1t, F1c],
[F2t, F2c],
[F12t, F12c]])
th = sp.symbols('th')
    # Stiffness matrix in material coordinates
Cijm6 = inv(Sij6)
# reduced stiffness in structural
Cij = sp.Matrix([[Cij6[0,0], Cij6[0,1], 0],
[Cij6[0,1], Cij6[1,1], 0],
[0, 0, Cij6[5,5] ]] )
Tij = sp.Matrix([[cos(th)**2, sin(th)**2, 2*sin(th)*cos(th)],
[sin(th)**2, cos(th)**2, -2*sin(th)*cos(th)],
[-cos(th)*sin(th), sin(th)*cos(th), (cos(th)**2-sin(th)**2)]])
## Cylindrical Bending of a laminated plate
# displacement in w (z direction)
from sympy.abc import x
f = Function('f')
eq = dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x), hint = '1st_homogeneous_coeff_best', simplify=False)
pprint(eq)
#==============================================================================
th,x,y,z,q0,C1,C2,C3,C4,C5,C6,C7,A11,B11,D11,A16,B16 = symbols('th x y z q0 C1 C2 C3 C4 C5 C6 C7 A11 B11 D11 A16 B16')
wfun = Function('wfun')
ufun = Function('ufun')
## EQ 4.4.1a
eq1 = A11*ufun(x).diff(x,2) - B11*wfun(x).diff(x,3)
#eq1 = A11*diff(ufun,x,2) - B11*diff(wfun,x,3); # C5 C1
## EQ 4.4.1b
#eq2 = A16*diff(ufun,x,2) - B16*diff(wfun,x,3); # C5 C1
eq2 = A16*ufun(x).diff(x,2) - B16*wfun(x).diff(x,3)
## EQ 4.4.1c
#eq3 = B11*diff(ufun,x,3) - D11*diff(wfun,x,4) + q0;
eq3 = B11*ufun(x).diff(x,3) - D11*wfun(x).diff(x,4) + q0
    ################## python conversion ended here ################################
# solve eq1 eq2 and eq3 to get the w and u functions
# displacement in w (z direction) from eq1,eq2,eq3
wfun = A11*q0*x**4 / (4*(6*B11**2-6*A11*D11)) + C1 + C2*x + C3*x**2 + C4*x**3 # C1 C2 C3 C4
# displacement in u (x direction) from eq1,eq2,eq3
ufun = B11*q0*x**3 / (6*(B11**2-A11*D11)) + C7 + x*C6 + 3*B11*x**2*C5/A11 # C5 C6 C7
# Cij6.evalf(subs={th:plyangle[i]}) * (z_[i+1]**3-z_[i]**3)
# cond1 -> w(0)=0 at x(0), roller
C1sol = sp.solve(wfun.subs(x,0), C1)[0] # = 0
# cond2 -> angle at dw/dx at x(0) is 0, cantilever
C2sol = sp.solve(wfun.diff(x).subs(x,0),C2)[0] # = 0
# cond3 -> w(z) = 0 at x(a), roller
C4sol1 = sp.solve(wfun.subs({x:a,C1:C1sol,C2:C2sol}),C4)[0] # C3
# cond4 u = 0 at x = 0
C7sol = sp.solve(ufun.subs(x,0),C7)[0] #=0
# u=0 at x = a
C5sol1 = sp.solve(ufun.subs({x:a, C7:C7sol}),C5)[0] #C6
# cond 5 EQ 4.4.14a Myy = 0 @ x(a) (Mxx , B11 D11) (Myy, B12 D12) roller no moment
C6sol1 = sp.solve( ( ((B11*ufun.diff(x)+0.5*wfun.diff(x)**2 ) - D11*wfun.diff(x,2)).subs({x:a, C1:C1sol, C2:C2sol, C4:C4sol1, C5:C5sol1, C7:C7sol})), C6)[0] # C6 C3
# EQ 4.4.13a, Nxx = 0 @ x(0) roller has no Nxx
C6sol2 = sp.solve( ((A11* ufun.diff(x) + 0.5*wfun.diff(x)**2)-B11*wfun.diff(x,2)).subs({x:a, C1:C1sol, C2:C2sol, C4:C4sol1, C5:C5sol1, C7:C7sol}),C6)[0] # C6 C3
C3sol = sp.solve(C6sol1 - C6sol2,C3)[0]
C4sol = C4sol1.subs(C3,C3sol)
C6sol = sp.simplify(C6sol2.subs(C3,C3sol))
C5sol = sp.simplify(C5sol1.subs(C6,C6sol))
# substitute integration constants with actual values( _ is actual number)
C1_ = copy(C1sol)
C2_ = copy(C2sol)
C7_ = copy(C7sol)
C3_ = C3sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
C4_ = C4sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
C5_ = C5sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
C6_ = C6sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
    # function w(x): vertical displacement w along z with actual values
wsol = wfun.subs({q0:q0_, C1:C1_, C2:C2_, C3:C3_, C4:C4_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
    # function u(x): horizontal displacement u along x with actual values
usol = ufun.subs({q0:q0_, C5:C5_, C6:C6_, C7:C7_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
# 3d plots
plot3d(wsol,(x,0,a), (y,0,b))
plt.xlabel('x')
plt.ylabel('y')
plt.title('Cylindrical Bending -Displacement of a plate With CLPT')
## Strain calculation
# eq 3.3.8 (pg 116 reddy (pdf = 138))
    epstotal = array([[usol.diff(x) + 0.5*wsol.diff(x)**2 - z*wsol.diff(x,2)],[0],[0]])  # du/dx + (1/2)(dw/dx)^2 - z*d2w/dx2
epsx = epstotal[0,0]
## Calculating and plotting Stress in each layer
res = 8 # accuracy of finding max and min stress
xplot = linspace(0,a,res)
yplot = linspace(0,b,res)
G0 = sp.symbols('G0')
Globalminstress = np.zeros((3, nply))
Globalmaxstress = np.zeros((3, nply))
for kstress in range(3): # stress state s_x, s_y, s_xz
plt.figure(kstress+1)
for klay in range(nply): # loop through all layers
thplot = plyangle[klay]
zplot = linspace(z_[klay],z_[klay+1],res)
stressplot = np.zeros((len(zplot),len(xplot)))
## Calc Stresses
if kstress == 2:
# Shear stresses
G0_ = -sp.integrate(s_stress[0].diff(x),z)+G0
# solve for shear stresses from s_1
s_xz = sp.solve(G0_,G0)[0]
# out of plane shear S_xz does not need to be transformed ??
plot3d(s_xz, (x,0, a), (z, z_[klay], z_[klay+1]) )
else:
# normal stresses
# Cij = reduced structural stiffness in strictural coordinates 3x3
# stress in structural coordinates
s_stress = Cij.subs(th,thplot) @ epstotal
# stressin material coordinates
m_stress = Tij.subs(th,thplot) @ s_stress
#ezsurf(m_stress(kstress),[0,a,z_(klay),z_(klay+1)])
## find max stress in each layer
ii=0
for i in xplot:
jj=0
for j in zplot:
if kstress == 2:
stressplot[ii,jj] = s_xz.subs({x:i, z:j})
else:
stressplot[ii,jj] = m_stress[kstress].subs({x:i, z:j})
                    jj += 1
                ii += 1
Globalminstress[kstress,klay] = np.min(stressplot)
Globalmaxstress[kstress,klay] = np.max(stressplot)
#
        plt.title(r'$\sigma_%i$' % kstress)
## Plot max stress and failure strength
plt.figure()
for i in range(3):
plt.subplot(1, 3, i+1)
plt.bar(range(nply), Globalmaxstress[i,:])
plt.bar(range(nply), Globalminstress[i,:])
plt.scatter(range(nply),np.ones(nply) * Strength[i,0])
plt.scatter(range(nply),np.ones(nply) * Strength[i,1])
plt.xlabel('layer')
        plt.title(r'$\sigma_%i$' % i)
def plate_navier():
'''
composite plate bending with navier solution
TODO - code needs to be converted from matlab
'''
## Plate a*b*h simply supported under q = q0 CLPT
pass
'''
q0,a,b,m,n,x,y = sp.symbols('q0 a b m n x y')
Qmn = 4/(a*b)*sp.integrate( sp.integrate( q0*sp.sin(m*pi*x/a)*sp.sin(n*pi*y/b),(x,0,a)) ,(y,0,b))
dmn = pi**4 / b**4 * (DTij(1,1)*m**4*(b/a)**4 + 2* (DTij(1,2)+2*DTij(6,6)) *m**2*n**2*(b/a)**2 + DTij(2,2)*n**4)
Wmn = Qmn/dmn;
w0 = Wmn * sin(m*pi*x/a) * sin(n*pi*y/b);
w0_ = subs(w0,[q0 a b],[-q0_ a_ b_] );
figure
w0sum = 0;
for n_ = 1:10
for m_ = 1:10
w0sum = w0sum + subs(w0_,[n m],[n_ m_]);
end
end
w0sum;
% xplot = linspace(0,a_,res);
% yplot = linspace(0,b_,res);
ii=1;
for i = xplot
jj=1;
for j = yplot
w0plot(ii,jj) = subs(w0sum,[x y],[i j]);
jj=jj+1;
end
ii=ii+1;
end
surf(xplot,yplot,w0plot)
colorbar
set(gca,'PlotBoxAspectRatio',[2 1 1]);
xlabel('length a, u(x)')
ylabel('length b, v(y)')
zlabel('w(z)')
'''
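# Hedged sketch (added, not a conversion of the MATLAB block above): a purely numeric
# Navier series for the center deflection of a simply supported, specially orthotropic
# plate under uniform pressure q0, mirroring the loop already used inside laminate_calcs.
# D11, D12, D22, D66 are bending stiffnesses and a, b are the plate dimensions; it relies
# on the module-level pi/sin names used throughout this file. Name is illustrative only.
def _navier_center_deflection_sketch(q0, a, b, D11, D12, D22, D66, terms=9):
    s = b / a
    w0 = 0.0
    for m in range(1, terms, 2):          # uniform load -> odd terms only
        for n in range(1, terms, 2):
            dmn = pi**4/b**4 * (D11*m**4*s**4
                                + 2*(D12 + 2*D66)*m**2*n**2*s**2
                                + D22*n**4)
            Qmn = 16*q0/(pi**2*m*n)
            w0 += Qmn/dmn * sin(m*pi/2) * sin(n*pi/2)   # evaluated at x=a/2, y=b/2
    return w0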
class laminate(object):
"""
IN-WORK - laminate object for composite material analysis
"""
# constructor
def __init__(self, plyangle, matindex, matname):
# run when laminate is instantiated
# loads materials used
self.plyangle = plyangle
self.matindex = matindex
self.matname = matname
self.__mat = self.__import_matprops(matname)
# create a simple function to handle CTE properties
def __alphaf(self, mat):
return array([[mat.alpha1], [mat.alpha2], [0]])
self.laminatethk = array([self.__mat[matname[i]].plythk for i in matindex ])
self.nply = len(self.laminatethk) # number of plies
self.H = np.sum(self.laminatethk) # plate thickness
# area = a_width*H
z = zeros(self.nply+1)
zmid = zeros(self.nply)
z[0] = -self.H/2
for i in range(self.nply):
z[i+1] = z[i] + self.laminatethk[i]
zmid[i] = z[i] + self.laminatethk[i]/2
self.z = z
self.zmid = zmid
self.__abdmatrix()
def __Qf(self, E1,E2,nu12,G12):
        '''Transversely isotropic reduced stiffness matrix (plane stress). pg 58 Herakovich
        G12 = E1/(2*(1+nu12)) if isotropic'''
nu21 = E2*nu12/E1
Q = array([[E1/(1-nu12*nu21), E2*nu12/(1-nu12*nu21), 0],
[ E2*nu12/(1-nu12*nu21), E2/(1-nu12*nu21), 0],
[0, 0, G12]])
return Q
def __T1(self, th):
        '''Stress Transform for Plane Stress
        th = ply angle in degrees
        Voigt notation for the stress transform: sigma1 = T1 @ sigmax
        recall T1(th)**-1 == T1(-th)'''
n = sin(th*pi/180)
m = cos(th*pi/180)
T1 = array( [[m**2, n**2, 2*m*n],
[n**2, m**2,-2*m*n],
[-m*n, m*n,(m**2-n**2)]])
return T1
def __T2(self, th):
        '''Strain Transform for Plane Stress
        th = ply angle in degrees
        Voigt notation for the strain transform: epsilon1 = T2 @ epsilonx'''
n = sin(th*pi/180)
m = cos(th*pi/180)
T2 = array( [[m**2, n**2, m*n],
[n**2, m**2,-m*n],
[-2*m*n, 2*m*n, (m**2-n**2)]])
return T2
# private method
def __abdmatrix(self):
'''used within the object but not accessible outside'''
#==========================================================================
# ABD Matrix Compute
#==========================================================================
# Reduced stiffness matrix for a plane stress ply in principal coordinates
        # calculating Q from the compliance matrix may cause cancellation errors
A = zeros((3,3)); B = zeros((3,3)); D = zeros((3,3))
for i in range(self.nply): # = nply
Q = self.__Qf(self.__mat[self.matname[self.matindex[i]]].E1,
self.__mat[self.matname[self.matindex[i]]].E2,
self.__mat[self.matname[self.matindex[i]]].nu12,
self.__mat[self.matname[self.matindex[i]]].G12 )
Qbar = inv(self.__T1(self.plyangle[i])) @ Q @ self.__T2(self.plyangle[i]) # solve(T1(plyangle[i]), Q) @ T2(plyangle[i])
A += Qbar*(self.z[i+1]-self.z[i])
# coupling stiffness
B += (1/2)*Qbar*(self.z[i+1]**2-self.z[i]**2)
# bending or flexural laminate stiffness relating moments to curvatures
D += (1/3)*Qbar*(self.z[i+1]**3-self.z[i]**3)
# laminate stiffness matrix
ABD = zeros((6,6))
ABD[0:3,0:3] = A
ABD[0:3,3:6] = B
ABD[3:6,0:3] = B
ABD[3:6,3:6] = D
self.ABD = ABD
# method
def available_materials(self):
'''show the materials available in the library'''
matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0)
print('---available materials---')
for k in matprops.columns.tolist():
print(k)
print('-------------------------')
# private method to be used internally
def __import_matprops(self, mymaterial=['T300_5208','AL_7075']):
'''
import material properties
'''
matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0)
if mymaterial==[] or mymaterial=='':
print(matprops.columns.tolist())
mat = matprops[mymaterial]
#mat.applymap(lambda x:np.float(x))
mat = mat.applymap(lambda x:pd.to_numeric(x, errors='ignore'))
return mat
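# Hedged usage sketch (added): instantiating the in-work laminate class above. The
# material name must exist in the bundled compositematerials.csv; the one below is only
# illustrative (it also appears in the commented example at the bottom of this file).
def _laminate_class_demo():
    lam = laminate(plyangle=[0, 45, 45, 0],
                   matindex=[0, 0, 0, 0],
                   matname=['graphite-polymer_SI'])
    return lam.ABD   # 6x6 laminate stiffness matrix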
def failure_envelope_laminate(Nx,Ny,Nxy,Mx,My,Mxy,q0,mymat,layup):
'''
    find the maximum max-stress failure index (i.e. the minimum margin) for the
    given load conditions
    '''
    # single-material, four-ply laminate on a 10 x 10 plate
_, FAILUREINDEX_MAXSTRESS_max = laminate_calcs(NM=[Nx,Ny,Nxy,Mx,My,Mxy],
ek=[0,0,0,0,0,0],
q0=q0,
plyangle= layup,
plymatindex=[0,0,0,0],
materials = [mymat],
platedim=[10,10],
zoffset=0,
SF=1.0,
plots=0,
prints=0)
return FAILUREINDEX_MAXSTRESS_max
def plot_single_max_failure_loads(mymat='E-Glass Epoxy fabric M10E-3783', mylayup=[0,45,45,0] ):
'''
    Loops through each load component, linearizes the failure-index response, and
    solves for the load at which the failure index reaches 1 (i.e. margin = 0).
    An older version used Newton's method for root finding:
        scipy.optimize.newton(laminate_min, guess)
    TODO: the envelope is currently filled in with random sample points; instead,
    use the failure index (FI), which varies linearly with load, to generate the
    envelope directly.
'''
#laminate_min = lambda N: failure_envelope_laminate(N,0,0,0,0,0,0)
loadnamelist = ['Nx','Ny','Nxy','Mx','My','Mxy','q0']
laminate_min_list = []
laminate_min_list.append(lambda N: failure_envelope_laminate(N,0,0,0,0,0,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,N,0,0,0,0,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,N,0,0,0,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,N,0,0,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,N,0,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,0,N,0,mymat,mylayup))
laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,0,0,N,mymat,mylayup))
envelope_loads = []
N_t = array([0,1])
N_c = array([0,-1])
for loadname,laminate_min in zip(loadnamelist,laminate_min_list):
# tension
FI = [laminate_min(N) for N in N_t]
m = (FI[1]-FI[0]) / (N_t[1] - N_t[0])
b = FI[1]-m*N_t[1]
N_crit_t = (1-b) / m
# compression
FI = [laminate_min(N) for N in N_c]
m = (FI[1]-FI[0]) / (N_c[1] - N_c[0])
b = FI[1]-m*N_c[1]
N_crit_c = (1-b) / m
envelope_loads.append('{} = {:.1f} , {:.1f}'.format(loadname,N_crit_t, N_crit_c))
print('------------- enveloped loads for {} {} -----------------'.format(mylayup, mymat))
for k in envelope_loads:
print(k)
# plot envelope
Nx_env = []
Nxy_env = []
laminate_min = lambda N: failure_envelope_laminate(N,0,0,0,0,0,0,mymat,mylayup)
# compression
FI = [laminate_min(N) for N in N_c]
m = (FI[1]-FI[0]) / (N_c[1] - N_c[0])
b = FI[1]-m*N_c[1]
Nx_env.append( (1-b) / m )
Nxy_env.append( 0 )
# tension
FI = [laminate_min(N) for N in N_t]
m = (FI[1]-FI[0]) / (N_t[1] - N_t[0])
b = FI[1]-m*N_t[1]
Nx_env.append( (1-b) / m )
Nxy_env.append( 0 )
laminate_min = lambda N: failure_envelope_laminate(0,0,N,0,0,0,0,mymat,mylayup)
# compression
FI = [laminate_min(N) for N in N_c]
m = (FI[1]-FI[0]) / (N_c[1] - N_c[0])
b = FI[1]-m*N_c[1]
Nxy_env.append( (1-b) / m )
Nx_env.append( 0 )
# tension
FI = [laminate_min(N) for N in N_t]
m = (FI[1]-FI[0]) / (N_t[1] - N_t[0])
b = FI[1]-m*N_t[1]
Nxy_env.append( (1-b) / m )
Nx_env.append( 0 )
laminate_min_Nx_Nxy_func = lambda Nx,Nxy: failure_envelope_laminate(Nx,0,Nxy,0,0,0,0,mymat,mylayup)
n = 500
    f = 1.25 # scale factor applied to the envelope corner loads when sampling
# arr1 = np.random.randint(Nx_env[0]-abs(Nx_env[0]*f),Nx_env[0]+abs(Nx_env[0])*f,n)
# arr2 = np.random.randint(Nx_env[1]-abs(Nx_env[1]*f),Nx_env[1]+abs(Nx_env[1])*f,n)
# Nx_r = np.concatenate((arr1, arr2))
#
# arr1 = np.random.randint(Nxy_env[2]-abs(Nxy_env[2])*f,Nxy_env[2]+abs(Nxy_env[2])*f,n)
# arr2 = np.random.randint(Nxy_env[3]-abs(Nxy_env[3])*f,Nxy_env[3]+abs(Nxy_env[3])*f,n)
# Nxy_r = np.concatenate((arr1, arr2))
Nx_r = np.random.randint(Nx_env[0]*f,Nx_env[1]*f, n)
Nxy_r = np.random.randint(Nxy_env[2]*f,Nxy_env[3]*f, n)
for Nx_ri, Nxy_ri in zip(Nx_r, Nxy_r):
FI = laminate_min_Nx_Nxy_func(Nx_ri, Nxy_ri)
if FI < 1:
Nx_env.append(Nx_ri)
Nxy_env.append(Nxy_ri)
points = array([ [x,xy] for x,xy in zip(Nx_env, Nxy_env)])
hull = scipy.spatial.ConvexHull(points)
plot(points[:,0], points[:,1], 'bo')
for simplex in hull.simplices:
plot(points[simplex, 0], points[simplex, 1], 'k-')
xlabel('Nx, lb/in')
ylabel('Nxy, lb/in')
title('Failure envelope')
return envelope_loads
def my_laminate_with_loading():
# loads lbs/in
Nx = 50
Ny = 0
Nxy = 0
Mx = 0
My = 0
Mxy = 0
q0 = 0 # pressure
# Qx = 0
# Qy = 0
a_width = 50
b_length = 3.14*6.75
## sandwich laminate
# plyangle= [45,45,0, 45,45],
# plymatindex=[0, 0, 1, 0, 0],
# create a 45 carbon cloth panel with a 0.5 inch rohacell core
laminate_calcs(NM=[Nx,Ny,Nxy,Mx,My,Mxy],
ek=[0,0,0,0,0,0],
q0=q0,
plyangle= [0,60,-60,-60,60,0],
plymatindex=[0,0,0,0,0,0],
materials = ['E-Glass Epoxy Uni'],
platedim=[a_width,b_length],
zoffset=0,
SF=2.0,
plots=0,
prints=1)
if __name__=='__main__':
#plot_single_max_failure_loads()
#plot_failure_index()
my_laminate_with_loading()
#material_plots(['E-Glass Epoxy fabric M10E-3783'])
#plate()
#plot_Nx_Nxy_failure_envelope(['Carbon_cloth_AGP3705H'])
#plot_single_max_failure_loads()
# # reload modules
# import importlib ; importlib.reload
# from composites import laminate
# plyangle = [0,45]
# matindex = [0,0]
# matname = ['graphite-polymer_SI']
# lam1 = laminate(plyangle, matindex, matname)
# lam1.ABD
| mit |
pedro-aaron/stego-chi-2 | embeddingRgb.py | 1 | 2081 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Watermarkero, Mario, Ariel
"""
from PIL import Image
import random
import matplotlib.pyplot as plt
import numpy as np
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def marcarPixel(color, bitporinsertar):
if (color%2)==1:
if bitporinsertar==0:
color=color-1
elif (color%2)==0:
if bitporinsertar==1:
color=color+1
return color
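# Hedged usage check (added): marcarPixel forces the least-significant bit of a channel
# value to the payload bit, so an even value with bit 1 gains 1 and an odd value with
# bit 0 loses 1. Helper name is illustrative only.
def _lsb_demo():
    # expected result: (101, 100)
    return marcarPixel(100, 1), marcarPixel(101, 0)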
def plotLsbRgb(img):
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.set_title('Imagen RGB')
ax1.imshow(img)
ax2.set_title('LSB RGB')
img=255*(img%2)
ax2.imshow(img)
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10,
right=0.95, hspace=0.3,wspace=0.35)
# original image
path="img3.jpg"
imgOriginal = np.array(Image.open(path))
nFilas, nCols, nCanales = imgOriginal.shape
# watermark
key=41196
random.seed(key)
porcentajeDeimagenPorMarcar=50
sizeMarca = nCols*int(porcentajeDeimagenPorMarcar*(nFilas/100))
#marca = [random.randint(0,1) for i in range(sizeMarca)]
plotLsbRgb(imgOriginal)
# marking process
imgMarcada = imgOriginal.copy()
cont = 1 # counter of marked pixels (3 bits are embedded in each pixel)
# embedding loop
for fila in range(0,nFilas):
for columna in range(0,nCols):
pixel=imgOriginal[fila,columna]
newPixel = [marcarPixel(
pixel[0],random.randint(0,1)),
marcarPixel(pixel[1],random.randint(0,1)),
marcarPixel(pixel[2],random.randint(0,1))]
imgMarcada[fila,columna] = newPixel
if cont >= sizeMarca:
break
cont = cont +1
if cont >= sizeMarca:
break
plotLsbRgb(imgMarcada)
image = Image.fromarray(imgMarcada, 'RGB')
image.save('ImagenMarcada.bmp')
print('Porciento de la imagen marcada: ' + str(porcentajeDeimagenPorMarcar)+'%')
print('bits incrustados: ' + str(sizeMarca*3))
print('Bytes incrustados: ' + str(sizeMarca*3/8))
print('KiloBytes incrustados: ' + str(sizeMarca*3/8/1024))
print('MegaBytes incrustados: ' + str(sizeMarca*3/8/1024/1024))
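# Hedged companion sketch (added, not in the original script): reading the LSB plane back
# out of the marked image. This only recovers the raw embedded bits; verifying the
# watermark would mean re-seeding random with the same key and comparing bit streams.
# The function name is illustrative only.
def extractLsbBits(img, nBits):
    # row-major flatten matches the row/column/channel order used when embedding
    flat = np.asarray(img).reshape(-1)
    return [int(v % 2) for v in flat[:nBits]]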
| mit |
aerosara/thesis | notebooks_archive_10112014/pycse Examples.py | 1 | 2176 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=3>
# Example from pycse 1
# <codecell>
# copied from http://kitchingroup.cheme.cmu.edu/blog/tag/events/
from pycse import odelay
import matplotlib.pyplot as plt
import numpy as np
def ode(Y,x):
y1, y2 = Y
dy1dx = y2
dy2dx = -y1
return [dy1dx, dy2dx]
def event1(Y, x):
y1, y2 = Y
value = y2 - (-1.0)
isterminal = True
direction = 0
return value, isterminal, direction
def event2(Y, x):
dy1dx, dy2dx = ode(Y,x)
value = dy1dx - 0.0
isterminal = False
direction = -1 # derivative is decreasing towards a maximum
return value, isterminal, direction
Y0 = [2.0, 1.0]
xspan = np.linspace(0, 5)
X, Y, XE, YE, IE = odelay(ode, Y0, xspan, events=[event1, event2])
plt.plot(X, Y)
for ie,xe,ye in zip(IE, XE, YE):
if ie == 1: #this is the second event
y1,y2 = ye
plt.plot(xe, y1, 'ro')
plt.legend(['$y_1$', '$y_2$'], loc='best')
#plt.savefig('images/odelay-mult-eq.png')
plt.show()
# <headingcell level=3>
# Example from pycse 2
# <codecell>
# copied from: http://kitchingroup.cheme.cmu.edu/pycse/pycse.html#sec-10-1-8
# 10.1.8 Stopping the integration of an ODE at some condition
from pycse import *
import numpy as np
k = 0.23
Ca0 = 2.3
def dCadt(Ca, t):
return -k * Ca**2
def stop(Ca, t):
isterminal = True
direction = 0
value = 1.0 - Ca
return value, isterminal, direction
tspan = np.linspace(0.0, 10.0)
t, CA, TE, YE, IE = odelay(dCadt, Ca0, tspan, events=[stop])
print 'At t = {0:1.2f} seconds the concentration of A is {1:1.2f} mol/L.'.format(t[-1], float(CA[-1]))
# <headingcell level=3>
# fsolve example
# <codecell>
from math import cos
def func(x):
return x + 2*cos(x) # finds where this is zero
def func2(x):
out = [x[0]*cos(x[1]) - 4]
out.append(x[1]*x[0] - x[1] - 5)
return out # finds where both elements of this array are zero
from scipy.optimize import fsolve
x0 = fsolve(func, 0.3) # initial guess
print x0
print func(x0)
#-1.02986652932
x02 = fsolve(func2, [1, 1]) # initial guesses
print x02
print func2(x02)
#[ 6.50409711 0.90841421]
| mit |
hrjn/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 75 | 1647 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1); that is, the third feature is completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-tests statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly marks x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| bsd-3-clause |
Ziqi-Li/bknqgis | pandas/pandas/core/window.py | 3 | 68731 | """
provide a generic structure to support window functions,
similar to how we have a Groupby object
"""
from __future__ import division
import warnings
import numpy as np
from collections import defaultdict
from datetime import timedelta
from pandas.core.dtypes.generic import (
ABCSeries,
ABCDataFrame,
ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex,
ABCDateOffset)
from pandas.core.dtypes.common import (
is_integer,
is_bool,
is_float_dtype,
is_integer_dtype,
needs_i8_conversion,
is_timedelta64_dtype,
is_list_like,
_ensure_float64,
is_scalar)
from pandas.core.base import (PandasObject, SelectionMixin,
GroupByMixin)
import pandas.core.common as com
import pandas._libs.window as _window
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Substitution, Appender,
cache_readonly)
from pandas.core.generic import _shared_docs
from textwrap import dedent
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
same type as input
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
"""
class _Window(PandasObject, SelectionMixin):
_attributes = ['window', 'min_periods', 'freq', 'center', 'win_type',
'axis', 'on', 'closed']
exclusions = set()
def __init__(self, obj, window=None, min_periods=None, freq=None,
center=False, win_type=None, axis=0, on=None, closed=None,
**kwargs):
if freq is not None:
warnings.warn("The freq kw is deprecated and will be removed in a "
"future version. You can resample prior to passing "
"to a window function", FutureWarning, stacklevel=3)
self.__dict__.update(kwargs)
self.blocks = []
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.freq = freq
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self):
return None
@property
def _on(self):
return None
@property
def is_freq_type(self):
return self.win_type == 'freq'
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not \
is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in \
['right', 'both', 'left', 'neither']:
raise ValueError("closed must be 'right', 'left', 'both' or "
"'neither'")
def _convert_freq(self, how=None):
""" resample according to the how, return a new object """
obj = self._selected_obj
index = None
if (self.freq is not None and
isinstance(obj, (ABCSeries, ABCDataFrame))):
if how is not None:
warnings.warn("The how kw argument is deprecated and removed "
"in a future version. You can resample prior "
"to passing to a window function", FutureWarning,
stacklevel=6)
obj = obj.resample(self.freq).aggregate(how or 'asfreq')
return obj, index
def _create_blocks(self, how):
""" split data into blocks & return conformed data """
obj, index = self._convert_freq(how)
if index is not None:
index = self._on
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]),
copy=False)
blocks = obj.as_blocks(copy=False).values()
return blocks, obj, index
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self):
return self.__class__.__name__
def __unicode__(self):
""" provide a nice str repr of our rolling object """
attrs = ["{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None]
return "{klass} [{attrs}]".format(klass=self._window_type,
attrs=','.join(attrs))
def _get_index(self, index=None):
"""
Return index as ndarrays
Returns
-------
tuple of (index, index_as_ndarray)
"""
if self.is_freq_type:
if index is None:
index = self._on
return index, index.asi8
return index, index
def _prep_values(self, values=None, kill_inf=True, how=None):
if values is None:
values = getattr(self._selected_obj, 'values', self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = _ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = _ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError("ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(
action=self._window_type,
dtype=values.dtype))
else:
try:
values = _ensure_float64(values)
except (ValueError, TypeError):
raise TypeError("cannot handle this type -> {0}"
"".format(values.dtype))
if kill_inf:
values = values.copy()
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None):
""" wrap a single result """
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(
result.ravel(), unit='ns').values.reshape(result.shape)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj):
"""
wrap the results
        Parameters
        ----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
"""
from pandas import Series, concat
from pandas.core.index import _ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = _ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
if not len(final):
return obj.astype('float64')
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window):
""" center the result in the window """
if self.axis > result.ndim - 1:
raise ValueError("Requested axis is larger then no. of argument "
"dimensions")
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
return self.apply(arg, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs['sum'] = dedent("""
%(name)s sum
Parameters
----------
how : string, default None
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
_shared_docs['mean'] = dedent("""
%(name)s mean
Parameters
----------
how : string, default None
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
class Window(_Window):
"""
Provides rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If it is an offset then this will be the time period of each window. Each
window will be variably sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
this will default to 1.
freq : string or DateOffset object, optional (default None)
.. deprecated:: 0.18.0
Frequency to conform the data to before computing the statistic.
Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. See the notes below.
on : string, optional
For a DataFrame, column on which to calculate
the rolling window, rather than the index
closed : string, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
axis : int or string, default 0
Returns
-------
a Window or Rolling sub-classed for the particular operation
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 1.0
2 2.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index=[pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width).
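The window types above that are marked as needing a parameter take it
through the aggregation call. A minimal sketch (illustrative only; it
assumes scipy is installed, and ``weighted`` is just an example variable
name, not part of the API):
>>> weighted = df.rolling(2, win_type='gaussian').sum(std=3)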
"""
def validate(self):
super(Window, self).validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window < 0:
raise ValueError("window must be non-negative")
try:
import scipy.signal as sig
except ImportError:
raise ImportError('Please install scipy to generate window '
'weight')
if not isinstance(self.win_type, compat.string_types):
raise ValueError('Invalid win_type {0}'.format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError('Invalid win_type {0}'.format(self.win_type))
else:
raise ValueError('Invalid window {0}'.format(window))
def _prep_window(self, **kwargs):
"""
Provide validation for the window type and return the window;
the object itself has already been validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com._asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {'kaiser': ['beta'],
'gaussian': ['std'],
'general_gaussian': ['power', 'width'],
'slepian': ['width']}
if win_type in arg_map:
return tuple([win_type] + _pop_args(win_type,
arg_map[win_type],
kwargs))
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = '%s window requires %%s' % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
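# Illustrative note (not in the original source, hand-checked against scipy):
# for win_type='triang' and an integer window of 3, the prepared weights are
#   scipy.signal.get_window('triang', 3, False) -> array([0.5, 1. , 0.5]),
# i.e. a symmetric (non-periodic) window cast to float.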
def _apply_window(self, mean=True, how=None, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : boolean, default True
If True computes weighted mean, else weighted sum
how : string, default to None
.. deprecated:: 0.18.0
how to resample
Returns
-------
y : type of input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj, index = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return _window.roll_window(np.concatenate((arg,
additional_nans))
if center else arg, window, minp,
avg=mean)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
See also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_window_func('sum', args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name='window')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
""" provide the groupby facilities """
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop('parent', None) # noqa
groupby = kwargs.pop('groupby', None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super(GroupByMixin, self).__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch('count')
corr = GroupByMixin._dispatch('corr', other=None, pairwise=None)
cov = GroupByMixin._dispatch('cov', other=None, pairwise=None)
def _apply(self, func, name, window=None, center=None,
check_minp=None, how=None, **kwargs):
"""
dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, compat.string_types):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
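# Illustrative usage of the groupby dispatch above (column names are made up):
#   df.groupby('key').rolling(2).sum()
# Each group is shallow-copied into a fresh rolling object and the requested
# statistic is computed per group via self._groupby.apply(f).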
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(self, func, name=None, window=None, center=None,
check_minp=None, how=None, **kwargs):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
name : string, optional
name of this function
window : int/array, default to _get_window()
center : boolean, default to self.center
check_minp : function, default to _use_window
how : string, default to None
.. deprecated:: 0.18.0
how to resample
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj, index = self._create_blocks(how=how)
index, indexi = self._get_index(index=index)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
cfunc = getattr(_window, func, None)
if cfunc is None:
raise ValueError("we do not support this function "
"in _window.{0}".format(func))
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = _ensure_float64(arg)
return cfunc(arg,
window, minp, indexi, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(np.concatenate((x, additional_nans)),
window, min_periods=self.min_periods,
closed=self.closed)
else:
def calc(x):
return func(x, window, min_periods=self.min_periods,
closed=self.closed)
with np.errstate(all='ignore'):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, blocks, obj)
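# Illustrative note: a call such as self._apply('roll_mean', 'mean') resolves
# the string to the Cython kernel _window.roll_mean and applies it block-wise
# (optionally re-centering the result); see the public mean() further down.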
class _Rolling_and_Expanding(_Rolling):
_shared_docs['count'] = """%(name)s count of number of non-NaN
observations inside provided window."""
def count(self):
blocks, obj, index = self._create_blocks(how=None)
index, indexi = self._get_index(index=index)
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(result, window=window, min_periods=0,
center=self.center,
closed=self.closed).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
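# Sketch of the equivalence implemented by count() above, hand-checked on a
# small Series ('s' is an illustrative name, window no longer than the data):
#   s.rolling(2).count()
#   == s.notna().astype(int).rolling(2, min_periods=0).sum()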
_shared_docs['apply'] = dedent(r"""
%(name)s function apply
Parameters
----------
func : function
Must produce a single value from an ndarray input
\*args and \*\*kwargs are passed to the function""")
def apply(self, func, args=(), kwargs={}):
# TODO: _level is unused?
_level = kwargs.pop('_level', None) # noqa
window = self._get_window()
offset = _offset(window, self.center)
index, indexi = self._get_index()
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
return _window.roll_generic(arg, window, minp, indexi, closed,
offset, func, args, kwargs)
return self._apply(f, func, args=args, kwargs=kwargs,
center=False)
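# Illustrative example of the generic apply (func must reduce an ndarray to a
# scalar); values hand-checked:
#   import pandas as pd
#   pd.Series([1., 2., 3., 4.]).rolling(2).apply(lambda a: a.max() - a.min())
#   -> NaN, 1.0, 1.0, 1.0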
def sum(self, *args, **kwargs):
nv.validate_window_func('sum', args, kwargs)
return self._apply('roll_sum', 'sum', **kwargs)
_shared_docs['max'] = dedent("""
%(name)s maximum
Parameters
----------
how : string, default 'max'
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
def max(self, how=None, *args, **kwargs):
nv.validate_window_func('max', args, kwargs)
if self.freq is not None and how is None:
how = 'max'
return self._apply('roll_max', 'max', how=how, **kwargs)
_shared_docs['min'] = dedent("""
%(name)s minimum
Parameters
----------
how : string, default 'min'
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
def min(self, how=None, *args, **kwargs):
nv.validate_window_func('min', args, kwargs)
if self.freq is not None and how is None:
how = 'min'
return self._apply('roll_min', 'min', how=how, **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func('mean', args, kwargs)
return self._apply('roll_mean', 'mean', **kwargs)
_shared_docs['median'] = dedent("""
%(name)s median
Parameters
----------
how : string, default 'median'
.. deprecated:: 0.18.0
Method for down- or re-sampling""")
def median(self, how=None, **kwargs):
if self.freq is not None and how is None:
how = 'median'
return self._apply('roll_median_c', 'median', how=how, **kwargs)
_shared_docs['std'] = dedent("""
%(name)s standard deviation
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func('std', args, kwargs)
window = self._get_window()
index, indexi = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(_window.roll_var(arg, window, minp, indexi,
self.closed, ddof))
return self._apply(f, 'std', check_minp=_require_min_periods(1),
ddof=ddof, **kwargs)
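# Note (added for clarity): std is sqrt(var) with the same ddof, so apart from
# the negative-variance guard in _zsqrt,
#   s.rolling(3).std() == np.sqrt(s.rolling(3).var())   element-wise.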
_shared_docs['var'] = dedent("""
%(name)s variance
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func('var', args, kwargs)
return self._apply('roll_var', 'var',
check_minp=_require_min_periods(1), ddof=ddof,
**kwargs)
_shared_docs['skew'] = """Unbiased %(name)s skewness"""
def skew(self, **kwargs):
return self._apply('roll_skew', 'skew',
check_minp=_require_min_periods(3), **kwargs)
_shared_docs['kurt'] = """Unbiased %(name)s kurtosis"""
def kurt(self, **kwargs):
return self._apply('roll_kurt', 'kurt',
check_minp=_require_min_periods(4), **kwargs)
_shared_docs['quantile'] = dedent("""
%(name)s quantile
Parameters
----------
quantile : float
0 <= quantile <= 1""")
def quantile(self, quantile, **kwargs):
window = self._get_window()
index, indexi = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return _window.roll_max(arg, window, minp, indexi,
self.closed)
elif quantile == 0.0:
return _window.roll_min(arg, window, minp, indexi,
self.closed)
else:
return _window.roll_quantile(arg, window, minp, indexi,
self.closed, quantile)
return self._apply(f, 'quantile', quantile=quantile,
**kwargs)
_shared_docs['cov'] = dedent("""
%(name)s sample covariance
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used
and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.""")
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype('float64')
Y = Y.astype('float64')
mean = lambda x: x.rolling(window, self.min_periods,
center=self.center).mean(**kwargs)
count = (X + Y).rolling(window=window,
center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
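# The window-wise estimator above is the bias-corrected moment identity
#   cov(X, Y) = (E[X*Y] - E[X]*E[Y]) * n / (n - ddof)
# with the expectations and counts computed by rolling mean()/count().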
_shared_docs['corr'] = dedent("""
%(name)s sample correlation
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndex DataFrame in the case of DataFrame inputs.
In the case of missing elements, only complete pairwise observations
will be used.""")
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(window=window, min_periods=self.min_periods,
freq=self.freq, center=self.center)
b = b.rolling(window=window, min_periods=self.min_periods,
freq=self.freq, center=self.center)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_corr, pairwise=bool(pairwise))
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(self._on,
(ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex))
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif (isinstance(self.obj, ABCDataFrame) and
self.on in self.obj.columns):
from pandas import Index
return Index(self.obj[self.on])
else:
raise ValueError("invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on))
def validate(self):
super(Rolling, self).validate()
# we allow rolling on a datetimelike index
if ((self.obj.empty or self.is_datetimelike) and
isinstance(self.window, (compat.string_types, ABCDateOffset,
timedelta))):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError("center is not implemented "
"for datetimelike and offset "
"based windows")
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = 'freq'
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError("closed only implemented for datetimelike "
"and offset based windows")
def _validate_monotonic(self):
""" validate on is monotonic """
if not self._on.is_monotonic:
formatted = self.on or 'index'
raise ValueError("{0} must be "
"monotonic".format(formatted))
def _validate_freq(self):
""" validate & return our freq """
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError("passed window {0} in not "
"compat with a datetimelike "
"index".format(self.window))
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
See also
--------
pandas.Series.rolling
pandas.DataFrame.rolling
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(Rolling, self).aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['count'])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply('roll_count', 'count')
return super(Rolling, self).count()
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['apply'])
def apply(self, func, args=(), kwargs={}):
return super(Rolling, self).apply(func, args=args, kwargs=kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_rolling_func('sum', args, kwargs)
return super(Rolling, self).sum(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['max'])
def max(self, *args, **kwargs):
nv.validate_rolling_func('max', args, kwargs)
return super(Rolling, self).max(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['min'])
def min(self, *args, **kwargs):
nv.validate_rolling_func('min', args, kwargs)
return super(Rolling, self).min(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_rolling_func('mean', args, kwargs)
return super(Rolling, self).mean(*args, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['median'])
def median(self, **kwargs):
return super(Rolling, self).median(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['std'])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func('std', args, kwargs)
return super(Rolling, self).std(ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['var'])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func('var', args, kwargs)
return super(Rolling, self).var(ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['skew'])
def skew(self, **kwargs):
return super(Rolling, self).skew(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['kurt'])
def kurt(self, **kwargs):
return super(Rolling, self).kurt(**kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, **kwargs):
return super(Rolling, self).quantile(quantile=quantile, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['cov'])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super(Rolling, self).cov(other=other, pairwise=pairwise,
ddof=ddof, **kwargs)
@Substitution(name='rolling')
@Appender(_doc_template)
@Appender(_shared_docs['corr'])
def corr(self, other=None, pairwise=None, **kwargs):
return super(Rolling, self).corr(other=other, pairwise=pairwise,
**kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
"""
Provides a rolling groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Rolling
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried thru to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
self._groupby.obj = self._groupby.obj.set_index(self._on)
self.on = None
return super(RollingGroupby, self)._gotitem(key, ndim, subset=subset)
def _validate_monotonic(self):
"""
validate that on is monotonic;
we don't care for groupby.rolling
because we have already validated at a higher
level
"""
pass
class Expanding(_Rolling_and_Expanding):
"""
Provides expanding transformations.
.. versionadded:: 0.18.0
Parameters
----------
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
.. deprecated:: 0.18.0
Frequency to conform the data to before computing the statistic.
Specified as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
axis : int or string, default 0
Returns
-------
a Window sub-classed for the particular operation
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.expanding(2).sum()
B
0 NaN
1 1.0
2 3.0
3 3.0
4 7.0
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
_attributes = ['min_periods', 'freq', 'center', 'axis']
def __init__(self, obj, min_periods=1, freq=None, center=False, axis=0,
**kwargs):
super(Expanding, self).__init__(obj=obj, min_periods=min_periods,
freq=freq, center=center, axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None):
obj = self._selected_obj
if other is None:
return (max(len(obj), self.min_periods) if self.min_periods
else len(obj))
return (max((len(obj) + len(obj)), self.min_periods)
if self.min_periods else (len(obj) + len(obj)))
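# Illustrative consequence of _get_window above: expanding is a rolling window
# that grows to the full length of the object, so (hand-checked, 's' is an
# example name):
#   s.expanding(min_periods=2).sum()
#   == s.rolling(window=len(s), min_periods=2).sum()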
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
See also
--------
pandas.DataFrame.expanding.aggregate
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(Expanding, self).aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['count'])
def count(self, **kwargs):
return super(Expanding, self).count(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['apply'])
def apply(self, func, args=(), kwargs={}):
return super(Expanding, self).apply(func, args=args, kwargs=kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['sum'])
def sum(self, *args, **kwargs):
nv.validate_expanding_func('sum', args, kwargs)
return super(Expanding, self).sum(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['max'])
def max(self, *args, **kwargs):
nv.validate_expanding_func('max', args, kwargs)
return super(Expanding, self).max(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['min'])
def min(self, *args, **kwargs):
nv.validate_expanding_func('min', args, kwargs)
return super(Expanding, self).min(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['mean'])
def mean(self, *args, **kwargs):
nv.validate_expanding_func('mean', args, kwargs)
return super(Expanding, self).mean(*args, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['median'])
def median(self, **kwargs):
return super(Expanding, self).median(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['std'])
def std(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func('std', args, kwargs)
return super(Expanding, self).std(ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['var'])
def var(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func('var', args, kwargs)
return super(Expanding, self).var(ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['skew'])
def skew(self, **kwargs):
return super(Expanding, self).skew(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['kurt'])
def kurt(self, **kwargs):
return super(Expanding, self).kurt(**kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['quantile'])
def quantile(self, quantile, **kwargs):
return super(Expanding, self).quantile(quantile=quantile, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['cov'])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super(Expanding, self).cov(other=other, pairwise=pairwise,
ddof=ddof, **kwargs)
@Substitution(name='expanding')
@Appender(_doc_template)
@Appender(_shared_docs['corr'])
def corr(self, other=None, pairwise=None, **kwargs):
return super(Expanding, self).corr(other=other, pairwise=pairwise,
**kwargs)
class ExpandingGroupby(_GroupByMixin, Expanding):
"""
Provides an expanding groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Expanding
_bias_template = """
Parameters
----------
bias : boolean, default False
Use a standard estimation bias correction
"""
_pairwise_template = """
Parameters
----------
other : Series, DataFrame, or ndarray, optional
if not supplied then will default to self and produce pairwise output
pairwise : bool, default None
If False then only matching columns between self and other will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
will be a MultiIndex DataFrame in the case of DataFrame inputs.
In the case of missing elements, only complete pairwise observations will
be used.
bias : boolean, default False
Use a standard estimation bias correction
"""
class EWM(_Rolling):
r"""
Provides exponentially weighted functions
.. versionadded:: 0.18.0
Parameters
----------
com : float, optional
Specify decay in terms of center of mass,
:math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`
span : float, optional
Specify decay in terms of span,
:math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`
halflife : float, optional
Specify decay in terms of half-life,
:math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0`
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`
.. versionadded:: 0.18.0
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : None or string alias / date offset object, default=None
.. deprecated:: 0.18.0
Frequency to conform to before computing statistic
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account
for imbalance in relative weightings (viewing EWMA as a moving average)
ignore_na : boolean, default False
Ignore missing values when calculating weights;
specify True to reproduce pre-0.15.0 behavior
Returns
-------
a Window sub-classed for the particular operation
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.ewm(com=0.5).mean()
B
0 0.000000
1 0.750000
2 1.615385
3 1.615385
4 3.670213
Notes
-----
Exactly one of center of mass, span, half-life, and alpha must be provided.
Allowed values and relationship between the parameters are specified in the
parameter descriptions above; see the link at the end of this section for
a detailed explanation.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
When adjust is True (default), weighted averages are calculated using
weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
When adjust is False, weighted averages are calculated recursively as:
weighted_average[0] = arg[0];
weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
When ignore_na is False (default), weights are based on absolute positions.
For example, the weights of x and y used in calculating the final weighted
average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
(1-alpha)**2 and alpha (if adjust is False).
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based
on relative positions. For example, the weights of x and y used in
calculating the final weighted average of [x, None, y] are 1-alpha and 1
(if adjust is True), and 1-alpha and alpha (if adjust is False).
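As a quick sketch of the two recursions described above (output omitted
here; ``adjusted`` and ``recursive`` are just illustrative names):
>>> adjusted = df.ewm(com=0.5, adjust=True).mean()
>>> recursive = df.ewm(com=0.5, adjust=False).mean()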
More details can be found at
http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows
"""
_attributes = ['com', 'min_periods', 'freq', 'adjust', 'ignore_na', 'axis']
def __init__(self, obj, com=None, span=None, halflife=None, alpha=None,
min_periods=0, freq=None, adjust=True, ignore_na=False,
axis=0):
self.obj = obj
self.com = _get_center_of_mass(com, span, halflife, alpha)
self.min_periods = min_periods
self.freq = freq
self.adjust = adjust
self.ignore_na = ignore_na
self.axis = axis
self.on = None
@property
def _constructor(self):
return EWM
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
See also
--------
pandas.DataFrame.rolling.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='',
klass='Series/DataFrame'))
def aggregate(self, arg, *args, **kwargs):
return super(EWM, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _apply(self, func, how=None, **kwargs):
"""Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
func : string/callable to apply
how : string, default to None
.. deprecated:: 0.18.0
how to resample
Returns
-------
y : type of input argument
"""
blocks, obj, index = self._create_blocks(how=how)
results = []
for b in blocks:
try:
values = self._prep_values(b.values)
except TypeError:
results.append(b.values.copy())
continue
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, compat.string_types):
cfunc = getattr(_window, func, None)
if cfunc is None:
raise ValueError("we do not support this function "
"in _window.{0}".format(func))
def func(arg):
return cfunc(arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods))
results.append(np.apply_along_axis(func, self.axis, values))
return self._wrap_results(results, blocks, obj)
@Substitution(name='ewm')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""exponential weighted moving average"""
nv.validate_window_func('mean', args, kwargs)
return self._apply('ewma', **kwargs)
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def std(self, bias=False, *args, **kwargs):
"""exponential weighted moving stddev"""
nv.validate_window_func('std', args, kwargs)
return _zsqrt(self.var(bias=bias, **kwargs))
vol = std
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_bias_template)
def var(self, bias=False, *args, **kwargs):
"""exponential weighted moving variance"""
nv.validate_window_func('var', args, kwargs)
def f(arg):
return _window.ewmcov(arg, arg, self.com, int(self.adjust),
int(self.ignore_na), int(self.min_periods),
int(bias))
return self._apply(f, **kwargs)
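# Note (added for clarity): the EW variance above is the EW covariance of the
# series with itself, i.e. var(x) == ewmcov(x, x) with the same settings.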
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_pairwise_template)
def cov(self, other=None, pairwise=None, bias=False, **kwargs):
"""exponential weighted sample covariance"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_cov(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
cov = _window.ewmcov(X._prep_values(), Y._prep_values(), self.com,
int(self.adjust), int(self.ignore_na),
int(self.min_periods), int(bias))
return X._wrap_result(cov)
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_cov, pairwise=bool(pairwise))
@Substitution(name='ewm')
@Appender(_doc_template)
@Appender(_pairwise_template)
def corr(self, other=None, pairwise=None, **kwargs):
"""exponential weighted sample correlation"""
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
def _get_corr(X, Y):
X = self._shallow_copy(X)
Y = self._shallow_copy(Y)
def _cov(x, y):
return _window.ewmcov(x, y, self.com, int(self.adjust),
int(self.ignore_na),
int(self.min_periods),
1)
x_values = X._prep_values()
y_values = Y._prep_values()
with np.errstate(all='ignore'):
cov = _cov(x_values, y_values)
x_var = _cov(x_values, x_values)
y_var = _cov(y_values, y_values)
corr = cov / _zsqrt(x_var * y_var)
return X._wrap_result(corr)
return _flex_binary_moment(self._selected_obj, other._selected_obj,
_get_corr, pairwise=bool(pairwise))
# Helper Funcs
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
if not (isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) and
isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))):
raise TypeError("arguments to moment function must be of type "
"np.ndarray/Series/DataFrame")
if (isinstance(arg1, (np.ndarray, ABCSeries)) and
isinstance(arg2, (np.ndarray, ABCSeries))):
X, Y = _prep_binary(arg1, arg2)
return f(X, Y)
elif isinstance(arg1, ABCDataFrame):
from pandas import DataFrame
def dataframe_from_int_dict(data, frame_template):
result = DataFrame(data, index=frame_template.index)
if len(result.columns) > 0:
result.columns = frame_template.columns[result.columns]
return result
results = {}
if isinstance(arg2, ABCDataFrame):
if pairwise is False:
if arg1 is arg2:
# special case in order to handle duplicate column names
for i, col in enumerate(arg1.columns):
results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
return dataframe_from_int_dict(results, arg1)
else:
if not arg1.columns.is_unique:
raise ValueError("'arg1' columns are not unique")
if not arg2.columns.is_unique:
raise ValueError("'arg2' columns are not unique")
with warnings.catch_warnings(record=True):
X, Y = arg1.align(arg2, join='outer')
X = X + 0 * Y
Y = Y + 0 * X
with warnings.catch_warnings(record=True):
res_columns = arg1.columns.union(arg2.columns)
for col in res_columns:
if col in X and col in Y:
results[col] = f(X[col], Y[col])
return DataFrame(results, index=X.index,
columns=res_columns)
elif pairwise is True:
results = defaultdict(dict)
for i, k1 in enumerate(arg1.columns):
for j, k2 in enumerate(arg2.columns):
if j < i and arg2 is arg1:
# Symmetric case
results[i][j] = results[j][i]
else:
results[i][j] = f(*_prep_binary(arg1.iloc[:, i],
arg2.iloc[:, j]))
# TODO: not the most efficient (perf-wise)
# though not bad code-wise
from pandas import Panel, MultiIndex, concat
with warnings.catch_warnings(record=True):
p = Panel.from_dict(results).swapaxes('items', 'major')
if len(p.major_axis) > 0:
p.major_axis = arg1.columns[p.major_axis]
if len(p.minor_axis) > 0:
p.minor_axis = arg2.columns[p.minor_axis]
if len(p.items):
result = concat(
[p.iloc[i].T for i in range(len(p.items))],
keys=p.items)
else:
result = DataFrame(
index=MultiIndex(levels=[arg1.index, arg1.columns],
labels=[[], []]),
columns=arg2.columns,
dtype='float64')
# reset our index names to arg1 names
# reset our column names to arg2 names
# careful not to mutate the original names
result.columns = result.columns.set_names(
arg2.columns.names)
result.index = result.index.set_names(
arg1.index.names + arg1.columns.names)
return result
else:
raise ValueError("'pairwise' is not True/False")
else:
results = {}
for i, col in enumerate(arg1.columns):
results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))
return dataframe_from_int_dict(results, arg1)
else:
return _flex_binary_moment(arg2, arg1, f)
def _get_center_of_mass(com, span, halflife, alpha):
valid_count = len([x for x in [com, span, halflife, alpha]
if x is not None])
if valid_count > 1:
raise ValueError("com, span, halflife, and alpha "
"are mutually exclusive")
# Convert to center of mass; domain checks ensure 0 < alpha <= 1
if com is not None:
if com < 0:
raise ValueError("com must satisfy: com >= 0")
elif span is not None:
if span < 1:
raise ValueError("span must satisfy: span >= 1")
com = (span - 1) / 2.
elif halflife is not None:
if halflife <= 0:
raise ValueError("halflife must satisfy: halflife > 0")
decay = 1 - np.exp(np.log(0.5) / halflife)
com = 1 / decay - 1
elif alpha is not None:
if alpha <= 0 or alpha > 1:
raise ValueError("alpha must satisfy: 0 < alpha <= 1")
com = (1.0 - alpha) / alpha
else:
raise ValueError("Must pass one of com, span, halflife, or alpha")
return float(com)
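# Hand-checked conversions for the formulas above (illustrative only):
#   span=5     -> com = (5 - 1) / 2                              = 2.0
#   halflife=1 -> decay = 1 - exp(log(0.5)/1) = 0.5, com = 1/0.5 - 1 = 1.0
#   alpha=0.5  -> com = (1 - 0.5) / 0.5                          = 1.0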
def _offset(window, center):
if not is_integer(window):
window = len(window)
offset = (window - 1) / 2. if center else 0
try:
return int(offset)
except (TypeError, ValueError):
return offset.astype(int)
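# Illustrative values (hand-checked):
#   _offset(5, center=True)  -> 2        # int((5 - 1) / 2)
#   _offset(4, center=True)  -> 1        # int(1.5) truncates
#   _offset(5, center=False) -> 0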
def _require_min_periods(p):
def _check_func(minp, window):
if minp is None:
return window
else:
return max(p, minp)
return _check_func
def _use_window(minp, window):
if minp is None:
return window
else:
return minp
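# Illustrative behaviour of the two min_periods helpers (hand-checked):
#   _require_min_periods(3)(None, 10) -> 10    # no minp given: use the window
#   _require_min_periods(3)(1, 10)    -> 3     # enforce the floor of 3
#   _use_window(None, 10)             -> 10
#   _use_window(4, 10)                -> 4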
def _zsqrt(x):
with np.errstate(all='ignore'):
result = np.sqrt(x)
mask = x < 0
if isinstance(x, ABCDataFrame):
if mask.values.any():
result[mask] = 0
else:
if mask.any():
result[mask] = 0
return result
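# _zsqrt guards against small negative inputs (e.g. variances that go slightly
# negative from floating point error); illustrative:
#   _zsqrt(pd.Series([4.0, -1e-12])) -> [2.0, 0.0]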
def _prep_binary(arg1, arg2):
if not isinstance(arg2, type(arg1)):
raise Exception('Input arrays must be of the same type!')
# mask out values, this also makes a common index...
X = arg1 + 0 * arg2
Y = arg2 + 0 * arg1
return X, Y
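# The "+ 0 *" trick above aligns both inputs on the union of their indexes and
# propagates NaNs from either side into both. E.g. aligning Series indexed by
# [0, 1] and [1, 2] yields NaN at positions 0 and 2 in both X and Y.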
# Top-level exports
def rolling(obj, win_type=None, **kwds):
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
raise TypeError('invalid type: %s' % type(obj))
if win_type is not None:
return Window(obj, win_type=win_type, **kwds)
return Rolling(obj, **kwds)
rolling.__doc__ = Window.__doc__
def expanding(obj, **kwds):
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
raise TypeError('invalid type: %s' % type(obj))
return Expanding(obj, **kwds)
expanding.__doc__ = Expanding.__doc__
def ewm(obj, **kwds):
if not isinstance(obj, (ABCSeries, ABCDataFrame)):
raise TypeError('invalid type: %s' % type(obj))
return EWM(obj, **kwds)
ewm.__doc__ = EWM.__doc__
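# Minimal usage sketch of the constructors defined above (normally reached via
# the Series/DataFrame methods of the same names; values hand-checked, and the
# variable name 's' is illustrative):
#   import numpy as np
#   import pandas as pd
#   s = pd.Series([0., 1., 2., np.nan, 4.])
#   s.rolling(2, min_periods=1).sum()   # -> 0.0, 1.0, 3.0, 2.0, 4.0
#   s.expanding(2).sum()                # -> NaN, 1.0, 3.0, 3.0, 7.0
#   s.ewm(com=0.5).mean()               # exponentially weighted mean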