repo_name | path | copies | size | content | license
---|---|---|---|---|---|
RegulatoryGenomicsUPF/pyicoteo | pyicoteolib/enrichment.py | 1 | 40209 | """
Pyicoteo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, os
import math
import random
from core import Cluster, Region, InvalidLine, InsufficientData, ConversionNotSupported
from defaults import *
import utils
import bam
from regions import AnnotationGene, AnnotationTranscript, AnnotationExon, RegionWriter, read_gff_file, get_exons, get_introns, gene_slide
import warnings
try:
from shutil import move
except ImportError:
from os import rename as move
"""
Differential expression and MA plot visualization module.
"""
def _region_from_dual(self, line):
try:
self.cluster_aux.clear()
self.cluster_aux.read_line(line)
strand = None
if self.stranded_analysis:
strand = self.cluster_aux.strand
ret = Region(self.cluster_aux.name, self.cluster_aux.start, self.cluster_aux.end, name2=self.cluster_aux.name2, strand=strand)
self.cluster_aux.clear()
return ret
except ValueError:
pass #discarding header
def __calc_reg_write(self, region_file, count, calculated_region):
if count > self.region_mintags:
region_file.write(calculated_region.write())
def calculate_region(self):
"""
Calculate a region file using the reads present in both of the main files being analyzed.
"""
self.logger.info('Generating regions...')
self.sorted_region_path = '%s/calcregion_%s.bed'%(self._output_dir(), os.path.basename(self.current_output_path))
region_file = open(self.sorted_region_path, 'wb')
if self.region_magic:
regwriter = RegionWriter(self.gff_file, region_file, self.region_magic, no_sort=self.no_sort, logger=self.logger, write_as=BED, galaxy_workarounds=self.galaxy_workarounds)
regwriter.write_regions()
dual_reader = utils.DualSortedReader(self.current_experiment_path, self.current_control_path, self.experiment_format, self.logger)
if self.stranded_analysis:
calculate_region_stranded(self, dual_reader, region_file)
else:
calculate_region_notstranded(self, dual_reader, region_file)
region_file.flush()
def __cr_append(self, regions, region):
regions.append(region)
def calculate_region_notstranded(self, dual_reader, region_file):
calculated_region = Region()
readcount = 1
for line in dual_reader:
if not calculated_region: #first region only
calculated_region = _region_from_dual(self, line)
calculated_region.end += self.proximity
else:
new_region = _region_from_dual(self, line)
new_region.end += self.proximity
if calculated_region.overlap(new_region):
calculated_region.join(new_region)
readcount += 1
else:
calculated_region.end -= self.proximity
__calc_reg_write(self, region_file, readcount, calculated_region)
calculated_region = new_region.copy()
readcount = 1
if calculated_region:
calculated_region.end -= self.proximity
__calc_reg_write(self, region_file, readcount, calculated_region)
def calculate_region_stranded(self, dual_reader, region_file):
temp_region_file = open(self.sorted_region_path, 'wb')
region_plus = Region()
region_minus = Region()
regions = []
numreads_plus = 1
numreads_minus = 1
dual_reader = utils.DualSortedReader(self.current_experiment_path, self.current_control_path, self.experiment_format, self.logger)
for line in dual_reader:
new_region = _region_from_dual(self, line)
new_region.end += self.proximity
if not (region_plus and new_region.strand == PLUS_STRAND):
region_plus = _region_from_dual(self, line)
elif not (region_minus and new_region.strand == MINUS_STRAND): #assumes MINUS_STRAND is defined in defaults, alongside PLUS_STRAND
region_minus = _region_from_dual(self, line)
else:
if region_plus.overlap(new_region) and region_plus.strand == new_region.strand:
region_plus.join(new_region)
numreads_plus += 1
elif region_minus.overlap(new_region) and region_minus.strand == new_region.strand:
region_minus.join(new_region)
numreads_minus += 1
else:
if new_region.strand == region_plus.strand:
region_plus.end -= self.proximity
self.__calc_reg_write(region_file, numreads_plus, region_plus)
region_plus = new_region.copy()
numreads_plus = 1
else:
region_minus.end -= self.proximity
self.__calc_reg_write(region_file, numreads_minus, region_minus)
region_minus = new_region.copy()
numreads_minus = 1
if region_plus:
region_plus.end -= self.proximity
regions.append(region_plus)
if region_minus:
region_minus.end -= self.proximity
regions.append(region_minus)
regions.sort(key=lambda x:(x.name, x.start, x.end, x.strand))
for region in regions:
region_file.write(region.write())
def get_zscore(x, mean, sd):
if sd > 0:
return float(x-mean)/sd
else:
return 0 #These points are weird anyway
def read_interesting_regions(self, file_path):
regs = []
try:
regs_file = open(file_path, 'r')
for line in regs_file:
regs.append(line.strip())
except IOError as ioerror:
self.logger.warning("Interesting regions file not found")
return regs # memory inefficient if there's a large number of interesting regions
def plot_enrichment(self, file_path):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
if self.postscript:
import matplotlib
matplotlib.use("PS")
from matplotlib.pyplot import *
from matplotlib import rcParams
rcParams.update({'font.size': 22})
rcParams['legend.fontsize'] = 14
#decide labels
if self.label1:
label_main = self.label1
else:
if self.real_control_path and self.real_experiment_path:
label_main = '%s VS %s'%(os.path.basename(self.real_experiment_path), os.path.basename(self.real_control_path))
else:
label_main = "A VS B"
if self.label2:
label_control = self.label2
else:
if self.replica_path:
label_control = '%s(A) VS %s(A)'%(os.path.basename(self.real_experiment_path), os.path.basename(self.replica_path))
else:
label_control = 'Background distribution'
#self.logger.info("Interesting regions path: %s" % (self.interesting_regions))
interesting_regs = []
if self.interesting_regions:
self.logger.info("Reading interesting regions...")
interesting_regs = read_interesting_regions(self, self.interesting_regions)
#self.logger.info("Interesting regions: %s" % (interesting_regs))
#self.logger.info("Plot path: %s" % (file_path))
interesting_A = []
interesting_M = []
#self.logger.info("disable_significant: %s" % (self.disable_significant_color))
A = []
A_prime = []
M = []
M_significant = []
A_significant = []
M_prime = []
A_medians = []
points = []
minus_points = []
all_points = []
figure(figsize=(14,22))
biggest_A = -sys.maxint #for drawing
smallest_A = sys.maxint #for drawing
biggest_M = 0 #for drawing
self.logger.info("Loading table...")
for line in open(file_path):
sline = line.split()
try:
enrich = dict(zip(enrichment_keys, sline))
# WARNING: for slide inter and slide intra: name2 = 'start:end' (no gene_id, FIXME?)
name2 = enrich['name2'].split(':')
gene_id = name2[0]
if len(name2) >= 2:
transcript_id = name2[1] # consider transcript_id? (exons)
else:
transcript_id = None
if gene_id in interesting_regs or transcript_id in interesting_regs:
interesting_M.append(float(enrich["M"]))
interesting_A.append(float(enrich["A"]))
biggest_A = max(biggest_A, float(enrich["A"]))
smallest_A = min(smallest_A, float(enrich["A"]))
biggest_M = max(biggest_M, abs(float(enrich["M"])))
biggest_A = max(biggest_A, float(enrich["A_prime"]))
smallest_A = min(smallest_A, float(enrich["A_prime"]))
biggest_M = max(biggest_M, abs(float(enrich["M_prime"])))
positive_point = self.zscore*float(enrich["sd"])+float(enrich["mean"])
negative_point = -self.zscore*float(enrich["sd"])+float(enrich["mean"])
A_median = float(enrich["A_median"])
all_points.append((A_median, positive_point, negative_point))
if abs(float(enrich["zscore"])) < self.zscore:
M.append(float(enrich["M"]))
A.append(float(enrich["A"]))
else:
M_significant.append(float(enrich["M"]))
A_significant.append(float(enrich["A"]))
M_prime.append(float(enrich["M_prime"]))
A_prime.append(float(enrich["A_prime"]))
except ValueError:
pass #to skip the header
all_points.sort(key= lambda x:x[0])
for t in all_points:
(A_medians.append(t[0]), points.append(t[1]), minus_points.append(t[2]))
if points:
margin = 1.1
A_medians.append(biggest_A*margin)
points.append(points[-1])
minus_points.append(minus_points[-1])
A_medians.insert(0, smallest_A)
points.insert(0, points[0])
minus_points.insert(0, minus_points[0])
self.logger.info("Plotting points...")
#Background plot
subplot(211, axisbg="lightyellow")
xlabel('Average', fontsize=30)
ylabel('Log2 ratio', fontsize=30)
axis([smallest_A*margin, biggest_A*margin, -biggest_M*margin, biggest_M*margin])
plot(A_prime, M_prime, '.', label=label_control, color = '#666666')
plot(A_medians, points, 'r--', label="Z-score (%s)"%self.zscore)
plot(A_medians, minus_points, 'r--')
axhline(0, linestyle='--', color="grey", alpha=0.75)
leg = legend(fancybox=True, scatterpoints=1, numpoints=1, loc=2, ncol=4, mode="expand")
leg.get_frame().set_alpha(0.5)
#Experiment plot
subplot(212, axisbg="lightyellow")
axis([smallest_A*margin, biggest_A*margin, -biggest_M*margin, biggest_M*margin])
plot(A, M, 'k.', label=label_main)
if self.disable_significant_color:
significant_marker = 'ko'
else:
significant_marker = 'ro'
plot(A_significant, M_significant, significant_marker, label="%s (significant)"%label_main)
plot(A_medians, points, 'r--', label="Z-score (%s)"%self.zscore)
plot(A_medians, minus_points, 'r--')
if self.interesting_regions:
interesting_label = label_main + ' (interesting)'
plot(interesting_A, interesting_M, 'H', label=interesting_label, color='#00EE00') # plotting "interesting" regions
axhline(0, linestyle='--', color="grey", alpha=0.75)
xlabel('Average', fontsize=30)
ylabel('Log2 ratio', fontsize=30)
leg2 = legend(fancybox=True, scatterpoints=1, numpoints=1, loc=2, ncol=4)
leg2.get_frame().set_alpha(0.7)
self._save_figure("enrichment_MA", width=500, height=2800)
else:
self.logger.warning("Nothing to plot.")
except ImportError:
if self.debug:
raise
__matplotlibwarn(self)
def __matplotlibwarn(self):
#FIXME move to utils.py or plotting module
self.logger.warning('Pyicos can not find an installation of matplotlib, so no plot will be drawn. If you want to get a plot with the correlation values, install the matplotlib library.')
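#MA coordinates used throughout this module: M = log2 ratio of the two signals, A = average of their log2 values.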
def __calc_M(signal_a, signal_b):
return math.log(float(signal_a)/float(signal_b), 2)
def __calc_A(signal_a, signal_b):
return (math.log(float(signal_a), 2)+math.log(float(signal_b), 2))/2
def _calculate_MA(self, region_path, read_counts, factor = 1, replica_factor = 1, file_a_reader=None, file_b_reader=None, replica_reader=None):
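#region_path points to the region (or counts) file; when read_counts is True the per-region
#counts are read from it directly, otherwise they are fetched with file_a_reader/file_b_reader.
#factor and replica_factor rescale signal_b and the replica/background signal, respectively.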
tags_a = []
tags_b = []
numreads_background_1 = 0
numreads_background_2 = 0
total_reads_background_1 = 0
total_reads_background_2 = 0
self.logger.debug("Inside _calculate_MA")
self.regions_analyzed_count = 0
enrichment_result = [] #This will hold the name, start and end of the region, plus the A, M, 'A and 'M
if NOWRITE not in self.operations:
out_file = open(self.current_output_path, 'wb')
for region_line in open(region_path):
sline = region_line.split()
region_of_interest = self._region_from_sline(sline)
if region_of_interest:
region_a = None
replica = None
replica_tags = None
signal_a = -1
signal_b = -1
signal_background_1 = -1
signal_background_2 = -1
swap1 = Region()
swap2 = Region()
if read_counts:
signal_a = float(sline[6])
signal_b = float(sline[7])*factor
signal_background_1 = float(sline[8])
signal_background_2 = float(sline[9])*replica_factor
if CHECK_REPLICAS in self.operations:
self.experiment_values.append(signal_background_1)
self.replica_values.append(signal_background_2)
else:
self.logger.debug("Reading tags for %s ..."%region_of_interest)
if self.experiment_format == BAM:
tags_a = len(file_a_reader.get_overlaping_clusters(region_of_interest, overlap=self.overlap))
tags_b = len(file_b_reader.get_overlaping_clusters(region_of_interest, overlap=self.overlap))
else:
tags_a = file_a_reader.get_overlaping_counts(region_of_interest, overlap=self.overlap)
tags_b = file_b_reader.get_overlaping_counts(region_of_interest, overlap=self.overlap)
if self.use_replica:
if self.experiment_format == BAM:
replica_tags = len(replica_reader.get_overlaping_clusters(region_of_interest, overlap=self.overlap))
else:
replica_tags = replica_reader.get_overlaping_counts(region_of_interest, overlap=self.overlap)
self.logger.debug("... done. tags_a: %s tags_b: %s"%(tags_a, tags_b))
#if we are using pseudocounts, use the union, use the intersection otherwise
if (self.pseudocount and (tags_a or tags_b)) or (not self.pseudocount and tags_a and tags_b):
signal_a = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount, factor, self.total_reads_a, tags_a)
signal_b = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount, factor, self.total_reads_b, tags_b)
self.already_norm = True
if not self.counts_file:
if (self.pseudocount and (tags_a or tags_b)) or (not self.pseudocount and tags_a and tags_b):
if self.use_replica:
replica = region_of_interest.copy()
#replica.add_tags(replica_tags)
numreads_background_1 = tags_a
numreads_background_2 = replica_tags
total_reads_background_1 = self.total_reads_a
total_reads_background_2 = self.total_reads_replica
signal_background_1 = signal_a
signal_background_2 = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount,
replica_factor, self.total_reads_replica, replica_tags)
else:
numreads_background_1 = 0
numreads_background_2 = 0
for i in range(0, tags_a+tags_b):
if random.uniform(0,2) > 1:
numreads_background_1 += 1
else:
numreads_background_2 += 1
total_reads_background_1 = total_reads_background_2 = self.average_total_reads
signal_background_1 = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount,
replica_factor, self.average_total_reads, numreads_background_1)
signal_background_2 = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount,
replica_factor, self.average_total_reads, numreads_background_2)
#if there is no data in the replica or in the swap and we are not using pseudocounts, don't write the data
if signal_a > 0 and signal_b > 0 and signal_background_1 > 0 and signal_background_2 > 0 or self.use_MA:
if self.use_MA and not self.already_norm:
A = float(sline[10])
M = float(sline[11])
A_prime = float(sline[16])
M_prime = float(sline[17])
else:
if not self.already_norm: #TODO refactor
if self.len_norm: #read per kilobase in region
signal_a = 1e3*(float(signal_a)/len(region_of_interest))
signal_b = 1e3*(float(signal_b)/len(region_of_interest))
signal_background_1 = 1e3*(float(signal_background_1)/len(region_of_interest))
signal_background_2 = 1e3*(float(signal_background_2)/len(region_of_interest))
if self.n_norm: #per million reads in the sample
signal_a = 1e6*(float(signal_a)/self.total_reads_a)
signal_b = 1e6*(float(signal_b)/self.total_reads_b)
if self.use_replica:
signal_background_1 = signal_a
signal_background_2 = 1e6*(float(signal_background_2)/self.total_reads_replica)
else:
signal_background_1 = 1e6*(float(signal_background_1)/self.average_total_reads)
signal_background_2 = 1e6*(float(signal_background_2)/self.average_total_reads)
A = __calc_A(signal_a, signal_b)
M = __calc_M(signal_a, signal_b)
A_prime = __calc_A(signal_background_1, signal_background_2)
M_prime = __calc_M(signal_background_1, signal_background_2)
if CHECK_REPLICAS in self.operations:
self.experiment_values.append(signal_background_1)
self.replica_values.append(signal_background_2)
if NOWRITE not in self.operations:
out_file.write("%s\n"%("\t".join([region_of_interest.write().rstrip("\n"), str(signal_a), str(signal_b), str(signal_background_1), str(signal_background_2), str(A), str(M), str(self.total_reads_a), str(self.total_reads_b), str(tags_a), str(tags_b), str(A_prime), str(M_prime), str(total_reads_background_1), str(total_reads_background_2), str(numreads_background_1), str(numreads_background_2)])))
self.regions_analyzed_count += 1
self.logger.debug("LEAVING _calculate_MA")
if NOWRITE in self.operations:
return ""
else:
out_file.flush()
out_file.close()
# Outputting to HTML (if specified)
if self.html_output is not None:
self.logger.info("Generating HTML")
try:
from jinja2 import Environment, PackageLoader, Markup
except ImportError:
self.logger.error("Could not find the jinja2 library")
return out_file.name
loadr = PackageLoader('pyicoteolib', 'templates')
env = Environment(loader=loadr)
template = env.get_template('enrich_html.html')
def jinja_read_file(filename):
f = open(filename, 'r')
#for line in f:
# print line
txt = ''.join(f.readlines())
f.close()
return txt
env.globals['jinja_read_file'] = jinja_read_file
if self.galaxy_workarounds: # Galaxy changes the working directory when outputting multiple files
parent_dir = "./"
else:
parent_dir = os.sep.join(out_file.name.split(os.sep)[0:-1]) + "/"
plot_path = parent_dir + "enrichment_MA_" + out_file.name.split(os.sep)[-1] + ".png"
bed_path = parent_dir + out_file.name.split(os.sep)[-1]
html_file = open(self.html_output, 'w')
html_file.write(template.render({'page_title': 'Enrichment results', 'results_output': jinja_read_file(out_file.name), 'plot_path': plot_path, 'bed_path': bed_path}))
html_file.flush()
html_file.close()
return out_file.name
def _calculate_total_lengths(self):
msg = "Calculating enrichment in regions"
if self.counts_file:
self.sorted_region_path = self.counts_file
if (not self.total_reads_a or not self.total_reads_b or (not self.total_reads_replica and self.use_replica)) and not self.use_MA:
self.logger.info("... counting from counts file...")
self.total_reads_a = 0
self.total_reads_b = 0
if self.total_reads_replica:
self.total_reads_replica = 0
else:
self.total_reads_replica = 1
for line in open(self.counts_file):
try:
enrich = dict(zip(enrichment_keys, line.split()))
self.total_reads_a += float(enrich["signal_a"])
self.total_reads_b += float(enrich["signal_b"])
if self.use_replica:
self.total_reads_replica += float(enrich["signal_prime_2"])
except ValueError:
self.logger.debug("(Counting) skip header...")
else:
self.logger.info("... counting number of lines in files...")
if not self.total_reads_a:
if self.experiment_format == BAM:
self.total_reads_a = bam.size(self.current_experiment_path)
else:
self.total_reads_a = sum(1 for line in utils.open_file(self.current_experiment_path, self.experiment_format, logger=self.logger))
if not self.total_reads_b:
if self.experiment_format == BAM:
self.total_reads_b = bam.size(self.current_control_path)
else:
self.total_reads_b = sum(1 for line in utils.open_file(self.current_control_path, self.control_format, logger=self.logger))
if self.use_replica and not self.total_reads_replica:
if self.experiment_format == BAM:
self.total_reads_replica = bam.size(self.replica_path)
else:
self.total_reads_replica = sum(1 for line in utils.open_file(self.replica_path, self.experiment_format, logger=self.logger))
self.logger.debug("Number lines in experiment A: %s Experiment B: %s"%(self.total_reads_a, self.total_reads_b))
if self.use_replica:
msg = "%s using replicas..."%msg
else:
msg = "%s using swap..."%msg
self.logger.info(msg)
self.average_total_reads = (self.total_reads_a+self.total_reads_b)/2
def enrichment(self):
file_a_reader = file_b_reader = replica_reader = None
self.use_replica = (bool(self.replica_path) or (bool(self.counts_file) and self.use_replica_flag))
self.logger.debug("Use replica: %s"%self.use_replica)
if not USE_MA in self.operations:
_calculate_total_lengths(self)
if not self.counts_file:
file_a_reader = utils.read_fetcher(self.current_experiment_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential, only_counts=True)
file_b_reader = utils.read_fetcher(self.current_control_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential, only_counts=True)
if self.use_replica:
replica_reader = utils.read_fetcher(self.current_replica_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential, only_counts=True)
if self.sorted_region_path:
self.logger.info('Using region file %s (%s)'%(self.region_path, self.region_format))
else:
calculate_region(self) #create region file semi automatically
self.total_regions = sum(1 for line in open(self.sorted_region_path))
self.logger.info("... analyzing regions, calculating normalized counts, A / M and replica or swap...")
self.already_norm = False
if self.use_MA:
ma_path = self.counts_file
else:
ma_path = self.sorted_region_path
out_path = _calculate_MA(self, ma_path, bool(self.counts_file), 1, 1, file_a_reader, file_b_reader, replica_reader)
self.already_norm = True
self.logger.debug("Already normalized: %s"%self.already_norm)
if self.tmm_norm:
if CHECK_REPLICAS in self.operations:
self.experiment_values = []
self.replica_values = []
self.logger.info("TMM Normalizing...")
tmm_factor = calc_tmm_factor(self, out_path, self.regions_analyzed_count, False)
replica_tmm_factor = 1
if self.use_replica:
replica_tmm_factor = calc_tmm_factor(self, out_path, self.regions_analyzed_count, True)
#move output file to old output
#use as input
old_output = '%s/notnormalized_%s'%(self._current_directory(), os.path.basename(self.current_output_path))
move(os.path.abspath(self.current_output_path), old_output)
out_path = _calculate_MA(self, old_output, True, tmm_factor, replica_tmm_factor, True) #recalculate with the new factor, using the counts again
if self.quant_norm:
self.logger.info("Full quantile normalization...")
signal_a = []
signal_prime_1 = []
enrich = []
for line in open(out_path):
sline = line.split()
enrich_line = dict(zip(enrichment_keys, sline))
enrich.append(enrich_line)
signal_a.append(float(enrich_line['signal_a']))
signal_prime_1.append(float(enrich_line['signal_prime_1']))
#full quantile normalization
signal_a.sort()
enrich.sort(key=lambda x:float(x['signal_b']))
quant_counts = open('%s/quantcounts_%s'%(self._current_directory(), os.path.basename(self.current_output_path)), 'w')
for i in range(len(enrich)):
enrich[i]['signal_b'] = signal_a[i]
self.logger.info("Full quantile normalization replica...")
#full quantile normalization of the replica
signal_prime_1.sort()
enrich.sort(key=lambda x:float(x['signal_prime_2']))
for i in range(len(enrich)):
enrich[i]['signal_prime_2'] = signal_prime_1[i]
quant_counts.write("%s\n"%"\t".join(str(enrich[i][key]) for key in enrichment_keys[:20])) #write the lines
quant_counts.flush()
out_path = _calculate_MA(self, quant_counts.name, True, 1, 1, True) #recalculate with the new factor, using the counts again
self._manage_temp_file(quant_counts.name)
self.logger.info("%s regions analyzed."%self.regions_analyzed_count)
if not NOWRITE in self.operations:
self.logger.info("Enrichment result saved to %s"%self.current_output_path)
if CHECK_REPLICAS in self.operations:
check_replica(self)
return out_path
def _sub_tmm(counts_a, counts_b, reads_a, reads_b):
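#Weight for each region's M value; up to a sign (which cancels in the weighted mean
#below), this mirrors the delta-method variance weight used by TMM normalization.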
return (counts_a-reads_a)/(counts_a*reads_a) + (counts_b-reads_b)/(counts_b*reads_b)
def calc_tmm_factor(self, file_counts, total_regions, replica):
if replica:
signal_1 = "signal_prime_1"
signal_2 = "signal_prime_2"
M = "M_prime"
reads_2 = self.total_reads_replica
else:
signal_1 = "signal_a"
signal_2 = "signal_b"
M = "M"
reads_2 = self.total_reads_b
values_list = []
#read the file inside the values_list
for line in open(file_counts):
sline = line.split()
values_list.append(dict(zip(enrichment_keys, sline)))
a_trim_number = int(round(total_regions*self.a_trim))
#discard the bad A
self.logger.debug("Removing the worst A (%s regions, %s percent)"%(a_trim_number, self.a_trim*100))
values_list.sort(key=lambda x:float(x["A"])) #sort by A
for i in range (0, a_trim_number):
values_list.pop(0)
values_list.sort(key=lambda x:float(x[M])) #sort by M
m_trim_number = int(round(total_regions*(self.m_trim/2))) #this number is half the value of the flag, because we will trim half below, and half over
#remove on the left
for i in range(0, m_trim_number):
values_list.pop(0)
#remove on the right
for i in range(0, m_trim_number):
values_list.pop(-1)
#now calculate the normalization factor
arriba = 0
abajo = 0
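#arriba ("top") accumulates the weighted M values and abajo ("bottom") the weights; the TMM factor is 2 raised to their ratio (a weighted mean of M).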
for value in values_list:
w = _sub_tmm(float(value[signal_1]), float(value[signal_2]), self.total_reads_a, reads_2)
arriba += w*float(value[M])
abajo += w
try:
factor = 2**(arriba/abajo)
except ZeroDivisionError:
self.logger.warning("Division by zero, TMM factor could not be calculated.")
factor = 1
if replica:
self.logger.info("Replica TMM Normalization Factor: %s"%factor)
else:
self.logger.info("TMM Normalization Factor: %s"%factor)
return factor
def __load_enrichment_result(values_path):
ret = []
for line in open(values_path):
sline = line.split()
try:
float(sline[1])
ret.append(dict(zip(enrichment_keys, sline)))
except ValueError:
pass
return ret
def calculate_zscore(self, values_path):
num_regions = sum(1 for line in open(values_path))
bin_size = int(self.binsize*num_regions)
if bin_size < 50:
self.logger.warning("The bin size results in a sliding window smaller than 50, adjusting window to 50 in order to get statistically meaningful results.")
bin_size = 50
bin_step = max(1, int(round(self.bin_step*bin_size)))
self.logger.info("Enrichment window calculation using a sliding window size of %s, sliding with a step of %s"%(bin_size, bin_step))
self.logger.info("... calculating zscore...")
enrichment_result = __load_enrichment_result(values_path)
enrichment_result.sort(key= lambda x:(float(x["A_prime"])))
self.logger.debug("Number of loaded counts: %s"%len(enrichment_result))
self.points = []
#get the standard deviations
for i in range(0, num_regions-bin_size+bin_step, bin_step):
#get the slice
if i+bin_size < num_regions:
result_chunk = enrichment_result[i:i+bin_size]
else:
result_chunk = enrichment_result[i:] #last chunk
#retrieve the values
mean_acum = 0
a_acum = 0
Ms_replica = []
for entry in result_chunk:
mean_acum += float(entry["M_prime"])
a_acum += float(entry["A_prime"])
Ms_replica.append(float(entry["M_prime"]))
#add them to the points of mean and sd
mean = mean_acum/len(result_chunk)
sd = math.sqrt((sum((x - mean)**2 for x in Ms_replica))/len(Ms_replica))
#the A median
A_median = a_acum / len(result_chunk)
self.points.append([A_median, mean, sd]) #The A assigned to the window, the mean and the standard deviation
#self.logger.debug("Window of %s length, with A median: %s mean: %s sd: %s"%(len(result_chunk), self.points[-1][0], self.points[-1][1], self.points[-1][2], len(self.points)))
#update z scores
for entry in enrichment_result:
entry["A_median"] = 0
entry["mean"] = 0
entry["sd"] = 0
entry["zscore"] = 0
closest_A = sys.maxint
sd_position = 0
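#self.points is ordered by A, so scan forward until the distance to this entry's A stops shrinking.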
for i in range(0, len(self.points)):
new_A = self.points[i][0]
if new_A != closest_A: #skip repeated points
if abs(closest_A - float(entry["A"])) >= abs(new_A - float(entry["A"])):
closest_A = new_A
sd_position = i
else:
break #already found, no need to go further since the points are ordered
entry["A_median"] = closest_A
if self.points: #only calculate if there were windows...
__sub_zscore(self.sdfold, entry, self.points[sd_position])
if not self.points: # ... otherwise give a warning
self.logger.warning("Insufficient number of regions analyzed (%s), z-score values could not be calculated"%num_regions)
enrichment_result.sort(key=lambda x:(x["name"], int(x["start"]), int(x["end"])))
old_file_path = '%s/before_zscore_%s'%(self._current_directory(), os.path.basename(values_path)) #create path for the outdated file
move(os.path.abspath(values_path), old_file_path) #move the file
new_file = open(values_path, 'w') #open a new file in the now empty file space
if not self.skip_header:
new_file.write('\t'.join(enrichment_keys))
new_file.write('\n')
for entry in enrichment_result:
new_file.write("\t".join(str(entry[key]) for key in enrichment_keys)+"\n")
self._manage_temp_file(old_file_path)
return values_path
def __sub_zscore(sdfold, entry, point):
entry["mean"] = str(point[1])
entry["sd"] = str(point[2])
entry["zscore"] = str(get_zscore(float(entry["M"]), float(entry["mean"]), sdfold*float(entry["sd"])))
def check_replica(self):
#discard everything below the flag
new_experiment = []
new_replica = []
min_value = sys.maxint
max_value = -sys.maxint
for i in range(len(self.replica_values)):
if self.experiment_values[i] > self.count_filter and self.replica_values[i] > self.count_filter:
new_experiment.append(math.log(self.experiment_values[i], 2))
new_replica.append(math.log(self.replica_values[i], 2))
min_value = min(min_value, math.log(self.experiment_values[i], 2), math.log(self.replica_values[i], 2))
max_value = max(max_value, math.log(self.experiment_values[i], 2), math.log(self.replica_values[i], 2))
#print self.replica_values
self.experiment_values = new_experiment
self.replica_values = new_replica
try:
if self.postscript:
import matplotlib
matplotlib.use("PS")
from matplotlib.pyplot import plot, show, xlabel, ylabel, axhline, axis, clf, text, title, xlim, ylim
except:
__matplotlibwarn(self)
return 0
clf()
r_squared = utils.pearson(self.experiment_values, self.replica_values)**2
text(min_value+abs(max_value)*0.1, max_value-abs(max_value)*0.2, r'Pearson $R^2$= %s'%round(r_squared, 3), fontsize=18, bbox={'facecolor':'yellow', 'alpha':0.5, 'pad':10})
xlabel("log2(%s)"%self.experiment_label, fontsize=18)
ylabel("log2(%s)"%self.replica_label, fontsize=18)
xlim(min_value, max_value)
ylim(min_value, max_value)
title(self.title_label, fontsize=24)
plot(self.experiment_values, self.replica_values, '.')
self._save_figure("check_replica")
def check_replica_correlation(self):
"No usado, de momento"
min_tags = 20
experiment_reader = utils.read_fetcher(self.current_experiment_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential)
replica_reader = utils.read_fetcher(self.current_replica_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential)
correlations_acum = 0
num_correlations = 0
for region_line in open(self.region_path):
sline = region_line.split()
region_experiment = self._region_from_sline(sline)
region_replica = region_experiment.copy()
tags_experiment = experiment_reader.get_overlaping_clusters(region_experiment, overlap=1)
tags_replica = replica_reader.get_overlaping_clusters(region_experiment, overlap=1)
count_experiment = len(tags_experiment)
count_replica = len(tags_replica)
correlations = []
if count_experiment+count_replica > min_tags:
region_experiment.add_tags(tags_experiment, clusterize=True)
region_replica.add_tags(tags_replica, clusterize=True)
num_correlations += 1
correlation = utils.pearson(region_experiment.get_array(), region_replica.get_array())
correlations_acum += max(0, correlation)
correlations.append(correlation)
print(correlations_acum/num_correlations)
try:
if self.postscript:
import matplotlib
matplotlib.use("PS")
from matplotlib.pyplot import plot, boxplot, show, legend, figure, xlabel, ylabel, subplot, axhline, axis
except:
__matplotlibwarn(self)
return 0
print(correlations)
boxplot(correlations)
self._save_figure("check_replica") | gpl-3.0 |
jenshnielsen/basemap | examples/maskoceans.py | 4 | 1922 | from mpl_toolkits.basemap import Basemap, shiftgrid, maskoceans, interp
import numpy as np
import matplotlib.pyplot as plt
# example showing how to mask out 'wet' areas on a contour or pcolor plot.
topodatin = np.loadtxt('etopo20data.gz')
lonsin = np.loadtxt('etopo20lons.gz')
latsin = np.loadtxt('etopo20lats.gz')
# shift data so lons go from -180 to 180 instead of 20 to 380.
topoin,lons1 = shiftgrid(180.,topodatin,lonsin,start=False)
lats1 = latsin
fig=plt.figure()
# setup basemap
m=Basemap(resolution='l',projection='lcc',lon_0=-100,lat_0=40,width=8.e6,height=6.e6)
lons, lats = np.meshgrid(lons1,lats1)
x, y = m(lons, lats)
# interpolate land/sea mask to topo grid, mask ocean values.
# output may look 'blocky' near coastlines, since data is at much
# lower resolution than land/sea mask.
topo = maskoceans(lons, lats, topoin)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (original grid)')
fig=plt.figure()
# interpolate topo data to higher resolution grid (to better match
# the land/sea mask). Output looks less 'blocky' near coastlines.
nlats = 3*topoin.shape[0]
nlons = 3*topoin.shape[1]
lons = np.linspace(-180,180,nlons)
lats = np.linspace(-90,90,nlats)
lons, lats = np.meshgrid(lons, lats)
x, y = m(lons, lats)
topo = interp(topoin,lons1,lats1,lons,lats,order=1)
# interpolate land/sea mask to topo grid, mask ocean values.
topo = maskoceans(lons, lats, topo)
# make contour plot (ocean values will be masked)
CS=m.contourf(x,y,topo,np.arange(-300,3001,50),cmap=plt.cm.jet,extend='both')
#im=m.pcolormesh(x,y,topo,cmap=plt.cm.jet,vmin=-300,vmax=3000)
# draw coastlines.
m.drawcoastlines()
plt.title('ETOPO data with marine areas masked (data on finer grid)')
plt.show()
| gpl-2.0 |
eickenberg/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 19 | 2844 | """
Testing for mean shift clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
"""Test estimate_bandwidth"""
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
""" Test MeanShift algorithm """
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_meanshift_predict():
"""Test MeanShift.predict"""
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_unfitted():
"""Non-regression: before fit, there should be not fitted attributes."""
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
"""
Test the bin seeding technique which can be used in the mean shift
algorithm
"""
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.5, 1.5], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
test_bins = get_bin_seeds(X, 0.01, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(test_result) == 6)
| bsd-3-clause |
broadinstitute/cms | cms/power/power_func.py | 1 | 8625 | ## functions for analyzing empirical/simulated CMS output
## last updated 09.14.2017 vitti@broadinstitute.org
import matplotlib as mp
mp.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import math
import sys
import scipy.stats
from scipy.stats import percentileofscore
###################
## DEFINE SCORES ##
###################
def write_master_likesfile(writefilename, model, selpop, freq,basedir, miss = "neut",):
'''adapted from run_likes_func.py'''
writefile = open(writefilename, 'w')
for score in ['ihs', 'nsl', 'delihh']:
hitlikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_" + str(freq) + "_causal.txt"#_smoothed.txt"
misslikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_" + str(freq) + "_" + miss + ".txt"#"_smoothed.txt"
#assert(os.path.isfile(hitlikesfilename) and os.path.isfile(misslikesfilename))
writefile.write(hitlikesfilename + "\n" + misslikesfilename + "\n")
for score in ['xpehh', 'fst', 'deldaf']:
hitlikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_choose_" + str(freq) + "_causal.txt"#_smoothed.txt"
misslikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_choose_" + str(freq) + "_" + miss + ".txt"#"_smoothed.txt"
#assert(os.path.isfile(hitlikesfilename) and os.path.isfile(misslikesfilename))
writefile.write(hitlikesfilename + "\n" + misslikesfilename + "\n")
writefile.close()
print("wrote to: " + writefilename)
return
###############
## REGION ID ##
###############
def get_window(istart, physpos, scores, windowlen = 100000):
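#Collect the scores of every SNP whose position lies within windowlen bases downstream of the SNP at index istart.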
window_scores = [scores[istart]]
startpos = physpos[istart]
pos = startpos
iscore = istart
while pos < (startpos + windowlen):
iscore += 1
if iscore >= len(scores):
break
window_scores.append(scores[iscore])
pos = physpos[iscore]
#print(str(pos) + " " + str(startpos))
return window_scores
def check_outliers(scorelist, cutoff = 3):
numscores = len(scorelist)
outliers = [item for item in scorelist if item > cutoff]
numoutliers = len(outliers)
percentage = (float(numoutliers) / float(numscores)) * 100.
return percentage
def check_rep_windows(physpos, scores, windowlen = 100000, cutoff = 3, totalchrlen=1000000):
'''
previous implementation: !!!! this is going to result in false positives whenever I have a small uptick right near the edge of the replicate
'''
#check window defined by each snp as starting point
rep_percentages = []
numSnps = len(physpos)
numWindows = 0
#get exhaustive windows and stop at chrom edge
for isnp in range(numSnps):
if physpos[isnp] + windowlen < totalchrlen:
numWindows +=1
else:
#print(str(physpos[isnp]) + "\t")
break
for iPos in range(numWindows):
window_scores = get_window(iPos, physpos, scores, windowlen)
percentage = check_outliers(window_scores, cutoff)
rep_percentages.append(percentage)
return rep_percentages
def merge_windows(chrom_signif, windowlen, maxGap = 100000):
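#Merge per-SNP windows that overlap or fall within maxGap of each other into contiguous (start, end) intervals.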
print('should implement this using bedtools')
starts, ends = [], []
contig = False
this_windowlen = 0
starting_pos = 0
if len(chrom_signif) > 0:
for i_start in range(len(chrom_signif) - 1):
if not contig:
starts.append(chrom_signif[i_start])
this_windowlen = windowlen #unmerged, default
starting_pos = chrom_signif[i_start]
if ((chrom_signif[i_start] + this_windowlen) > chrom_signif[i_start + 1]): #contiguous
contig = True
this_windowlen = chrom_signif[i_start +1] + windowlen - starting_pos
#or, could also be contiguous in the situation where the next snp is not within this window because there doesn't exist such a snp
elif chrom_signif[i_start +1] >=(chrom_signif[i_start] + this_windowlen) and chrom_signif[i_start +1] < (chrom_signif[i_start] + maxGap):
contig = True
this_windowlen = chrom_signif[i_start +1] + windowlen - starting_pos
else:
contig = False
if not contig:
windowend = chrom_signif[i_start] + windowlen
ends.append(windowend)
if contig: #last region is overlapped by its predecessor
ends.append(chrom_signif[-1] + windowlen)
else:
starts.append(chrom_signif[-1])
ends.append(chrom_signif[-1] + windowlen)
assert len(starts) == len(ends)
return starts, ends
##########################
## POWER & SIGNIFICANCE ##
##########################
def calc_pr(all_percentages, threshhold):
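#Fraction of neutral replicates in which at least one window exceeds the threshold, i.e. an empirical false-positive rate.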
numNeutReps_exceedThresh = 0
totalnumNeutReps = len(all_percentages)
for irep in range(totalnumNeutReps):
if len(all_percentages[irep]) != 0:
if max(all_percentages[irep]) > threshhold:
numNeutReps_exceedThresh +=1
numNeutReps_exceedThresh, totalnumNeutReps = float(numNeutReps_exceedThresh), float(totalnumNeutReps)
if totalnumNeutReps != 0:
pr = numNeutReps_exceedThresh / totalnumNeutReps
else:
pr = 0
print('ERROR; empty set')
return pr
def get_causal_rank(values, causal_val):
if np.isnan(causal_val):
return(float('nan'))
assert(causal_val in values)
cleanvals = []
for item in values:
if not np.isnan(item) and not np.isinf(item):
cleanvals.append(item)
values = cleanvals
values.sort()
values.reverse()
causal_rank = values.index(causal_val)
return causal_rank
def get_cdf_from_causal_ranks(causal_ranks):
numbins = max(causal_ranks) #? heuristic
counts, bins = np.histogram(causal_ranks, bins=numbins, normed = True) #doublecheck
cdf = np.cumsum(counts)
return bins, cdf
def get_pval(all_simscores, thisScore):
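#Empirical one-sided p-value from the rank of thisScore among all_simscores, which np.searchsorted assumes to be sorted in ascending order.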
r = np.searchsorted(all_simscores,thisScore)
n = len(all_simscores)
pval = 1. - ((r + 1.) / (n + 1.))
if pval > 0:
#pval *= nSnps #Bonferroni
return pval
else:
#print("r: " +str(r) + " , n: " + str(n))
pval = 1. - (r/(n+1))
#pval *= nSnps #Bonferroni
return pval
###############
## VISUALIZE ##
###############
def quick_plot(ax, pos, val, ylabel,causal_index=-1):
ax.scatter(pos, val, s=.8)
if causal_index != -1:
ax.scatter(pos[causal_index], val[causal_index], color='r', s=4)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize('6')
ax.set_ylabel(ylabel, fontsize='6')
#ax.set_xlim([0, 1500000]) #make flexible?
ax.yaxis.set_label_position('right')
#ax.set_ylim([min(val), max(val)])
return ax
def plot_dist(allvals, savefilename= "/web/personal/vitti/test.png", numBins=1000):
#print(allvals)
#get rid of nans and infs
#cleanvals = [item for item in allvals if not np.isnan(item)]
#allvals = cleanvals
allvals = np.array(allvals)
allvals = allvals[~np.isnan(allvals)]
allvals = allvals[~np.isinf(allvals)]
#allvals = list(allvals)
#print(allvals)
print("percentile for score = 10: " + str(percentileofscore(allvals, 10)))
print("percentile for score = 15: " + str(percentileofscore(allvals, 15)))
if len(allvals) > 0:
f, ax = plt.subplots(1)
ax.hist(allvals, bins=numBins)
plt.savefig(savefilename)
print('plotted to ' + savefilename)
return
def plotManhattan(ax, neut_rep_scores, emp_scores, chrom_pos, nSnps, maxSkipVal = 0, zscores = True):
#neut_rep_scores.sort()
#print('sorted neutral scores...')
lastpos = 0
for chrom in range(1,23):
ichrom = chrom-1
if ichrom%2 == 0:
plotcolor = "darkblue"
else:
plotcolor = "lightblue"
if zscores == True:
#http://stackoverflow.com/questions/3496656/convert-z-score-z-value-standard-score-to-p-value-for-normal-distribution-in?rq=1
#Z SCORE cf SG email 103116
#pvals = [get_pval(neut_rep_scores, item) for item in emp_scores[ichrom]]
pvalues = []
for item in emp_scores[ichrom]:
if item < maxSkipVal: #speed up this process by ignoring anything obviously insignificant
pval = 1
else:
#print('scipy')
#sys.exit()
pval = scipy.stats.norm.sf(abs(item))
pvalues.append(pval)
#else:
# pval = get_pval(neut_rep_scores, item)
#pvalues.append(pval)
print("calculated pvalues for chrom " + str(chrom))
chrom_pos = range(lastpos, lastpos + len(pvalues))
logtenpvals = [(-1. * math.log10(pval)) for pval in pvalues]
ax.scatter(chrom_pos, logtenpvals, color =plotcolor, s=.5)
lastpos = chrom_pos[-1]
else:
chrom_pos = range(lastpos, lastpos + len(emp_scores[ichrom]))
ax.scatter(chrom_pos, emp_scores[ichrom], color=plotcolor, s=.5)
lastpos = chrom_pos[-1]
return ax
def plotManhattan_extended(ax, emp_scores, chrom_pos, chrom):
''' makes a figure more like in Karlsson 2013 instead of Grossman 2013'''
ax.plot(chrom_pos, emp_scores, linestyle='None', marker=".", markersize=.3, color="black")
ax.set_ylabel('chr' + str(chrom), fontsize=6, rotation='horizontal')
labels = ax.get_yticklabels()
ax.set_yticklabels(labels, fontsize=6)
ax.set_axis_bgcolor('LightGray')
return ax
| bsd-2-clause |
ishanic/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
| bsd-3-clause |
appapantula/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
rhattersley/cartopy | lib/cartopy/tests/mpl/test_ticker.py | 3 | 8574 | # (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
from matplotlib.axes import Axes
import pytest
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import GeoAxes
from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter
def test_LatitudeFormatter_bad_axes():
formatter = LatitudeFormatter()
formatter.axis = Mock(axes=Mock(Axes, projection=ccrs.PlateCarree()))
message = 'This formatter can only be used with cartopy axes.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LatitudeFormatter_bad_projection():
formatter = LatitudeFormatter()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=ccrs.Orthographic()))
message = 'This formatter cannot be used with non-rectangular projections.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LongitudeFormatter_bad_axes():
formatter = LongitudeFormatter()
formatter.axis = Mock(axes=Mock(Axes, projection=ccrs.PlateCarree()))
message = 'This formatter can only be used with cartopy axes.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LongitudeFormatter_bad_projection():
formatter = LongitudeFormatter()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=ccrs.Orthographic()))
message = 'This formatter cannot be used with non-rectangular projections.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LatitudeFormatter():
formatter = LatitudeFormatter()
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90\u00B0S', u'60\u00B0S', u'30\u00B0S', u'0\u00B0',
u'30\u00B0N', u'60\u00B0N', u'90\u00B0N']
assert result == expected
def test_LatitudeFormatter_degree_symbol():
formatter = LatitudeFormatter(degree_symbol='')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90S', u'60S', u'30S', u'0',
u'30N', u'60N', u'90N']
assert result == expected
def test_LatitudeFormatter_number_format():
formatter = LatitudeFormatter(number_format='.2f')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90.00\u00B0S', u'60.00\u00B0S', u'30.00\u00B0S',
u'0.00\u00B0', u'30.00\u00B0N', u'60.00\u00B0N',
u'90.00\u00B0N']
assert result == expected
def test_LatitudeFormatter_mercator():
formatter = LatitudeFormatter()
p = ccrs.Mercator()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-15496570.739707904, -8362698.548496634,
-3482189.085407435, 0.0, 3482189.085407435,
8362698.548496634, 15496570.739707898]
result = [formatter(tick) for tick in test_ticks]
expected = [u'80\u00B0S', u'60\u00B0S', u'30\u00B0S', u'0\u00B0',
u'30\u00B0N', u'60\u00B0N', u'80\u00B0N']
assert result == expected
def test_LatitudeFormatter_small_numbers():
formatter = LatitudeFormatter(number_format='.7f')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [40.1275150, 40.1275152, 40.1275154]
result = [formatter(tick) for tick in test_ticks]
expected = [u'40.1275150\u00B0N', u'40.1275152\u00B0N',
u'40.1275154\u00B0N']
assert result == expected
def test_LongitudeFormatter_central_longitude_0():
formatter = LongitudeFormatter(dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180\u00B0W', u'120\u00B0W', u'60\u00B0W', u'0\u00B0',
u'60\u00B0E', u'120\u00B0E', u'180\u00B0E']
assert result == expected
def test_LongitudeFormatter_central_longitude_180():
formatter = LongitudeFormatter(zero_direction_label=True)
p = ccrs.PlateCarree(central_longitude=180)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'0\u00B0E', u'60\u00B0E', u'120\u00B0E', u'180\u00B0',
u'120\u00B0W', u'60\u00B0W', u'0\u00B0W']
assert result == expected
def test_LongitudeFormatter_central_longitude_120():
formatter = LongitudeFormatter()
p = ccrs.PlateCarree(central_longitude=120)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'60\u00B0W', u'0\u00B0', u'60\u00B0E', u'120\u00B0E',
u'180\u00B0', u'120\u00B0W', u'60\u00B0W']
assert result == expected
def test_LongitudeFormatter_degree_symbol():
formatter = LongitudeFormatter(degree_symbol='',
dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180W', u'120W', u'60W', u'0', u'60E', u'120E', u'180E']
assert result == expected
def test_LongitudeFormatter_number_format():
formatter = LongitudeFormatter(number_format='.2f',
dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180.00\u00B0W', u'120.00\u00B0W', u'60.00\u00B0W',
u'0.00\u00B0', u'60.00\u00B0E', u'120.00\u00B0E',
u'180.00\u00B0E']
assert result == expected
def test_LongitudeFormatter_mercator():
formatter = LongitudeFormatter(dateline_direction_label=True)
p = ccrs.Mercator()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-20037508.342783064, -13358338.895188706,
-6679169.447594353, 0.0, 6679169.447594353,
13358338.895188706, 20037508.342783064]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180\u00B0W', u'120\u00B0W', u'60\u00B0W', u'0\u00B0',
u'60\u00B0E', u'120\u00B0E', u'180\u00B0E']
assert result == expected
def test_LongitudeFormatter_small_numbers_0():
formatter = LongitudeFormatter(number_format='.7f')
p = ccrs.PlateCarree(central_longitude=0)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-17.1142343, -17.1142340, -17.1142337]
result = [formatter(tick) for tick in test_ticks]
expected = [u'17.1142343\u00B0W', u'17.1142340\u00B0W',
u'17.1142337\u00B0W']
assert result == expected
def test_LongitudeFormatter_small_numbers_180():
formatter = LongitudeFormatter(zero_direction_label=True,
number_format='.7f')
p = ccrs.PlateCarree(central_longitude=180)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-17.1142343, -17.1142340, -17.1142337]
result = [formatter(tick) for tick in test_ticks]
expected = [u'162.8857657\u00B0E', u'162.8857660\u00B0E',
u'162.8857663\u00B0E']
assert result == expected
| lgpl-3.0 |
sevenian3/ChromaStarPy | LevelPopsGasServer.py | 1 | 55996 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 24 14:13:47 2017
@author: ishort
"""
import math
import Useful
import ToolBox
#import numpy
#JB#
#from matplotlib.pyplot import plot, title, show, scatter
#storage for fits (not all may be used)
uw = []
uwa = []
uwb = []
uwStage = []
uwbStage = []
uwu = []
uwl = []
uua=[]
uub=[]
"""
#a function to create a cubic function fit extrapolation
def cubicFit(x,y):
coeffs = numpy.polyfit(x,y,3)
#returns an array of coefficents for the cubic fit of the form
#Ax^3 + Bx^2 + Cx + D as [A,B,C,D]
return coeffs
#this will work for any number of data points!
def valueFromFit(fit,x):
#return the value y for a given fit, at point x
return (fit[0]*(x**3)+fit[1]*(x**2)+fit[2]*x+fit[3])
#holds the five temperature at which we have partition function data
"""
masterTemp = [130, 500, 3000, 8000, 10000]
#JB#
#def levelPops(lam0In, logNStage, chiL, log10UwStage, gwL, numDeps, temp):
def levelPops(lam0In, logNStage, chiL, logUw, gwL, numDeps, temp):
""" Returns depth distribution of occupation numbers in lower level of b-b transition,
// Input parameters:
// lam0 - line centre wavelength in nm
// logNStage - log_e density of absorbers in relevant ion stage (cm^-3)
// logUw - temperature-dependent (log) partition function values for the stage
// gwL - statistical weight of the lower atomic E-level
// chiL - energy of lower atomic E-level of b-b transition in eV
// Also needs atmospheric structure information:
// numDeps
// temp structure """
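#Editor's sketch (not in the original source): the loop below implements the
#Boltzmann excitation formula in log form,
#  ln n_l = ln N_stage - chi_l/(k*T) + ln g_l - ln U(T),
#with U(T) interpolated from the masterTemp partition-function grid.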
c = Useful.c()
logC = Useful.logC()
k = Useful.k()
logK = Useful.logK()
logH = Useful.logH()
logEe = Useful.logEe()
logMe = Useful.logMe()
ln10 = math.log(10.0)
logE = math.log10(math.e); #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
#//double logNl = logNlIn * ln10; // Convert to base e
#// Partition functions passed in are vectors of temperature-dependent base 10 log Us
#// Convert to natural logs:
#double thisLogUw, Ttheta;
thisLogUw = 0.0 # //default initialization
#logUw = [ 0.0 for i in range(5) ]
logE10 = math.log(10.0)
#print("log10UwStage ", log10UwStage)
#for kk in range(len(logUw)):
# logUw[kk] = logE10*log10UwStage[kk] #// lburns new loop
logGwL = math.log(gwL)
#//System.out.println("chiL before: " + chiL);
#// If we need to subtract chiI from chiL, do so *before* converting to tiny numbers in ergs!
#////For testing with Ca II lines using gS3 internal line list only:
#//boolean ionized = true;
#//if (ionized) {
#// //System.out.println("ionized, doing chiL - chiI: " + ionized);
#// // chiL = chiL - chiI;
#// chiL = chiL - 6.113;
#// }
#// //
#//Log of line-center wavelength in cm
logLam0 = math.log(lam0In) #// * 1.0e-7);
#// energy of b-b transition
logTransE = logH + logC - logLam0 #//ergs
if (chiL <= 0.0):
chiL = 1.0e-49
logChiL = math.log(chiL) + Useful.logEv() #// Convert lower E-level from eV to ergs
logBoltzFacL = logChiL - Useful.logK() #// Pre-factor for exponent of excitation Boltzmann factor
boltzFacL = math.exp(logBoltzFacL)
boltzFacGround = 0.0 / k #//I know - it's zero, but let's do it this way anyway
#// return a 1D numDeps array of logarithmic number densities
#// level population of lower level of bb transition (could be in either stage I or II!)
logNums = [ 0.0 for i in range(numDeps)]
#double num, logNum, expFac;
#JB#
#print("thisLogUw:",numpy.shape(logUw))
logUwFit = ToolBox.cubicFit(masterTemp,logUw)#u(T) fit
uw.append(logUwFit)
#JB#
for id in range(numDeps):
#//Determine temperature dependent partition functions Uw:
#Ttheta = 5040.0 / temp[0][id]
#//NEW Determine temperature dependent partition functions Uw: lburns
thisTemp = temp[0][id]
"""
if (Ttheta >= 1.0):
thisLogUw = logUw[0]
if (Ttheta <= 0.5):
thisLogUw = logUw[1]
if (Ttheta > 0.5 and Ttheta < 1.0):
thisLogUw = ( logUw[1] * (Ttheta - 0.5)/(1.0 - 0.5) ) \
+ ( logUw[0] * (1.0 - Ttheta)/(1.0 - 0.5) )
"""
#JB#
thisLogUw = ToolBox.valueFromFit(logUwFit,thisTemp)#u(T) value extrapolated
#JB#
if (thisTemp >= 10000.0):
thisLogUw = logUw[4]
if (thisTemp <= 130.0):
thisLogUw = logUw[0]
"""
if (thisTemp > 130 and thisTemp <= 500):
thisLogUw = logUw[1] * (thisTemp - 130)/(500 - 130) \
+ logUw[0] * (500 - thisTemp)/(500 - 130)
if (thisTemp > 500 and thisTemp <= 3000):
thisLogUw = logUw[2] * (thisTemp - 500)/(3000 - 500) \
+ logUw[1] * (3000 - thisTemp)/(3000 - 500)
if (thisTemp > 3000 and thisTemp <= 8000):
thisLogUw = logUw[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUw[2] * (8000 - thisTemp)/(8000 - 3000)
if (thisTemp > 8000 and thisTemp < 10000):
thisLogUw = logUw[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUw[3] * (10000 - thisTemp)/(10000 - 8000)
"""
#print("logUw ", logUw, " thisLogUw ", thisLogUw)
#//System.out.println("LevPops: ionized branch taken, ionized = " + ionized);
#// Take stat weight of ground state as partition function:
logNums[id] = logNStage[id] - boltzFacL / temp[0][id] + logGwL - thisLogUw #// lower level of b-b transition
#print("LevelPopsServer.stagePops id ", id, " logNStage[id] ", logNStage[id], " boltzFacL ", boltzFacL, " temp[0][id] ", temp[0][id], " logGwL ", logGwL, " thisLogUw ", thisLogUw, " logNums[id] ", logNums[id]);
#// System.out.println("LevelPops: id, logNums[0][id], logNums[1][id], logNums[2][id], logNums[3][id]: " + id + " "
#// + Math.exp(logNums[0][id]) + " "
#// + Math.exp(logNums[1][id]) + " "
#// + Math.exp(logNums[2][id]) + " "
#// + Math.exp(logNums[3][id]));
#//System.out.println("LevelPops: id, logNums[0][id], logNums[1][id], logNums[2][id], logNums[3][id], logNums[4][id]: " + id + " "
#// + logE * (logNums[0][id]) + " "
#// + logE * (logNums[1][id]) + " "
#// + logE * (logNums[2][id]) + " "
# // + logE * (logNums[3][id]) + " "
#// + logE * (logNums[4][id]) );
#//System.out.println("LevelPops: id, logIonFracI, logIonFracII: " + id + " " + logE*logIonFracI + " " + logE*logIonFracII
#// + "logNum, logNumI, logNums[0][id], logNums[1][id] "
#// + logE*logNum + " " + logE*logNumI + " " + logE*logNums[0][id] + " " + logE*logNums[1][id]);
#//System.out.println("LevelPops: id, logIonFracI: " + id + " " + logE*logIonFracI
#// + "logNums[0][id], boltzFacL/temp[0][id], logNums[2][id]: "
#// + logNums[0][id] + " " + boltzFacL/temp[0][id] + " " + logNums[2][id]);
#//id loop
#stop
#print (uw)
return logNums
#//This version - ionization equilibrium *WITHOUT* molecules - logNum is TOTAL element population
#def stagePops2(logNum, Ne, chiIArr, log10UwAArr, \
# numMols, logNumB, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \
# numDeps, temp):
def stagePops(logNum, Ne, chiIArr, logUw, \
numDeps, temp):
#line 1: //species A data - ionization equilibrium of A
#line 2: //data for set of species "B" - molecular equilibrium for set {AB}
"""Ionization equilibrium routine WITHOUT molecule formation:
// Returns depth distribution of ionization stage populations
// Input parameters:
// logNum - array with depth-dependent total element number densities (cm^-3)
// chiI1 - ground state ionization energy of neutral stage
// chiI2 - ground state ionization energy of singly ionized stage
// Also needs atmospheric structure information:
// numDeps
// temp structure
// rho structure
// Atomic element A is the one whose ionization fractions are being computed
//
"""
ln10 = math.log(10.0)
logE = math.log10(math.e) #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
numStages = len(chiIArr) #// + 1; //need one more stage above the highest stage to be populated
#// var numMols = dissEArr.length;
#// Partition functions passed in are vectors of temperature-dependent base 10 log Us
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Default initializations:
#//We need one more stage in size of saha factor than number of stages we're actually populating
thisLogUw = [ 0.0 for i in range(numStages+1) ]
for i in range(numStages+1):
thisLogUw[i] = 0.0
logE10 = math.log(10.0)
#//atomic ionization stage Boltzmann factors:
#double logChiI, logBoltzFacI;
boltzFacI = [ 0.0 for i in range(numStages) ]
#print("numStages ", numStages, " Useful.logEv ", Useful.logEv())
for i in range(numStages):
#print("i ", i, " chiIArr ", chiIArr[i])
logChiI = math.log(chiIArr[i]) + Useful.logEv()
logBoltzFacI = logChiI - Useful.logK()
boltzFacI[i] = math.exp(logBoltzFacI)
logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH())
#// return a 2D 5 x numDeps array of logarithmic number densities
#// Row 0: neutral stage ground state population
#// Row 1: singly ionized stage ground state population
#// Row 2: doubly ionized stage ground state population
#// Row 3: triply ionized stage ground state population
#// Row 4: quadruply ionized stage ground state population
#double[][] logNums = new double[numStages][numDeps];
logNums = [ [ 0.0 for i in range(numDeps)] for j in range(numStages) ]
#//We need one more stage in size of saha factor than number of stages we're actually populating
#// for index accounting purposes
#// For atomic ionization stages:
logSaha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
saha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
#//
logIonFrac = [ 0.0 for i in range(numStages) ]
#double expFac, logNe;
#// Now - molecular variables:
thisLogUwA = 0.0 #// element A
#thisLogQwAB = math.log(300.0)
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
logUwA = [ 0.0 for i in range(5) ]
#JB#
uua=[]
#uub=[]
#qwab=[]
for iStg in range(numStages):
currentUwArr=list(logUw[iStg])#u(T) determined values
UwFit = ToolBox.cubicFit(masterTemp,currentUwArr)#u(T) fit
uua.append(UwFit)
#print(logUw[iStg])
for id in range(numDeps):
#//// reduce or enhance number density by over-all Rosseland opacity scale parameter
#//
#//Row 1 of Ne is log_e Ne in cm^-3
logNe = Ne[1][id]
#//Determine temperature dependent partition functions Uw:
thisTemp = temp[0][id]
#Ttheta = 5040.0 / thisTemp
#JB#
#use temps and partition values to create a function
#then use said function to extrapolate values for all points
thisLogUw[numStages] = 0.0
for iStg in range(numStages):
thisLogUw[iStg] = ToolBox.valueFromFit(uua[iStg],thisTemp)#u(T) value extrapolated
#JB#
#// NEW Determine temperature dependent partition functions Uw: lburns
if (thisTemp <= 130.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][0]
#for iMol in range(numMols):
# thisLogUwB[iMol] = logUwB[iMol][0]
if (thisTemp >= 10000.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][4]
#for iMol in range(numMols):
# thisLogUwB[iMol] = logUwB[iMol][4]
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
thisLogUwA = thisLogUw[0];
#//Ionization stage Saha factors:
for iStg in range(numStages):
#print("iStg ", iStg)
logSaha[iStg+1][iStg] = logSahaFac - logNe - (boltzFacI[iStg] /temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUw[iStg+1] - thisLogUw[iStg]
saha[iStg+1][iStg] = math.exp(logSaha[iStg+1][iStg])
#//Compute log of denominator is ionization fraction, f_stage
denominator = 1.0 #//default initialization - leading term is always unity
#//ion stage contributions:
for jStg in range(1, numStages+1):
addend = 1.0 #//default initialization for product series
for iStg in range(jStg):
#//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg);
addend = addend * saha[iStg+1][iStg]
denominator = denominator + addend
#//
logDenominator = math.log(denominator)
logIonFrac[0] = -1.0 * logDenominator #// log ionization fraction in stage I
for jStg in range(1, numStages):
addend = 0.0 #//default initialization for product series
for iStg in range(jStg):
#//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg);
addend = addend + logSaha[iStg+1][iStg]
logIonFrac[jStg] = addend - logDenominator
for iStg in range(numStages):
logNums[iStg][id] = logNum[id] + logIonFrac[iStg]
#//id loop
return logNums;
#//end method stagePops
#end method levelPops
#def stagePops2(logNum, Ne, chiIArr, log10UwAArr, \
# numMols, logNumB, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \
# numDeps, temp):
def stagePops2(logNum, Ne, chiIArr, logUw, \
numMols, logNumB, dissEArr, logUwB, logQwABArr, logMuABArr, \
numDeps, temp):
#line 1: //species A data - ionization equilibrium of A
#line 2: //data for set of species "B" - molecular equilibrium for set {AB}
"""Ionization equilibrium routine that accounts for molecule formation:
// Returns depth distribution of ionization stage populations
// Input parameters:
// logNum - array with depth-dependent total element number densities (cm^-3)
// chiI1 - ground state ionization energy of neutral stage
// chiI2 - ground state ionization energy of singly ionized stage
// Also needs atmospheric structure information:
// numDeps
// temp structure
// rho structure
// Atomic element A is the one whose ionization fractions are being computed
// Element B refers to array of other species with which A forms molecules AB """
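#Editor's note: compared with stagePops(), the ionization-fraction denominator here
#also receives inverse molecular Saha factors N_AB/N_I, so atoms bound in molecules
#AB deplete the free atomic ionization stages.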
ln10 = math.log(10.0)
logE = math.log10(math.e) #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
numStages = len(chiIArr) #// + 1; //need one more stage above the highest stage to be populated
#// var numMols = dissEArr.length;
#// Partition functions passed in are vectors of temperature-dependent base 10 log Us
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Default initializations:
#//We need one more stage in size of saha factor than number of stages we're actually populating
thisLogUw = [ 0.0 for i in range(numStages+1) ]
for i in range(numStages+1):
thisLogUw[i] = 0.0
logE10 = math.log(10.0)
#//atomic ionization stage Boltzmann factors:
#double logChiI, logBoltzFacI;
boltzFacI = [ 0.0 for i in range(numStages) ]
#print("numStages ", numStages, " Useful.logEv ", Useful.logEv())
for i in range(numStages):
#print("i ", i, " chiIArr ", chiIArr[i])
logChiI = math.log(chiIArr[i]) + Useful.logEv()
logBoltzFacI = logChiI - Useful.logK()
boltzFacI[i] = math.exp(logBoltzFacI)
logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH())
#// return a 2D 5 x numDeps array of logarithmic number densities
#// Row 0: neutral stage ground state population
#// Row 1: singly ionized stage ground state population
#// Row 2: doubly ionized stage ground state population
#// Row 3: triply ionized stage ground state population
#// Row 4: quadruply ionized stage ground state population
#double[][] logNums = new double[numStages][numDeps];
logNums = [ [ 0.0 for i in range(numDeps)] for j in range(numStages) ]
#//We need one more stage in size of saha factor than number of stages we're actually populating
#// for index accounting purposes
#// For atomic ionization stages:
logSaha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
saha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
#//
logIonFrac = [ 0.0 for i in range(numStages) ]
#double expFac, logNe;
#// Now - molecular variables:
#//Treat at least one molecule - if there are really no molecules for an atomic species,
#//there will be one phantom molecule in the denominator of the ionization fraction
#//with an impossibly high dissociation energy
ifMols = True
if (numMols == 0):
ifMols = False
numMols = 1
#//This should be inherited, but let's make sure:
dissEArr[0] = 19.0 #//eV
#//Molecular partition functions - default initialization:
#double[] thisLogUwB = new double[numMols];
thisLogUwB = [ 0.0 for i in range(numMols) ]
for iMol in range(numMols):
thisLogUwB[iMol] = 0.0 #// variable for temp-dependent computed partn fn of array element B
thisLogUwA = 0.0 #// element A
thisLogQwAB = math.log(300.0)
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
logUwA = [ 0.0 for i in range(5) ]
if (numMols > 0):
for kk in range(len(logUwA)):
logUwA[kk] = logUw[0][kk]
#// lburns
#//}
#//// Molecular partition functions:
#//Molecular dissociation Boltzmann factors:
boltzFacIAB = [ 0.0 for i in range(numMols) ]
logMolSahaFac = [ 0.0 for i in range(numMols) ]
#//if (numMols > 0){
#double logDissE, logBoltzFacIAB;
for iMol in range(numMols):
logDissE = math.log(dissEArr[iMol]) + Useful.logEv()
logBoltzFacIAB = logDissE - Useful.logK()
boltzFacIAB[iMol] = math.exp(logBoltzFacIAB)
logMolSahaFac[iMol] = (3.0 / 2.0) * (log2pi + logMuABArr[iMol] + Useful.logK() - 2.0 * Useful.logH())
#//console.log("iMol " + iMol + " dissEArr[iMol] " + dissEArr[iMol] + " logDissE " + logE*logDissE + " logBoltzFacIAB " + logE*logBoltzFacIAB + " boltzFacIAB[iMol] " + boltzFacIAB[iMol] + " logMuABArr " + logE*logMuABArr[iMol] + " logMolSahaFac " + logE*logMolSahaFac[iMol]);
#//}
#// For molecular species:
logSahaMol = [ 0.0 for i in range(numMols) ]
invSahaMol = [ 0.0 for i in range(numMols) ]
#JB#
uua=[]
uub=[]
qwab=[]
for iStg in range(numStages):
currentUwArr=list(logUw[iStg])#u(T) determined values
UwFit = ToolBox.cubicFit(masterTemp,currentUwArr)#u(T) fit
uua.append(UwFit)
#print(logUw[iStg])
for iMol in range(numMols):
currentUwBArr=list(logUwB[iMol])#u(T) determined values
UwBFit = ToolBox.cubicFit(masterTemp,currentUwBArr)#u(T) fit
uub.append(UwBFit)
for id in range(numDeps):
#//// reduce or enhance number density by over-all Rosseland opacity scale parameter
#//
#//Row 1 of Ne is log_e Ne in cm^-3
logNe = Ne[1][id]
#//Determine temperature dependent partition functions Uw:
thisTemp = temp[0][id]
#Ttheta = 5040.0 / thisTemp
#JB#
#use temps and partition values to create a function
#then use said function to extrapolate values for all points
thisLogUw[numStages] = 0.0
for iStg in range(numStages):
thisLogUw[iStg] = ToolBox.valueFromFit(uua[iStg],thisTemp)#u(T) value extrapolated
for iMol in range(numMols):
thisLogUwB[iMol] = ToolBox.valueFromFit(uub[iMol],thisTemp)#u(T) value extrapolated
#JB#
#// NEW Determine temperature dependent partition functions Uw: lburns
if (thisTemp <= 130.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][0]
for iMol in range(numMols):
thisLogUwB[iMol] = logUwB[iMol][0]
if (thisTemp >= 10000.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][4]
for iMol in range(numMols):
thisLogUwB[iMol] = logUwB[iMol][4]
for iMol in range(numMols):
if (thisTemp < 3000.0):
thisLogQwAB = ( logQwABArr[iMol][1] * (3000.0 - thisTemp)/(3000.0 - 500.0) ) \
+ ( logQwABArr[iMol][2] * (thisTemp - 500.0)/(3000.0 - 500.0) )
if ( (thisTemp >= 3000.0) and (thisTemp <= 8000.0) ):
thisLogQwAB = ( logQwABArr[iMol][2] * (8000.0 - thisTemp)/(8000.0 - 3000.0) ) \
+ ( logQwABArr[iMol][3] * (thisTemp - 3000.0)/(8000.0 - 3000.0) )
if ( thisTemp > 8000.0 ):
thisLogQwAB = ( logQwABArr[iMol][3] * (10000.0 - thisTemp)/(10000.0 - 8000.0) ) \
+ ( logQwABArr[iMol][4] * (thisTemp - 8000.0)/(10000.0 - 8000.0) )
#// iMol loop
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
thisLogUwA = thisLogUw[0];
#//Ionization stage Saha factors:
for iStg in range(numStages):
#print("iStg ", iStg)
logSaha[iStg+1][iStg] = logSahaFac - logNe - (boltzFacI[iStg] /temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUw[iStg+1] - thisLogUw[iStg]
saha[iStg+1][iStg] = math.exp(logSaha[iStg+1][iStg])
#//Molecular Saha factors:
for iMol in range(numMols):
logSahaMol[iMol] = logMolSahaFac[iMol] - logNumB[iMol][id] - (boltzFacIAB[iMol] / temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUwB[iMol] + thisLogUwA - thisLogQwAB
#//For denominator of ionization fraction, we need *inverse* molecular Saha factors (N_AB/NI):
logSahaMol[iMol] = -1.0 * logSahaMol[iMol]
invSahaMol[iMol] = math.exp(logSahaMol[iMol])
#//Compute log of denominator is ionization fraction, f_stage
denominator = 1.0 #//default initialization - leading term is always unity
#//ion stage contributions:
for jStg in range(1, numStages+1):
addend = 1.0 #//default initialization for product series
for iStg in range(jStg):
#//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg);
addend = addend * saha[iStg+1][iStg]
denominator = denominator + addend
#//molecular contribution
if (ifMols == True):
for iMol in range(numMols):
denominator = denominator + invSahaMol[iMol]
#//
logDenominator = math.log(denominator)
logIonFrac[0] = -1.0 * logDenominator #// log ionization fraction in stage I
for jStg in range(1, numStages):
addend = 0.0 #//default initialization for product series
for iStg in range(jStg):
#//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg);
addend = addend + logSaha[iStg+1][iStg]
logIonFrac[jStg] = addend - logDenominator
for iStg in range(numStages):
logNums[iStg][id] = logNum[id] + logIonFrac[iStg]
#//id loop
return logNums;
#//end method stagePops
def stagePops3(logNum, Ne, chiIArr, logUw, numDeps, temp):
#Version for ChromaStarPyGas: logNum is now *neutral stage* population from Phil
# Bennett's GAS package
#line 1: //species A data - ionization equilibrium of A
#line 2: //data for set of species "B" - molecular equilibrium for set {AB}
"""Ionization equilibrium routine that accounts for molecule formation:
// Returns depth distribution of ionization stage populations
// Input parameters:
// logNum - array with depth-dependent neutral stage number densities (cm^-3)
// chiI1 - ground state ionization energy of neutral stage
// chiI2 - ground state ionization energy of singly ionized stage
// Also needs atmospheric structure information:
// numDeps
// temp structure
// rho structure
// Atomic element A is the one whose ionization fractions are being computed
// Element B refers to array of other species with which A forms molecules AB """
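#Editor's note: unlike stagePops2(), logNum here is the *neutral-stage* density, so
#each higher stage below is obtained by chaining Saha factors onto the stage beneath
#it rather than by normalizing ionization fractions of a total element density.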
ln10 = math.log(10.0)
logE = math.log10(math.e) #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
numStages = len(chiIArr) #// + 1; //need one more stage above the highest stage to be populated
#// var numMols = dissEArr.length;
#// Partition functions passed in are vectors of temperature-dependent base 10 log Us
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Default initializations:
#//We need one more stage in size of saha factor than number of stages we're actually populating
thisLogUw = [ 0.0 for i in range(numStages+1) ]
for i in range(numStages+1):
thisLogUw[i] = 0.0
logE10 = math.log(10.0)
#//atomic ionization stage Boltzmann factors:
#double logChiI, logBoltzFacI;
boltzFacI = [ 0.0 for i in range(numStages) ]
#print("numStages ", numStages, " Useful.logEv ", Useful.logEv())
for i in range(numStages):
#print("i ", i, " chiIArr ", chiIArr[i])
logChiI = math.log(chiIArr[i]) + Useful.logEv()
logBoltzFacI = logChiI - Useful.logK()
boltzFacI[i] = math.exp(logBoltzFacI)
logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH())
#// return a 2D 5 x numDeps array of logarithmic number densities
#// Row 0: neutral stage ground state population
#// Row 1: singly ionized stage ground state population
#// Row 2: doubly ionized stage ground state population
#// Row 3: triply ionized stage ground state population
#// Row 4: quadruply ionized stage ground state population
#double[][] logNums = new double[numStages][numDeps];
logNums = [ [ 0.0 for i in range(numDeps)] for j in range(numStages) ]
#//We need one more stage in size of saha factor than number of stages we're actually populating
#// for index accounting purposes
#// For atomic ionization stages:
#logSaha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
#saha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
#//
#logIonFrac = [ 0.0 for i in range(numStages) ]
#double expFac, logNe;
#JB#
uua=[]
uub=[]
qwab=[]
for iStg in range(numStages):
currentUwArr=list(logUw[iStg])#u(T) determined values
UwFit = ToolBox.cubicFit(masterTemp,currentUwArr)#u(T) fit
uua.append(UwFit)
#print(logUw[iStg])
for id in range(numDeps):
#//// reduce or enhance number density by over-all Rosseland opacity scale parameter
#//
#//Row 1 of Ne is log_e Ne in cm^-3
logNe = Ne[1][id]
#//Determine temperature dependent partition functions Uw:
thisTemp = temp[0][id]
#Ttheta = 5040.0 / thisTemp
#JB#
#use temps and partition values to create a function
#then use said function to extrapolate values for all points
thisLogUw[numStages] = 0.0
for iStg in range(numStages):
thisLogUw[iStg] = ToolBox.valueFromFit(uua[iStg],thisTemp)#u(T) value extrapolated
#JB#
#// NEW Determine temperature dependent partition functions Uw: lburns
if (thisTemp <= 130.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][0]
if (thisTemp >= 10000.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][4]
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
#thisLogUwA = thisLogUw[0];
#//Ionization stage Saha factors:
logNums[0][id] = logNum[id]
for iStg in range(1, numStages):
#print("iStg ", iStg)
thisLogSaha = logSahaFac - logNe - (boltzFacI[iStg-1] /temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUw[iStg] - thisLogUw[iStg-1]
#saha[iStg+1][iStg] = math.exp(logSaha[iStg+1][iStg])
logNums[iStg][id] = logNums[iStg-1][id] + thisLogSaha
#//id loop
return logNums;
#//end method stagePops
#def sahaRHS(chiI, log10UwUArr, log10UwLArr, temp):
def sahaRHS(chiI, logUwU, logUwL, temp):
"""RHS of partial pressure formulation of Saha equation in standard form (N_U*P_e/N_L on LHS)
// Returns depth distribution of LHS: Phi(T) === N_U*P_e/N_L (David Gray notation)
// Input parameters:
// chiI - ground state ionization energy of lower stage
// log10UwUArr, log10UwLArr - array of temperature-dependent partition function for upper and lower ionization stage
// Also needs atmospheric structure information:
// numDeps
// temp structure
//
// Atomic element "A" is the one whose ionization fractions are being computed
// Element "B" refers to array of other species with which A forms molecules "AB" """
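#Editor's sketch (not in the original source): the quantity returned below is
#  ln Phi(T) = ln[ 2*(U_U/U_L) * (2*pi*m_e)^(3/2) * (k*T)^(5/2) / h^3 ] - chi_I/(k*T),
#i.e. the right-hand side of N_U * P_e / N_L = Phi(T) in the partial-pressure form
#of the Saha equation.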
ln10 = math.log(10.0)
logE = math.log10(math.e) #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
#// var numMols = dissEArr.length;
#// Partition functions passed in are vectors of temperature-dependent base 10 log Us
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Default initializations:
#//We need one more stage in size of saha factor than number of stages we're actually populating
thisLogUwU = 0.0
thisLogUwL = 0.0
logE10 = math.log(10.0)
#//We need one more stage in size of saha factor than number of stages we're actually populating
#logUwU = [0.0 for i in range(5)]
#logUwL = [0.0 for i in range(5)]
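# NOTE (editor): the loop below copies the lower-stage partition values into logUwU
# (the commented line preserves the original base-10 -> natural-log conversion), so
# the upper- and lower-stage partition arrays end up identical at this point.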
for kk in range(len(logUwL)):
logUwU[kk] = logUwL[kk]
# logUwL[kk] = logE10*log10UwLArr[kk]
#//System.out.println("chiL before: " + chiL);
#// If we need to subtract chiI from chiL, do so *before* converting to tiny numbers in ergs!
#//atomic ionization stage Boltzmann factors:
#double logChiI, logBoltzFacI;
#double boltzFacI;
logChiI = math.log(chiI) + Useful.logEv()
logBoltzFacI = logChiI - Useful.logK()
boltzFacI = math.exp(logBoltzFacI)
#//Extra factor of k to get k^5/2 in the P_e formulation of Saha Eq.
logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH()) + Useful.logK()
#//double[] logLHS = new double[numDeps];
#double logLHS;
#// For atomic ionization stages:
#double logSaha, saha, expFac;
#// for (int id = 0; id < numDeps; id++) {
#//
#//Determine temperature dependent partition functions Uw:
thisTemp = temp[0]
#Ttheta = 5040.0 / thisTemp
"""
if (Ttheta >= 1.0):
thisLogUwU = logUwU[0]
thisLogUwL = logUwL[0]
if (Ttheta <= 0.5):
thisLogUwU = logUwU[1]
thisLogUwL = logUwL[1]
if (Ttheta > 0.5 and Ttheta < 1.0):
thisLogUwU = ( logUwU[1] * (Ttheta - 0.5)/(1.0 - 0.5) )
+ ( logUwU[0] * (1.0 - Ttheta)/(1.0 - 0.5) )
thisLogUwL = ( logUwL[1] * (Ttheta - 0.5)/(1.0 - 0.5) )
+ ( logUwL[0] * (1.0 - Ttheta)/(1.0 - 0.5) )
"""
#JB#
currentUwUArr=list(logUwU)#u(T) determined values
UwUFit = ToolBox.cubicFit(masterTemp,currentUwUArr)#u(T) fit
thisLogUwU = ToolBox.valueFromFit(UwUFit,thisTemp)#u(T) value extrapolated
currentUwLArr=list(logUwL)#u(T) determined values
UwLFit = ToolBox.cubicFit(masterTemp,currentUwLArr)#u(T) fit
thisLogUwL = ToolBox.valueFromFit(UwLFit,thisTemp)#u(T) value extrapolated
#JB#
#will need to do this one in Main as it goes through its own loop of temp
#if thisTemp == superTemp[0][len(superTemp[0])]:
# uwu.append(UwUFit)
# uwl.append(UwLFit)
#
#JB#
if (thisTemp <= 130.0):
thisLogUwU = logUwU[0]
thisLogUwL = logUwL[0]
if (thisTemp >= 10000.0):
thisLogUwU = logUwU[4]
thisLogUwL = logUwL[4]
"""
if (thisTemp > 130 and thisTemp <= 500):
thisLogUwU = logUwU[1] * (thisTemp - 130)/(500 - 130) \
+ logUwU[0] * (500 - thisTemp)/(500 - 130)
thisLogUwL = logUwL[1] * (thisTemp - 130)/(500 - 130) \
+ logUwL[0] * (500 - thisTemp)/(500 - 130)
if (thisTemp > 500 and thisTemp <= 3000):
thisLogUwU = logUwU[2] * (thisTemp - 500)/(3000 - 500) \
+ logUwU[1] * (3000 - thisTemp)/(3000 - 500)
thisLogUwL = logUwL[2] * (thisTemp - 500)/(3000 - 500) \
+ logUwL[1] * (3000 - thisTemp)/(3000 - 500)
if (thisTemp > 3000 and thisTemp <= 8000):
thisLogUwU = logUwU[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUwU[2] * (8000 - thisTemp)/(8000 - 3000)
thisLogUwL = logUwL[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUwL[2] * (8000 - thisTemp)/(8000 - 3000)
if (thisTemp > 8000 and thisTemp < 10000):
thisLogUwU = logUwU[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUwU[3] * (10000 - thisTemp)/(10000 - 8000)
thisLogUwL = logUwL[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUwL[3] * (10000 - thisTemp)/(10000 - 8000)
if (thisTemp >= 10000):
thisLogUwU = logUwU[4]
thisLogUwL = logUwL[4]
"""
#//Ionization stage Saha factors:
#//Need T_kin^5/2 in the P_e formulation of Saha Eq.
logSaha = logSahaFac - (boltzFacI /temp[0]) + (5.0 * temp[1] / 2.0) + thisLogUwU - thisLogUwL
#// saha = Math.exp(logSaha);
#//logLHS[id] = logSaha;
logLHS = logSaha;
#// } //id loop
return logLHS;
#JB
#return [logLHS,[[UwUFit,thisLogUwU],[UwLFit,thisLogUwL]]]
#//
# } //end method sahaRHS
#def molPops(nmrtrLogNumB, nmrtrDissE, log10UwA, nmrtrLog10UwB, nmrtrLogQwAB, nmrtrLogMuAB, \
# numMolsB, logNumB, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \
# logGroundRatio, numDeps, temp):
def molPops(nmrtrLogNumB, nmrtrDissE, logUwA, nmrtrLogUwB, nmrtrLogQwAB, nmrtrLogMuAB, \
numMolsB, logNumB, dissEArr, logUwB, logQwABArr, logMuABArr, \
logGroundRatio, numDeps, temp):
# line 1: //species A data - ionization equilibrium of A
# //data for set of species "B" - molecular equilibrium for set {AB}
"""Diatomic molecular equilibrium routine that accounts for molecule formation:
// Returns depth distribution of molecular population
// Input parameters:
// logNum - array with depth-dependent total element number densities (cm^-3)
// chiI1 - ground state ionization energy of neutral stage
// chiI2 - ground state ionization energy of singly ionized stage
// Also needs atmospheric structure information:
// numDeps
// temp structure
// rho structure
//
// Atomic element "A" is the one kept on the LHS of the master fraction, whose ionization fractions are included
// in the denominator of the master fraction
// Element "B" refers to array of other species with which A forms molecules "AB" """
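#Editor's note (for reference): the molecular fraction returned below is
#  f_AB = (N_AB/N_I) / [ (N_A,total/N_I) + sum over B' of (N_AB'/N_I) ],
#with N_I the neutral ground-state density of A, each N_AB/N_I an inverse molecular
#Saha factor, and N_A,total/N_I supplied through logGroundRatio.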
logE = math.log10(math.e) #// for debug output
#//System.out.println("molPops: nmrtrDissE " + nmrtrDissE + " log10UwA " + log10UwA[0] + " " + log10UwA[1] + " nmrtrLog10UwB " +
#// nmrtrLog10UwB[0] + " " + nmrtrLog10UwB[1] + " nmrtrLog10QwAB " + logE*nmrtrLogQwAB[2] + " nmrtrLogMuAB " + logE*nmrtrLogMuAB
#// + " numMolsB " + numMolsB + " dissEArr " + dissEArr[0] + " log10UwBArr " + log10UwBArr[0][0] + " " + log10UwBArr[0][1] + " log10QwABArr " +
#// logE*logQwABArr[0][2] + " logMuABArr " + logE*logMuABArr[0]);
#//System.out.println("Line: nmrtrLog10UwB[0] " + logE*nmrtrLog10UwB[0] + " nmrtrLog10UwB[1] " + logE*nmrtrLog10UwB[1]);
ln10 = math.log(10.0)
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
logE10 = math.log(10.0)
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Treat at least one molecule - if there are really no molecules for an atomic species,
#//there will be one phantom molecule in the denominator of the ionization fraction
#//with an impossibly high dissociation energy
if (numMolsB == 0):
numMolsB = 1
#//This should be inherited, but let's make sure:
dissEArr[0] = 29.0 #//eV
#//var molPops = function(logNum, numeratorLogNumB, numeratorDissE, numeratorLog10UwA, numeratorLog10QwAB, numeratorLogMuAB, //species A data - ionization equilibrium of A
#//Molecular partition functions - default initialization:
thisLogUwB = [0.0 for i in range(numMolsB)]
for iMol in range(numMolsB):
thisLogUwB[iMol] = 0.0 #// variable for temp-dependent computed partn fn of array element B
thisLogUwA = 0.0 #// element A
nmrtrThisLogUwB = 0.0 #// element A
thisLogQwAB = math.log(300.0)
nmrtrThisLogQwAB = math.log(300.0)
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
#logUwA = [0.0 for i in range(5)]
#nmrtrLogUwB = [0.0 for i in range(5)]
#for kk in range(len(logUwA)):
#logUwA[kk] = logE10*log10UwA[kk]
#nmrtrLogUwB[kk] = logE10*nmrtrLog10UwB[kk]
#// lburns
#// Array of elements B for all molecular species AB:
#double[][] logUwB = new double[numMolsB][2];
#logUwB = [ [ 0.0 for i in range(5) ] for j in range(numMolsB) ]
#//if (numMolsB > 0){
#for iMol in range(numMolsB):
# for kk in range(5):
# logUwB[iMol][kk] = logE10*log10UwBArr[iMol][kk]
# // lburns new loop
#//}
#// Molecular partition functions:
#// double nmrtrLogQwAB = logE10*nmrtrLog10QwAB;
#// double[] logQwAB = new double[numMolsB];
#// //if (numMolsB > 0){
#// for (int iMol = 0; iMol < numMolsB; iMol++){
#// logQwAB[iMol] = logE10*log10QwABArr[iMol];
#// }
# //}
#//Molecular dissociation Boltzmann factors:
nmrtrBoltzFacIAB = 0.0
nmrtrLogMolSahaFac = 0.0
logDissE = math.log(nmrtrDissE) + Useful.logEv()
#//System.out.println("logDissE " + logE*logDissE)
logBoltzFacIAB = logDissE - Useful.logK()
#//System.out.println("logBoltzFacIAB " + logE*logBoltzFacIAB);
nmrtrBoltzFacIAB = math.exp(logBoltzFacIAB)
nmrtrLogMolSahaFac = (3.0 / 2.0) * (log2pi + nmrtrLogMuAB + Useful.logK() - 2.0 * Useful.logH())
#//System.out.println("nmrtrLogMolSahaFac " + logE*nmrtrLogMolSahaFac);
#//System.out.println("nmrtrDissE " + nmrtrDissE + " logDissE " + logE*logDissE + " logBoltzFacIAB " + logE*logBoltzFacIAB + " nmrtrBoltzFacIAB " + nmrtrBoltzFacIAB + " nmrtrLogMuAB " + logE*nmrtrLogMuAB + " nmrtrLogMolSahaFac " + logE*nmrtrLogMolSahaFac);
boltzFacIAB = [0.0 for i in range(numMolsB)]
logMolSahaFac = [0.0 for i in range(numMolsB)]
#//if (numMolsB > 0){
for iMol in range(numMolsB):
logDissE = math.log(dissEArr[iMol]) + Useful.logEv()
logBoltzFacIAB = logDissE - Useful.logK()
boltzFacIAB[iMol] = math.exp(logBoltzFacIAB)
logMolSahaFac[iMol] = (3.0 / 2.0) * (log2pi + logMuABArr[iMol] + Useful.logK() - 2.0 * Useful.logH())
#//System.out.println("logMolSahaFac[iMol] " + logE*logMolSahaFac[iMol]);
#//System.out.println("iMol " + iMol + " dissEArr[iMol] " + dissEArr[iMol] + " logDissE " + logE*logDissE + " logBoltzFacIAB " + logE*logBoltzFacIAB + " boltzFacIAB[iMol] " + boltzFacIAB[iMol] + " logMuABArr " + logE*logMuABArr[iMol] + " logMolSahaFac " + logE*logMolSahaFac[iMol]);
#//double[] logNums = new double[numDeps]
#//}
#// For molecular species:
#double nmrtrSaha, nmrtrLogSahaMol, nmrtrLogInvSahaMol; //, nmrtrInvSahaMol;
logMolFrac = [0.0 for i in range(numDeps)]
logSahaMol = [0.0 for i in range(numMolsB)]
invSahaMol = [0.0 for i in range(numMolsB)]
#JB#
currentUwAArr=list(logUwA)#u(T) determined values
UwAFit = ToolBox.cubicFit(masterTemp, currentUwAArr)#u(T) fit
nmrtrLogUwBArr=list(nmrtrLogUwB)#u(T) determined values
nmrtrLogUwBFit = ToolBox.cubicFit(masterTemp, nmrtrLogUwBArr)#u(T) fit
#uwa.append(UwAFit)
#uwb.append(nmrtrLogUwBFit)
uwbFits=[]
qwabFit = []
for iMol in range(numMolsB):
currentUwBArr=list(logUwB[iMol])
UwBFit = ToolBox.cubicFit(masterTemp, currentUwBArr)
uwbFits.append(UwBFit)
currentLogQwABArr=list(logQwABArr[iMol])#u(T) determined values
QwABFit = ToolBox.cubicFit(masterTemp, currentLogQwABArr)#u(T) fit
qwabFit.append(QwABFit)
#nmrtrQwABArr=list(nmrtrLogQwAB)#u(T) determined values
#nmrtrQwABFit = ToolBox.cubicFit(masterTemp, nmrtrQwABArr)#u(T) fit
#for Mols in range(numMolsB):
# currentLogUwBArr=list(logUwB[Mols])#u(T) determined values
# UwBFit=cubicFit(masterTemp,currentLogUwBArr)#u(T) fit
#JB#
#//
temps=[]
#valb=[]
#vala=[]
#valnb=[]
#valqab=[]
#valnmrtrqwb=[]
#// System.out.println("molPops: id nmrtrLogNumB logNumBArr[0] logGroundRatio");
for id in range(numDeps):
#//System.out.format("%03d, %21.15f, %21.15f, %21.15f, %n", id, logE*nmrtrLogNumB[id], logE*logNumB[0][id], logE*logGroundRatio[id]);
#//// reduce or enhance number density by over-all Rosseland opacity scale parameter
#//Determine temperature dependent partition functions Uw:
thisTemp = temp[0][id]
temps.append(thisTemp)
#Ttheta = 5040.0 / thisTemp
"""
if (Ttheta >= 1.0):
thisLogUwA = logUwA[0]
nmrtrThisLogUwB = nmrtrLogUwB[0]
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][0]
if (Ttheta <= 0.5):
thisLogUwA = logUwA[1]
nmrtrThisLogUwB = nmrtrLogUwB[1]
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][1]
if (Ttheta > 0.5 and Ttheta < 1.0):
thisLogUwA = ( logUwA[1] * ((Ttheta - 0.5)/(1.0 - 0.5)) ) \
+ ( logUwA[0] * ((1.0 - Ttheta)/(1.0 - 0.5)) )
nmrtrThisLogUwB = ( nmrtrLogUwB[1] * ((Ttheta - 0.5)/(1.0 - 0.5)) ) \
+ ( nmrtrLogUwB[0] * ((1.0 - Ttheta)/(1.0 - 0.5)) )
for iMol in range(numMolsB):
thisLogUwB[iMol] = ( logUwB[iMol][1] * ((Ttheta - 0.5)/(1.0 - 0.5)) ) \
+ ( logUwB[iMol][0] * ((1.0 - Ttheta)/(1.0 - 0.5)) )
"""
#JB#
thisLogUwA = float(ToolBox.valueFromFit(UwAFit,thisTemp))#u(T) value extrapolated
#vala.append(thisLogUwA)
nmrtrThisLogUwB = float(ToolBox.valueFromFit(nmrtrLogUwBFit,thisTemp))#u(T) value extrapolated
#valnb.append(nmrtrThisLogUwB)
#for iMol in range(numMolsB):
# thisLogUwB[iMol]=logUwB[iMol]
for iMol in range(numMolsB):
thisLogUwB[iMol] = ToolBox.valueFromFit(uwbFits[iMol],thisTemp)#u(T) value extrapolated
#valb.append(thisLogUwB[iMol])
#// NEW Determine temperature dependent partition functions Uw: lburns
thisTemp = temp[0][id]
if (thisTemp <= 130.0):
thisLogUwA = logUwA[0]
nmrtrThisLogUwB = nmrtrLogUwB[0]
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][0]
if (thisTemp >= 10000.0):
thisLogUwA = logUwA[4]
nmrtrThisLogUwB = nmrtrLogUwB[4]
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][4]
"""
if (thisTemp > 130 and thisTemp <= 500):
thisLogUwA = logUwA[1] * (thisTemp - 130)/(500 - 130) \
+ logUwA[0] * (500 - thisTemp)/(500 - 130)
nmrtrThisLogUwB = nmrtrLogUwB[1] * (thisTemp - 130)/(500 - 130) \
+ nmrtrLogUwB[0] * (500 - thisTemp)/(500 - 130)
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][1] * (thisTemp - 130)/(500 - 130) \
+ logUwB[iMol][0] * (500 - thisTemp)/(500 - 130)
if (thisTemp > 500 and thisTemp <= 3000):
thisLogUwA = logUwA[2] * (thisTemp - 500)/(3000 - 500) \
+ logUwA[1] * (3000 - thisTemp)/(3000 - 500)
nmrtrThisLogUwB = nmrtrLogUwB[2] * (thisTemp - 500)/(3000 - 500) \
+ nmrtrLogUwB[1] * (3000 - thisTemp)/(3000 - 500)
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][2] * (thisTemp - 500)/(3000 - 500) \
+ logUwB[iMol][1] * (3000 - thisTemp)/(3000 - 500)
if (thisTemp > 3000 and thisTemp <= 8000):
thisLogUwA = logUwA[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUwA[2] * (8000 - thisTemp)/(8000 - 3000)
nmrtrThisLogUwB = nmrtrLogUwB[3] * (thisTemp - 3000)/(8000 - 3000) \
+ nmrtrLogUwB[2] * (8000 - thisTemp)/(8000 - 3000)
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUwB[iMol][2] * (8000 - thisTemp)/(8000 - 3000)
if (thisTemp > 8000 and thisTemp < 10000):
thisLogUwA = logUwA[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUwA[3] * (10000 - thisTemp)/(10000 - 8000)
nmrtrThisLogUwB = nmrtrLogUwB[4] * (thisTemp - 8000)/(10000 - 8000) \
+ nmrtrLogUwB[3] * (10000 - thisTemp)/(10000 - 8000)
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUwB[iMol][3] * (10000 - thisTemp)/(10000 - 8000)
if (thisTemp >= 10000):
thisLogUwA = logUwA[4]
nmrtrThisLogUwB = nmrtrLogUwB[4]
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][4]
"""
#iMol loops for Q's
for iMol in range(numMolsB):
if (thisTemp < 3000.0):
thisLogQwAB = ( logQwABArr[iMol][1] * (3000.0 - thisTemp)/(3000.0 - 500.0) ) \
+ ( logQwABArr[iMol][2] * (thisTemp - 500.0)/(3000.0 - 500.0) )
if ( (thisTemp >= 3000.0) and (thisTemp <= 8000.0) ):
thisLogQwAB = ( logQwABArr[iMol][2] * (8000.0 - thisTemp)/(8000.0 - 3000.0) ) \
+ ( logQwABArr[iMol][3] * (thisTemp - 3000.0)/(8000.0 - 3000.0) )
if ( thisTemp > 8000.0 ):
thisLogQwAB = ( logQwABArr[iMol][3] * (10000.0 - thisTemp)/(10000.0 - 8000.0) ) \
+ ( logQwABArr[iMol][4] * (thisTemp - 8000.0)/(10000.0 - 8000.0) )
if (thisTemp < 3000.0):
nmrtrThisLogQwAB = ( nmrtrLogQwAB[1] * (3000.0 - thisTemp)/(3000.0 - 500.0) ) \
+ ( nmrtrLogQwAB[2] * (thisTemp - 500.0)/(3000.0 - 500.0) )
if ( (thisTemp >= 3000.0) and (thisTemp <= 8000.0) ):
nmrtrThisLogQwAB = ( nmrtrLogQwAB[2] * (8000.0 - thisTemp)/(8000.0 - 3000.0) ) \
+ ( nmrtrLogQwAB[3] * (thisTemp - 3000.0)/(8000.0 - 3000.0) )
if ( thisTemp > 8000.0 ):
nmrtrThisLogQwAB = ( nmrtrLogQwAB[3] * (10000.0 - thisTemp)/(10000.0 - 8000.0) ) \
+ ( nmrtrLogQwAB[4] * (thisTemp - 8000.0)/(10000.0 - 8000.0) )
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
# //Ionization stage Saha factors:
#//System.out.println("id " + id + " nmrtrLogNumB[id] " + logE*nmrtrLogNumB[id]);
# // if (id == 16){
# // System.out.println("id " + id + " nmrtrLogNumB[id] " + logE*nmrtrLogNumB[id] + " pp nmrtB " + (logE*(nmrtrLogNumB[id]+temp[1][id]+Useful.logK())) + " nmrtrThisLogUwB " + logE*nmrtrThisLogUwB + " thisLogUwA " + logE*thisLogUwA + " nmrtrLogQwAB " + logE*nmrtrThisLogQwAB);
# //System.out.println("nmrtrThisLogUwB " + logE*nmrtrThisLogUwB + " thisLogUwA " + logE*thisLogUwA + " nmrtrThisLogQwAB " + logE*nmrtrThisLogQwAB);
# // }
nmrtrLogSahaMol = nmrtrLogMolSahaFac - nmrtrLogNumB[id] - (nmrtrBoltzFacIAB / temp[0][id]) + (3.0 * temp[1][id] / 2.0) + nmrtrThisLogUwB + thisLogUwA - nmrtrThisLogQwAB
nmrtrLogInvSahaMol = -1.0 * nmrtrLogSahaMol
#//System.out.println("nmrtrLogInvSahaMol " + logE*nmrtrLogInvSahaMol);
#//nmrtrInvSahaMol = Math.exp(nmrtrLogSahaMol);
#// if (id == 16){
#// System.out.println("nmrtrLogInvSahaMol " + logE*nmrtrLogInvSahaMol);
#// }
#// if (id == 16){
#// System.out.println("nmrtrBoltzFacIAB " + nmrtrBoltzFacIAB + " nmrtrThisLogUwB " + logE*nmrtrThisLogUwB + " thisLogUwA " + logE*thisLogUwA + " nmrtrThisLogQwAB " + nmrtrThisLogQwAB);
#// System.out.println("nmrtrLogSahaMol " + logE*nmrtrLogSahaMol); // + " nmrtrInvSahaMol " + nmrtrInvSahaMol);
#// }
#//Molecular Saha factors:
for iMol in range(numMolsB):
#//System.out.println("iMol " + iMol + " id " + id + " logNumB[iMol][id] " + logE*nmrtrLogNumB[id]);
#//System.out.println("iMol " + iMol + " thisLogUwB[iMol] " + logE*thisLogUwB[iMol] + " thisLogUwA " + logE*thisLogUwA + " thisLogQwAB " + logE*thisLogQwAB);
logSahaMol[iMol] = logMolSahaFac[iMol] - logNumB[iMol][id] - (boltzFacIAB[iMol] / temp[0][id]) + (3.0 * temp[1][id] / 2.0) + float(thisLogUwB[iMol]) + thisLogUwA - thisLogQwAB
#//For denominator of ionization fraction, we need *inverse* molecular Saha factors (N_AB/NI):
logSahaMol[iMol] = -1.0 * logSahaMol[iMol]
invSahaMol[iMol] = math.exp(logSahaMol[iMol])
#//TEST invSahaMol[iMol] = 1.0e-99; //test
#// if (id == 16){
#// System.out.println("iMol " + iMol + " boltzFacIAB[iMol] " + boltzFacIAB[iMol] + " thisLogUwB[iMol] " + logE*thisLogUwB[iMol] + " logQwAB[iMol] " + logE*thisLogQwAB + " logNumB[iMol][id] " + logE*logNumB[iMol][id] + " logMolSahaFac[iMol] " + logE*logMolSahaFac[iMol]);
#// System.out.println("iMol " + iMol + " logSahaMol " + logE*logSahaMol[iMol] + " invSahaMol[iMol] " + invSahaMol[iMol]);
#// }
#//Compute log of denominator is ionization fraction, f_stage
# //default initialization
# // - ratio of total atomic particles in all ionization stages to number in ground state:
denominator = math.exp(logGroundRatio[id]) #//default initialization - ratio of total atomic particles in all ionization stages to number in ground state
#//molecular contribution
for iMol in range(numMolsB):
#// if (id == 16){
#// System.out.println("invSahaMol[iMol] " + invSahaMol[iMol] + " denominator " + denominator);
#// }
denominator = denominator + invSahaMol[iMol]
#//
logDenominator = math.log(denominator)
#//System.out.println("logGroundRatio[id] " + logE*logGroundRatio[id] + " logDenominator " + logE*logDenominator);
#// if (id == 16){
#// System.out.println("id " + id + " logGroundRatio " + logGroundRatio[id] + " logDenominator " + logDenominator);
#// }
#//if (id == 36){
#// System.out.println("logDenominator " + logE*logDenominator);
#// }
#//var logDenominator = Math.log( 1.0 + saha21 + (saha32 * saha21) + (saha43 * saha32 * saha21) + (saha54 * saha43 * saha32 * saha21) );
logMolFrac[id] = nmrtrLogInvSahaMol - logDenominator
#// if (id == 16){
#// System.out.println("id " + id + " logMolFrac[id] " + logE*logMolFrac[id]);
#// }
#//logNums[id] = logNum[id] + logMolFrac;
#} //id loop
#JB - check (never used)#
#print(uwa)
#print(uwb)
#title("logUwA")
"""
plot(temps,vala)
tempT=[]
for t in masterTemp:
tempT.append(valueFromFit(UwAFit,t))
scatter(masterTemp,(tempT))
show()
#title("nmrtrlogUwB")
plot(temps,valnb)
tempT=[]
for t in masterTemp:
tempT.append(valueFromFit(nmrtrLogUwBFit,t))
scatter(masterTemp,(tempT))
show()
#title("logUwB")
plot(temps,valb)
tempT=[]
for t in masterTemp:
tempT.append(valueFromFit(UwBFit,t))
scatter(masterTemp,(tempT))
show()
#title("logQwAB")
plot(temps,valqab)
tempT=[]
for t in masterTemp:
tempT.append(valueFromFit(QwABFit,t))
scatter(masterTemp,(tempT))
show()
#title("nmrtrlogQwAB")
plot(temps,valnmrtrqwb)
tempT=[]
for t in masterTemp:
tempT.append(valueFromFit(nmrtrQwABFit,t))
scatter(masterTemp,(tempT))
show()
"""
#JB#
return logMolFrac
#//end method stagePops | mit |
sjperkins/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 88 | 31139 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert (isinstance(n_classes, dict))
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
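# Editor's illustration (hypothetical values, not from the original source): for
# x_shape=(100, 5), y_shape=(100,), n_classes=3 and batch_size=32, the function above
# returns input_shape=[32, 5], output_shape=[32, 3], batch_size=32.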
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
y: numpy, pandas or Dask array or dictionary of aforementioned. Also
supports iterables.
n_classes: number of classes. Must be None or same type as y. In case `y`
is `dict` (or iterable which returns dict), `n_classes[key]` gives the
number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
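# Editor's sketch of typical use (names are hypothetical, not from the original):
#   feeder = setup_train_data_feeder(x_train, y_train, n_classes=3, batch_size=32)
# returns a DataFeeder (or StreamingDataFeeder / DaskDataFeeder for iterator / dask
# inputs) that shuffles and batches the data for the trainer.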
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
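# Illustrative note (hypothetical, not part of the original module): batching a
# fixed array for prediction. A 1-D input of 10 values is reshaped to (10, 1)
# and, with batch_size=4, split into three slices of sizes 4, 4 and 2;
# batch_size=None would instead return the whole array as a single-element list.
#   batches = setup_predict_data_feeder(np.arange(10.0), batch_size=4)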
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
    data: array-like. The collection to access.
    iloc: `int` or `list` of `int`s. Location(s) to access in `data`.
  Returns:
    The element of `data` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
      x: One feature sample, which can be either an Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or a dictionary of Nd numpy matrices.
      y: label vector, either floats for regression or class ids for
        classification. If a matrix, it is treated as a sequence of labels.
        Can be `None` for an unsupervised setting. Also supports a dictionary
        of labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else \
        dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if y_is_dict else check_array(y, y.dtype)
# self.n_classes is not None means we're converting raw target indices to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict \
else _check_dtype(self._x.dtype)
# note: self._output_dtype = np.float32 when y is None
self._output_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict \
else _check_dtype(self._y.dtype) if y is not None else np.float32
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
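# Illustrative usage sketch (hypothetical helper, not part of the original
# module): the typical DataFeeder life cycle when building a graph by hand.
def _example_data_feeder_roundtrip():
  import numpy as np
  x = np.random.rand(16, 3).astype(np.float32)
  y = np.random.randint(0, 4, size=16)
  feeder = DataFeeder(x, y, n_classes=4, batch_size=4)
  inp_ph, out_ph = feeder.input_builder()  # placeholders shaped [None, 3] / [None, 4]
  feed_fn = feeder.get_feed_dict_fn()
  # One call samples a (4, 3) feature batch and a (4, 4) one-hot label batch.
  return feed_fn()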
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
  Streaming data feeder allows reading data as it comes in, from disk or
  somewhere else. It is common to have these iterators rotate infinitely over
  the dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
      x: iterator each element of which returns one feature sample. Sample can
        be an Nd numpy matrix or a dictionary of Nd numpy matrices.
      y: iterator each element of which returns one label sample. Sample can be
        an Nd numpy matrix or a dictionary of Nd numpy matrices holding one or
        many class / regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set to
        `None`, the iterator is assumed to return already-batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
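# Illustrative usage sketch (hypothetical helper, not part of the original
# module): feeding from generators, which is the case StreamingDataFeeder is
# designed for; each element must already be a numpy array or numpy scalar.
def _example_streaming_feeder():
  import numpy as np
  x_iter = (np.random.rand(3).astype(np.float32) for _ in range(100))
  y_iter = (np.int64(i % 2) for i in range(100))
  feeder = StreamingDataFeeder(x_iter, y_iter, n_classes=2, batch_size=10)
  inp_ph, out_ph = feeder.input_builder()  # inherited from DataFeeder
  feed_fn = feeder.get_feed_dict_fn()
  # One call pulls 10 samples from the iterators and one-hot encodes the labels.
  return feed_fn()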
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate, so use an
        int value for this if you want consistent sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
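# Illustrative note (hypothetical, not part of the original module):
# DaskDataFeeder expects dask DataFrames/Series (e.g. built with
# dask.dataframe.from_pandas) and, unlike DataFeeder, receives its placeholders
# explicitly in get_feed_dict_fn:
#   ddf_x = dd.from_pandas(pandas_features, npartitions=2)
#   ddf_y = dd.from_pandas(pandas_labels, npartitions=2)
#   feeder = DaskDataFeeder(ddf_x, ddf_y, n_classes=2, batch_size=32)
#   feed_fn = feeder.get_feed_dict_fn(input_placeholder, output_placeholder)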
| apache-2.0 |
cbertinato/pandas | pandas/tests/test_downstream.py | 1 | 4179 | """
Testing that we work in the downstream packages
"""
import importlib
import subprocess
import sys
import numpy as np # noqa
import pytest
from pandas.compat import PY36
from pandas import DataFrame
from pandas.util import testing as tm
def import_module(name):
# we *only* want to skip if the module is truly not available
# and NOT just an actual import error because of pandas changes
if PY36:
try:
return importlib.import_module(name)
except ModuleNotFoundError: # noqa
pytest.skip("skipping as {} not available".format(name))
else:
try:
return importlib.import_module(name)
except ImportError as e:
if "No module named" in str(e) and name in str(e):
pytest.skip("skipping as {} not available".format(name))
raise
@pytest.fixture
def df():
return DataFrame({'A': [1, 2, 3]})
def test_dask(df):
toolz = import_module('toolz') # noqa
dask = import_module('dask') # noqa
import dask.dataframe as dd
ddf = dd.from_pandas(df, npartitions=3)
assert ddf.A is not None
assert ddf.compute() is not None
def test_xarray(df):
xarray = import_module('xarray') # noqa
assert df.to_xarray() is not None
def test_oo_optimizable():
# GH 21071
subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"])
@tm.network
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_statsmodels():
statsmodels = import_module('statsmodels') # noqa
import statsmodels.api as sm
import statsmodels.formula.api as smf
df = sm.datasets.get_rdataset("Guerry", "HistData").data
smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=df).fit()
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_scikit_learn(df):
sklearn = import_module('sklearn') # noqa
from sklearn import svm, datasets
digits = datasets.load_digits()
clf = svm.SVC(gamma=0.001, C=100.)
clf.fit(digits.data[:-1], digits.target[:-1])
clf.predict(digits.data[-1:])
# Cython import warning and traitlets
@tm.network
@pytest.mark.filterwarnings("ignore")
def test_seaborn():
seaborn = import_module('seaborn')
tips = seaborn.load_dataset("tips")
seaborn.stripplot(x="day", y="total_bill", data=tips)
def test_pandas_gbq(df):
pandas_gbq = import_module('pandas_gbq') # noqa
@pytest.mark.xfail(reason="0.7.0 pending")
@tm.network
def test_pandas_datareader():
pandas_datareader = import_module('pandas_datareader') # noqa
pandas_datareader.DataReader(
'F', 'quandl', '2017-01-01', '2017-02-01')
# importing from pandas, Cython import warning
@pytest.mark.filterwarnings("ignore:The 'warn':DeprecationWarning")
@pytest.mark.filterwarnings("ignore:pandas.util:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
@pytest.mark.skip(reason="gh-25778: geopandas stack issue")
def test_geopandas():
geopandas = import_module('geopandas') # noqa
fp = geopandas.datasets.get_path('naturalearth_lowres')
assert geopandas.read_file(fp) is not None
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
def test_pyarrow(df):
pyarrow = import_module('pyarrow') # noqa
table = pyarrow.Table.from_pandas(df)
result = table.to_pandas()
tm.assert_frame_equal(result, df)
@pytest.mark.xfail(reason="pandas-wheels-50", strict=False)
def test_missing_required_dependency():
# GH 23868
# To ensure proper isolation, we pass these flags
# -S : disable site-packages
# -s : disable user site-packages
# -E : disable PYTHON* env vars, especially PYTHONPATH
# And, that's apparently not enough, so we give up.
# https://github.com/MacPython/pandas-wheels/pull/50
call = ['python', '-sSE', '-c', 'import pandas']
with pytest.raises(subprocess.CalledProcessError) as exc:
subprocess.check_output(call, stderr=subprocess.STDOUT)
output = exc.value.stdout.decode()
for name in ['numpy', 'pytz', 'dateutil']:
assert name in output
| bsd-3-clause |
cgrima/rsr | rsr/fit.py | 1 | 4401 | """
Various tools for extracting signal components from a fit of the amplitude
distribution
"""
from . import pdf
from .Classdef import Statfit
import numpy as np
import time
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, report_fit
def param0(sample, method='basic'):
"""Estimate initial parameters for HK fitting
Arguments
---------
sample : sequence
amplitudes
Keywords
--------
method : string
method to compute the initial parameters
"""
if method == 'basic':
a = np.nanmean(sample)
s = np.nanstd(sample)
mu = 1.
return {'a':a, 's':s, 'mu':mu}
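# Illustrative note (hypothetical, not part of the original module): for a clean
# amplitude vector the 'basic' guess is simply the sample mean, the sample
# standard deviation and mu = 1, e.g.
#   param0(np.random.rayleigh(0.2, 1000))
#     -> {'a': ~0.25, 's': ~0.13, 'mu': 1.0}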
def lmfit(sample, fit_model='hk', bins='auto', p0 = None,
xtol=1e-4, ftol=1e-4):
"""Lmfit
Arguments
---------
sample : sequence
amplitudes between 0 and 1.
Keywords
--------
fit_model : string
name of the function (in pdf module) to use for the fit
bins : string
method to compute the bin width (inherited from numpy.histogram)
p0 : dict
Initial parameters. If None, estimated automatically.
    xtol : float
        relative tolerance on the parameter values for leastsq convergence
    ftol : float
        relative tolerance on the sum of squared residuals for leastsq convergence
Return
------
A Statfit Class
"""
start = time.time()
winsize = len(sample)
bad = False
#--------------------------------------------------------------------------
# Clean sample
#--------------------------------------------------------------------------
sample = np.array(sample)
sample = sample[np.isfinite(sample)]
if len(sample) == 0:
bad = True
sample = np.zeros(10)+1
#--------------------------------------------------------------------------
# Make the histogram
#--------------------------------------------------------------------------
# n, edges, patches = hist(sample, bins=bins, normed=True)
n, edges = np.histogram(sample, bins=bins, density=True)
# plt.clf()
x = ((np.roll(edges, -1) + edges)/2.)[0:-1]
#--------------------------------------------------------------------------
# Initial Parameters for the fit
#--------------------------------------------------------------------------
if p0 is None:
p0 = param0(sample)
prm0 = Parameters()
# (Name, Value, Vary, Min, Max, Expr)
prm0.add('a', p0['a'], True, 0, 1, None)
prm0.add('s', p0['s'], True, 0, 1, None)
prm0.add('mu', p0['mu'], True, .5, 10, None)
prm0.add('pt', np.average(sample)**2,False, 0, 1, 'a**2+2*s**2*mu')
#if fit_model == 'hk':
# # From [Dutt and Greenleaf. 1994, eq.14]
# prm0.add('a4', np.average(sample)**4,False, 0, 1,
# '8*(1+1/mu)*s**4 + 8*s**2*s**2 + a**4')
#--------------------------------------------------------------------------
# Fit
#--------------------------------------------------------------------------
pdf2use = getattr(pdf, fit_model)
# use 'lbfgs' fit if error with 'leastsq' fit
try:
p = minimize(pdf2use, prm0, args=(x, n), method='leastsq',
xtol=xtol, ftol=ftol)
except KeyboardInterrupt:
raise
except:
print('!! Error with LEASTSQ fit, use L-BFGS-B instead')
p = minimize(pdf2use, prm0, args=(x, n), method='lbfgs')
#--------------------------------------------------------------------------
# Output
#--------------------------------------------------------------------------
elapsed = time.time() - start
values = {}
# Create values dict For lmfit >0.9.0 compatibility since it is no longer
# in the minimize output
for i in p.params.keys():
values[i] = p.params[i].value
# Results
result = Statfit(sample, pdf2use, values, p.params,
p.chisqr, p.redchi, elapsed, p.nfev, p.message, p.success,
p.residual, x, n, edges, bins=bins)
# Identify bad results
if bad is True:
result.success = False
result.values['a'] = 0
result.values['s'] = 0
result.values['mu'] = 0
result.values['pt'] = 0
result.chisqr = 0
result.redchi = 0
result.message = 'No valid data in the sample'
result.residual = 0
return result
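# Illustrative usage sketch (hypothetical, not part of the original module):
# fitting a synthetic amplitude sample with the HK model defined in the
# companion pdf module.
#   sample = np.clip(np.random.rayleigh(0.2, 5000), 0, 1)
#   stat = lmfit(sample, fit_model='hk', bins='auto')
#   if stat.success:
#       print(stat.values)  # fitted {'a', 's', 'mu', 'pt'}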
| mit |
gpersistence/tstop | python/persistence/PartitionData.py | 1 | 8153 | #TSTOP
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
import os
import sys
import argparse
from math import ceil
import numpy
from sklearn.cross_validation import StratifiedKFold
from Datatypes.JSONObject import load_data, save_data
from Datatypes.Segments import SegmentInfo
from Datatypes.Configuration import Configuration
from Datatypes.TrainTestPartitions import TrainTestPartition, TrainTestPartitions
def PartitionData(segment_info, split, avoid_overlap=False, segment_size=0,
file_based=False, preserve_labels=False, override_preset=False, surpress_warning=False, seed=None) :
'''
Accepts a list of Datatype.Segments.SegmentInfo and a float between 0 and 1,
and outputs a pair of lists of indices, (train, test) corresponding
    to a partition of the input list
len(train) approximates split * len(segment_info)
Intersection of train and test is an empty set
Union of train and test is not guaranteed to be range(len(segment_info))
Optional arguments:
avoid_overlap omits entries in test that would have overlapping data with entries in train,
as indicated by the range [segment_start:segment_start+segment_size]
    segment_size interacts with avoid_overlap, because only segment_start is contained in the
SegmentInfo class
file_based creates partitions where segments with the same filename for source data are
in the same partition
    preserve_labels tries to split the populations of labels evenly
'''
segment_count = len(segment_info)
segment_range = range(segment_count)
# check to see if we have a preset train / test split for all data and we aren't overriding that
if not override_preset and [0 for s in segment_info if s.learning == None] == [] :
return TrainTestPartition([i for i in segment_range if segment_info[i].learning == 'train'],
[i for i in segment_range if segment_info[i].learning == 'test'], None)
train_goal_len = int(ceil(segment_count * split))
if preserve_labels :
labels = [s.max_label() for s in segment_info]
label_set = list(set(labels))
label_count = [(l0,len([l for l in labels if l == l0])) for l0 in label_set]
label_goal = [(str(l), int(round(c * split))) for (l,c) in label_count]
for ((l0,g),(l1,c)) in zip(label_goal, label_count) :
            if ((g == 0) or (g == c)) and not surpress_warning:
print "PartitionData warning: not enough entries (%d) of label %s to properly make a train / test split of ratio %s" % (c, l0, split)
label_goal = dict(label_goal)
train = []
test = []
if seed != None :
random.seed(seed)
state = random.getstate()
if file_based :
files = list(set([s.filename for s in segment_info]))
random.shuffle(files)
for f in files :
f_indices = [x for (x,y) in zip(segment_range, segment_info) if y.filename == f]
if preserve_labels :
f_labels = [str(labels[i]) for i in f_indices]
extend_train = True
for l in label_goal.keys() :
count = len([l0 for l0 in f_labels if l0 == l])
if count > label_goal[l] :
extend_train = False
break
if extend_train :
train.extend(f_indices)
for l in label_goal.keys() :
count = len([l0 for l0 in f_labels if l0 == l])
label_goal[l] = label_goal[l] - count
else :
test.extend(f_indices)
else :
if len(train) + len(f_indices) < train_goal_len :
train.extend(f_indices)
else :
test.extend(f_indices)
else :
random.shuffle(segment_range)
if preserve_labels :
for i in segment_range:
l = str(labels[i])
if label_goal[l] > 0 :
train.append(i)
label_goal[l] = label_goal[l] - 1
else :
test.append(i)
else :
train = segment_range[0:train_goal_len]
test = segment_range[train_goal_len:]
return TrainTestPartition(train,test,state)
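# Illustrative usage sketch (hypothetical, not part of the original source): a
# label-preserving 80/20 split over already-loaded SegmentInfo records; the
# returned indices point back into segment_info.
#   partition = PartitionData(segment_info, 0.8, preserve_labels=True, seed=0)
#   train_segments = [segment_info[i] for i in partition.train]
#   test_segments = [segment_info[i] for i in partition.test]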
def generate_partitions(config, segment_info, cv_iterations=0, seed=None) :
partition = PartitionData(segment_info,
config.learning_split,
avoid_overlap=True,
segment_size=config.segment_size,
file_based=True if (config.data_type == "BirdSoundsSegments" or
config.data_type == "KitchenMocapSegments") \
else False,
preserve_labels=True,
seed=seed)
all_labels = [segment_info[i].max_label() for i in partition.train]
if cv_iterations > 0 :
skf = StratifiedKFold(all_labels, n_folds=cv_iterations)
cross_validation = [TrainTestPartition([partition.train[i] for i in train_index],
[partition.train[i] for i in test_index], None) \
for train_index, test_index in skf]
else :
cross_validation = None
learning_trials = [PartitionData(segment_info,
config.learning_split,
avoid_overlap=True,
segment_size=config.segment_size,
file_based=True if (config.data_type == "BirdSoundsSegments" or
config.data_type == "KitchenMocapSegments") \
else False,
preserve_labels=True,
seed=None) for i in range(config.learning_iterations)]
return TrainTestPartitions(config, segment_info, cross_validation, learning_trials)
if __name__ == "__main__" :
parser = argparse.ArgumentParser("Tool to generate train / test splits for testing and cross validation")
parser.add_argument("--segments", "-i")
parser.add_argument("--outfile", "-o")
parser.add_argument("--learning-split", "-s", type=float)
parser.add_argument("--learning-iterations", "-I", type=int)
parser.add_argument("--cv-iterations", "-v", default=5, type=int)
parser.add_argument("--seed", "-S")
args = parser.parse_args(sys.argv[1:])
segments_json = load_data(args.segments, 'segments', None, None, sys.argv[0] + " : ")
if segments_json == None :
print "Could not load Segments from %s" % (args.segments,)
sys.exit(1)
segment_info = [SegmentInfo.fromJSONDict(s) for s in segments_json['segments']]
config = Configuration.fromJSONDict(segments_json['config'])
if args.learning_split != None :
config.learning_split = args.learning_split
if args.learning_iterations != None :
config.learning_iterations = args.learning_iterations
output = generate_partitions(config, segment_info, cv_iterations=args.cv_iterations, seed=args.seed)
if args.outfile == None :
args.outfile = TrainTestPartitions.get_partition_filename(config)
print "Writing %s" % (args.outfile,)
save_data(args.outfile, output.toJSONDict())
| gpl-3.0 |