repo_name | path | copies | size | content | license
---|---|---|---|---|---|
YuncyYe/ml | mlf/pocketv1.py | 1 | 3503 |
#
#pocket Algorithm
#
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import random
##############################################
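# sepLine() returns the y-coordinate of the decision boundary
#   w[0] + w[1]*x + w[2]*y = 0   =>   y = -(w[0] + w[1]*x) / w[2]
# so drawSepLine() can plot the current separating line over [minX, maxX).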
def sepLine(w, x):
    return -((w[0] + w[1]*x) / w[2])
#end

def drawSepLine(w, minX, maxX):
    sepx = range(minX, maxX)
    sepy = []
    for e in sepx:
        tmp = sepLine(w, e)
        sepy.append(tmp)
    #end for
    plt.plot(sepx, sepy)
#end drawSepLine
##############################################
ls=np.array([
[1.0, 0.5, 1]
,[1.5, 14.5, 1]
,[2.5, 1.5, 1]
,[2.8, 3.5, 1]
,[4.5, 13.0, 1]
,[6.0, 8.0, 1]
,[7.0, 16.0, 1] #noise data
,[8.0, 5.5, 1]
,[9.5, 7.0, 1]
,[12.0, 2.5, 1]
,[14.0, 2.0, 1]
#,[7.0, 16.0, 1] #noise data
])
rs=np.array([
[2.0, 18.0, -1]
,[3.0, 17.5, -1]
,[3.5, 0.7, -1] #noise data
,[8.0,11.5, -1]
,[8.5,13.5, -1]
,[8.5,13.0, -1]
,[9.0,15, -1]
,[12.0,20.0,-1]
,[16.0,17.0,-1]
#,[3.5, 0.7, -1] #noise data
])
##construct training data
rtd = np.concatenate((ls,rs))
minX = (int)(np.min(rtd[:,:1]))-3
maxX = (int)(np.max(rtd[:,:1]))+3
###plot the data
plt.xlim( (minX, maxX) )
plt.ylim( (np.min(rtd[:,1:2]-3), np.max(rtd[:,1:2]+3)) )
plt.plot(ls[:,:1], ls[:, 1:2], '*')
plt.plot(rs[:,:1], rs[:, 1:2], '+')
##############pla-begin
x0 = np.zeros( (len(rtd), 1) )
x0[:]=1.0
td = np.concatenate( (x0, rtd[:,:1], rtd[:,1:2], rtd[:,2:3]), 1 )
#This is the initial value of w. td[0] includes y, so we subtract 1.
w=np.zeros( len(td[0])-1 );
#todo: we could set this to the float maximum instead of a large constant
weighOfPocket=1000000000.0
wPocket=w
#
#ensure all points are correct
#maxIter=900000
maxIter=1200000
weighOfPocketThres=0.05
curIter=0
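# Pocket algorithm main loop: run standard PLA updates on randomly chosen
# samples; after each pass count how many points w misclassifies ("weight");
# if that count beats the best seen so far, keep w in the pocket (wPocket).
# Stop when nothing changes, the error threshold is reached, or maxIter passes.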
while(curIter<maxIter):
    curIter = curIter + 1
    #[begin----the following is typical pla----
    isModifing = False
    #check each point for w
    for ti in range(len(td)):
        rndIdx = random.randint(0, len(td)-1)
        sample = td[rndIdx]
        sx = sample[:len(sample)-1]; sy = sample[len(sample)-1]
        t = np.inner(w, sx)
        ty = np.sign(t)
        #print(idx, ty, sy)
        if(ty != sy):
            #failed, we need to update w
            w = w + sy*sx
            isModifing = True
        #end if
    #end for
    if(isModifing == False):
        break
    #todo: we should also update the pocket here.
    #end]
    #pick a random element of the sample to try to improve w
    #rndIdx=random.randint(0, len(td)-1)
    #sample = td[rndIdx]
    #sx = sample[:len(sample)-1]; sy=sample[len(sample)-1]
    #w = w + sy*sx
    #now compute the error count (weight) of this w over all points
    weight = 0.
    for idx in range(len(td)):
        sample = td[idx]
        sx = sample[:len(sample)-1]; sy = sample[len(sample)-1]
        t = np.inner(w, sx)
        ty = np.sign(t)
        #print(idx, ty, sy)
        if(ty != sy):
            weight += 1.0
    #end for
    #print("The curIter is ", curIter)
    #print("The weighOfPocket is ", weighOfPocket)
    #print("The w is ", w)
    #drawSepLine(w, minX, maxX)
    #if the new w is better than the one in the pocket, update the pocket
    if(weight < weighOfPocket):
        weighOfPocket = weight
        wPocket = w
    #end if
    if(weighOfPocket < weighOfPocketThres):
        break
#end while
##############pla-end
print("The curIter is ", curIter)
print("The weighOfPocket is ", weighOfPocket)
print("The w is ", w)
#show the separator line
drawSepLine(w, minX, maxX)
###
#In [93]: import pla
#In [94]: reload(pla)
#
if __name__ == "__main__":
    pass
#end
| apache-2.0 |
3like3beer/openrevman | openrevman/control_computer/solver.py | 1 | 8426 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import collections
import pulp
from numpy import dot
from pandas import DataFrame, read_table
from scipy.sparse import csgraph
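# This module solves a deterministic network revenue-management problem with PuLP:
# a Problem bundles demand, price and capacity vectors plus a demand/resource
# utilization matrix, and a Solver turns it into Controls, i.e. the accepted
# demand per demand class (the LP's primal values) and the product bid prices
# (dual values of the capacity constraints).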
class Controls:
def __init__(self, accepted_demand: DataFrame, product_bid_prices: DataFrame, expected_revenue: float = None):
self.accepted_demand = accepted_demand
self.product_bid_prices = product_bid_prices
self.expected_revenue = expected_revenue
class Problem:
def __init__(self, demand_vector, price_vector, capacity_vector, demand_utilization_matrix, demand_profile=None):
self.demand_vector = demand_vector
self.price_vector = price_vector
self.capacity_vector = capacity_vector
self.demand_utilization_matrix = demand_utilization_matrix
self.demand_profile = demand_profile
self.demand_correlations = self.get_demand_correlations()
def get_demand_correlations(self):
return dot(self.demand_utilization_matrix, self.demand_utilization_matrix.transpose())
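# get_subproblems() below splits the problem into independent pieces: demands
# that never share a resource fall into different connected components of the
# demand-correlation graph, so each component can be solved separately and the
# resulting controls merged afterwards (see merge_controls).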
def get_subproblems(self, eps=0.1):
subproblems = []
labels = csgraph.connected_components(self.demand_correlations, directed=False)[1]
split_index = collections.Counter(labels).values()
prev = 0
for i in split_index:
demand_vector = self.demand_vector[prev:prev + i]
price_vector = self.price_vector[prev:prev + i]
capacity_vector = self.capacity_vector
demand_utilization_matrix = self.demand_utilization_matrix.ix[prev:prev + i, :]
demand_profile = None
if self.demand_profile is not None:
demand_profile = self.demand_profile.ix[prev:prev + i - 1, :]
subproblems.append(
Problem(demand_vector=demand_vector, price_vector=price_vector, capacity_vector=capacity_vector,
demand_utilization_matrix=demand_utilization_matrix, demand_profile=demand_profile))
prev = i
return subproblems
class Solver:
def __init__(self, optimizer):
self.optimizer = optimizer
self.controls = None
def optimize_controls(self, problem):
self.controls = pulp_solve(problem.demand_vector, problem.price_vector, problem.capacity_vector,
problem.demand_utilization_matrix)
return self.controls
def optimize_controls_multi_period(self, problem, eps):
if problem.demand_profile.shape[1] > 1:
for period in problem.demand_profile.columns:
if self.controls:
new_control = pulp_solve(problem.demand_profile.ix[:, period], problem.price_vector,
problem.capacity_vector,
problem.demand_utilization_matrix)
if self.is_new_ctrl_more_profitable(new_control, 0.1):
self.blinde_control(new_control, eps)
else:
self.controls = self.optimize_controls(problem)
else:
self.controls = self.optimize_controls(problem)
return self.controls
def is_new_ctrl_more_profitable(self, new_control, eps):
rev1 = self.controls.expected_revenue
if new_control.expected_revenue - rev1 > rev1 * eps:
return True
return False
def blinde_control(self, new_control, eps):
self.controls.accepted_demand = self.controls.accepted_demand * eps
self.controls.product_bid_prices = self.controls.product_bid_prices / eps
self.controls.expected_revenue = new_control.expected_revenue
def to_data_frame(data):
df = DataFrame.transpose(read_table(data, delim_whitespace=True, header=None))
df.columns = [(col + 1) for col in df.columns]
return df
def to_data_frame2(data):
df = DataFrame(read_table(data, delim_whitespace=True, header=None))
return df
def create_problem_with_data(demand_data, capacity_data, demand_utilization_data, demand_profile_data=None):
demand_vector, capacity_vector, demand_profile, demand_utilization_matrix = load_data_to_df(capacity_data,
demand_data,
demand_profile_data,
demand_utilization_data)
return Problem(demand_vector.ix[:, 1], demand_vector.ix[:, 2], capacity_vector,
demand_utilization_matrix.ix[:, :],
demand_profile)
def merge_controls(controls_list):
first_time = True
for controls in controls_list:
if first_time:
accepted_demand = controls.accepted_demand
product_bid_prices = controls.product_bid_prices
expected_revenue = controls.expected_revenue
first_time = False
else:
accepted_demand = accepted_demand.append(controls.accepted_demand)
product_bid_prices = product_bid_prices.append(controls.product_bid_prices)
expected_revenue = expected_revenue + controls.expected_revenue
return Controls(accepted_demand=accepted_demand, product_bid_prices=product_bid_prices,
expected_revenue=expected_revenue)
def load_data_to_df(capacity_data, demand_data, demand_profile_data, demand_utilization_data):
demand_vector = to_data_frame(demand_data)
capacity_vector = to_data_frame(capacity_data)
demand_utilization_matrix = to_data_frame2(demand_utilization_data)
assert demand_utilization_matrix.shape[0] == demand_vector.shape[0]
assert demand_utilization_matrix.shape[1] == capacity_vector.shape[0]
if demand_profile_data:
demand_profile = to_data_frame(demand_profile_data)
assert demand_profile.shape[0] == demand_vector.shape[0]
else:
demand_profile = None
return demand_vector, capacity_vector, demand_profile, demand_utilization_matrix
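# pulp_solve() builds the deterministic LP:
#   maximize    sum_i price_i * x_i
#   subject to  sum_i a[i, p] * x_i <= capacity_p   ("Capa_p", one per product/resource)
#               x_i <= demand_i                     ("Demand_i", one per demand class)
#               x_i >= 0
# Accepted demand is read from the optimal x values and bid prices from the
# duals (.pi) of the capacity constraints.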
def pulp_solve(demand_vector, price_vector, capacity_vector, demand_utilization_matrix):
revman = create_problem()
x = create_variables(demand_vector)
set_objective(demand_vector, price_vector, revman, x)
add_product_constraints(capacity_vector, demand_utilization_matrix, demand_vector, revman, x)
add_demand_constraints(demand_vector, revman, x)
solve_problem(revman)
accepted_demand = get_accepted_demand(x)
product_bid_prices = get_bid_prices(capacity_vector, revman)
expected_revenue = get_expected_revenue(revman)
return Controls((accepted_demand), (product_bid_prices), expected_revenue)
def solve_problem(revman):
revman.solve(pulp.PULP_CBC_CMD())
# revman.writeLP("temp.txt")
# print(pulp.LpStatus[revman.status])
def create_problem():
return pulp.LpProblem("revman", pulp.LpMaximize)
def get_expected_revenue(revman):
return pulp.value(revman.objective)
def get_accepted_demand(x):
return DataFrame({'accepted_demand': [(x[str(i)].value()) for i in x]})
def get_bid_prices(capacity_vector, revman):
bid_prices_list = [revman.constraints.get("Capa_" + str(i)).pi for (i, c) in (capacity_vector.iterrows())]
return DataFrame({'bid_prices_list': bid_prices_list})
def add_demand_constraints(demand_vector, revman, x):
for (demand_index, demand) in (demand_vector.iteritems()):
revman.addConstraint((x[str(demand_index)]) <= demand, name="Demand_" + str(demand_index))
def add_product_constraints(capacity_vector, demand_utilization_matrix, demand_vector, revman, x):
for (product_index, capacity) in (capacity_vector.iterrows()):
revman.addConstraint(pulp.lpSum(
[x[str(i)] * demand_utilization_matrix.ix[i, product_index] for (i, d) in demand_vector.iteritems()]) <=
capacity,
name="Capa_" + str(product_index))
def set_objective(demand_vector, price_vector, revman, x):
objective = pulp.LpAffineExpression([(x[str(i)], price_vector[i]) for (i, d) in demand_vector.iteritems()])
revman.setObjective(objective)
def create_variables(demand_vector):
x = dict([(str(i), pulp.LpVariable(name="x" + str(i), lowBound=0, cat=pulp.LpContinuous)) for (i, t) in
demand_vector.iteritems()])
return x
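# Rough usage sketch (file names are placeholders; the inputs are the
# whitespace-separated tables expected by load_data_to_df):
#   with open("demand.txt") as d, open("capacity.txt") as c, open("demand_utilization.txt") as u:
#       problem = create_problem_with_data(d, c, u)
#   controls = Solver(optimizer=None).optimize_controls(problem)
#   print(controls.accepted_demand, controls.product_bid_prices, controls.expected_revenue)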
| gpl-3.0 |
fosfataza/protwis | mutational_landscape/views.py | 1 | 34851 |
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse
from django.core.cache import cache
from django.db.models import Count, Min, Sum, Avg, Q
from django.core.cache import cache
from django.views.decorators.cache import cache_page
from protein.models import Protein, ProteinConformation, ProteinAlias, ProteinFamily, Gene, ProteinGProtein, ProteinGProteinPair
from residue.models import Residue, ResiduePositionSet, ResidueSet
from mutational_landscape.models import NaturalMutations, CancerMutations, DiseaseMutations, PTMs, NHSPrescribings
from common.diagrams_gpcr import DrawHelixBox, DrawSnakePlot
from drugs.models import Drugs
from mutation.functions import *
from mutation.models import *
from interaction.models import *
from interaction.views import ajax #import x-tal interactions
from common import definitions
from collections import OrderedDict
from common.views import AbsTargetSelection
from common.views import AbsSegmentSelection
from family.views import linear_gradient, color_dict, RGB_to_hex, hex_to_RGB
import re
import json
import numpy as np
from collections import OrderedDict
from copy import deepcopy
from io import BytesIO
import re
import math
import urllib
import xlsxwriter #sudo pip3 install XlsxWriter
import operator
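# Views for the GPCRdb mutational-landscape pages: target selection, rendering of
# missense variants onto helix-box and snake plots (annotated with PTMs,
# microswitches, sodium-pocket, ligand-binding and G-protein contact sites),
# ajax endpoints for natural variants and PTMs, summary statistics, and an NHS
# economic-burden view.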
class TargetSelection(AbsTargetSelection):
step = 1
number_of_steps = 1
filters = False
psets = False
# docs = 'mutations.html#mutation-browser'
selection_boxes = OrderedDict([
('reference', False),
('targets', True),
('segments', False),
])
buttons = {
'continue': {
'label': 'Show missense variants',
'url': '/mutational_landscape/render',
'color': 'success',
},
}
default_species = False
def render_variants(request, protein=None, family=None, download=None, receptor_class=None, gn=None, aa=None, **response_kwargs):
simple_selection = request.session.get('selection', False)
proteins = []
if protein: # if protein static page
proteins.append(Protein.objects.get(entry_name=protein.lower()))
target_type = 'protein'
# flatten the selection into individual proteins
if simple_selection:
for target in simple_selection.targets:
if target.type == 'protein':
proteins.append(target.item)
elif target.type == 'family':
target_type = 'family'
familyname = target.item
# species filter
species_list = []
for species in simple_selection.species:
species_list.append(species.item)
# annotation filter
protein_source_list = []
for protein_source in simple_selection.annotation:
protein_source_list.append(protein_source.item)
if species_list:
family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,
species__in=(species_list),
source__in=(protein_source_list)).select_related('residue_numbering_scheme', 'species')
else:
family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,
source__in=(protein_source_list)).select_related('residue_numbering_scheme', 'species')
for fp in family_proteins:
proteins.append(fp)
NMs = NaturalMutations.objects.filter(Q(protein__in=proteins)).prefetch_related('residue__generic_number','residue__display_generic_number','residue__protein_segment','protein')
ptms = PTMs.objects.filter(Q(protein__in=proteins)).prefetch_related('residue')
ptms_dict = {}
## MICROSWITCHES
micro_switches_rset = ResiduePositionSet.objects.get(name="Microswitches")
ms_label = []
for residue in micro_switches_rset.residue_position.all():
ms_label.append(residue.label)
ms_object = Residue.objects.filter(protein_conformation__protein=proteins[0], generic_number__label__in=ms_label)
ms_sequence_numbers = []
for ms in ms_object:
ms_sequence_numbers.append(ms.sequence_number)
## SODIUM POCKET
sodium_pocket_rset = ResiduePositionSet.objects.get(name="Sodium pocket")
sp_label = []
for residue in sodium_pocket_rset.residue_position.all():
sp_label.append(residue.label)
sp_object = Residue.objects.filter(protein_conformation__protein=proteins[0], generic_number__label__in=ms_label)
sp_sequence_numbers = []
for sp in sp_object:
sp_sequence_numbers.append(sp.sequence_number)
for ptm in ptms:
ptms_dict[ptm.residue.sequence_number] = ptm.modification
## G PROTEIN INTERACTION POSITIONS
# THIS SHOULD BE CLASS SPECIFIC (different set)
rset = ResiduePositionSet.objects.get(name='Signalling protein pocket')
gprotein_generic_set = []
for residue in rset.residue_position.all():
gprotein_generic_set.append(residue.label)
### GET LB INTERACTION DATA
# get also ortholog proteins, which might have been crystallised to extract
# interaction data also from those
if protein:
orthologs = Protein.objects.filter(family__slug=proteins[0].family.slug, sequence_type__slug='wt')
else:
orthologs = Protein.objects.filter(family__slug__startswith=proteins[0].family.slug, sequence_type__slug='wt')
interactions = ResidueFragmentInteraction.objects.filter(
structure_ligand_pair__structure__protein_conformation__protein__parent__in=orthologs, structure_ligand_pair__annotated=True).exclude(interaction_type__type ='hidden').all()
interaction_data = {}
for interaction in interactions:
if interaction.rotamer.residue.generic_number:
sequence_number = interaction.rotamer.residue.sequence_number
# sequence_number = lookup[interaction.rotamer.residue.generic_number.label]
label = interaction.rotamer.residue.generic_number.label
aa = interaction.rotamer.residue.amino_acid
interactiontype = interaction.interaction_type.name
if sequence_number not in interaction_data:
interaction_data[sequence_number] = []
if interactiontype not in interaction_data[sequence_number]:
interaction_data[sequence_number].append(interactiontype)
if target_type == 'family':
pc = ProteinConformation.objects.get(protein__family__name=familyname, protein__sequence_type__slug='consensus')
residuelist = Residue.objects.filter(protein_conformation=pc).order_by('sequence_number').prefetch_related('protein_segment', 'generic_number', 'display_generic_number')
else:
residuelist = Residue.objects.filter(protein_conformation__protein=proteins[0]).prefetch_related('protein_segment', 'display_generic_number', 'generic_number')
jsondata = {}
for NM in NMs:
functional_annotation = ''
SN = NM.residue.sequence_number
if NM.residue.generic_number:
GN = NM.residue.generic_number.label
else:
GN = ''
if SN in sp_sequence_numbers:
functional_annotation += 'SodiumPocket '
if SN in ms_sequence_numbers:
functional_annotation += 'MicroSwitch '
if SN in ptms_dict:
functional_annotation += 'PTM (' + ptms_dict[SN] + ') '
if SN in interaction_data:
functional_annotation += 'LB (' + ', '.join(interaction_data[SN]) + ') '
if GN in gprotein_generic_set:
functional_annotation += 'GP (contact) '
ms_type = NM.type
if ms_type == 'missense':
effect = 'deleterious' if NM.sift_score <= 0.05 or NM.polyphen_score >= 0.1 else 'tolerated'
color = '#e30e0e' if NM.sift_score <= 0.05 or NM.polyphen_score >= 0.1 else '#70c070'
else:
effect = 'deleterious'
color = '#575c9d'
# account for multiple mutations at this position!
NM.functional_annotation = functional_annotation
# print(NM.functional_annotation)
jsondata[SN] = [NM.amino_acid, NM.allele_frequency, NM.allele_count, NM.allele_number, NM.number_homozygotes, NM.type, effect, color, functional_annotation]
natural_mutation_list = {}
max_snp_pos = 1
for NM in NMs:
if NM.residue.generic_number:
if NM.residue.generic_number.label in natural_mutation_list:
natural_mutation_list[NM.residue.generic_number.label]['val'] += 1
if not str(NM.amino_acid) in natural_mutation_list[NM.residue.generic_number.label]['AA']:
natural_mutation_list[NM.residue.generic_number.label]['AA'] = natural_mutation_list[NM.residue.generic_number.label]['AA'] + str(NM.amino_acid) + ' '
if natural_mutation_list[NM.residue.generic_number.label]['val'] > max_snp_pos:
max_snp_pos = natural_mutation_list[NM.residue.generic_number.label]['val']
else:
natural_mutation_list[NM.residue.generic_number.label] = {'val':1, 'AA': NM.amino_acid + ' '}
jsondata_natural_mutations = {}
for r in residuelist:
if r.generic_number:
if r.generic_number.label in natural_mutation_list:
jsondata_natural_mutations[r.sequence_number] = natural_mutation_list[r.generic_number.label]
jsondata_natural_mutations['color'] = linear_gradient(start_hex="#c79494", finish_hex="#c40100", n=max_snp_pos)
# jsondata_cancer_mutations['color'] = linear_gradient(start_hex="#d8baff", finish_hex="#422d65", n=max_cancer_pos)
# jsondata_disease_mutations['color'] = linear_gradient(start_hex="#ffa1b1", finish_hex="#6e000b", n=max_disease_pos)
#
SnakePlot = DrawSnakePlot(residuelist, "Class A", protein, nobuttons=1)
HelixBox = DrawHelixBox(residuelist, 'Class A', protein, nobuttons=1)
# EXCEL TABLE EXPORT
if download:
data = []
for r in NMs:
values = r.__dict__
data.append(values)
headers = ['type', 'amino_acid', 'allele_count', 'allele_number', 'allele_frequency', 'polyphen_score', 'sift_score', 'number_homozygotes', 'functional_annotation']
# EXCEL SOLUTION
output = BytesIO()
workbook = xlsxwriter.Workbook(output)
worksheet = workbook.add_worksheet()
col = 0
for h in headers:
worksheet.write(0, col, h)
col += 1
row = 1
for d in data:
col = 0
for h in headers:
worksheet.write(row, col, str(d[h]))
col += 1
row += 1
workbook.close()
output.seek(0)
xlsx_data = output.read()
response = HttpResponse(xlsx_data, content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=GPCRdb_' + proteins[0].entry_name + '_variant_data.xlsx' # % 'mutations'
return response
return render(request, 'browser.html', {'mutations': NMs, 'type': target_type, 'HelixBox': HelixBox, 'SnakePlot': SnakePlot, 'receptor': str(proteins[0].entry_name), 'mutations_pos_list': json.dumps(jsondata), 'natural_mutations_pos_list': json.dumps(jsondata_natural_mutations)})
def ajaxNaturalMutation(request, slug, **response_kwargs):
name_of_cache = 'ajaxNaturalMutation_'+slug
ptms = PTMs.objects.filter(protein__entry_name=slug).prefetch_related('residue')
ptms_dict = {}
for ptm in ptms:
ptms_dict[ptm.residue.sequence_number] = ptm.modification
## MICROSWITCHES
micro_switches_rset = ResiduePositionSet.objects.get(name="Microswitches")
ms_label = []
for residue in micro_switches_rset.residue_position.all():
ms_label.append(residue.label)
ms_object = Residue.objects.filter(protein_conformation__protein__entry_name=slug, generic_number__label__in=ms_label)
ms_sequence_numbers = []
for ms in ms_object:
ms_sequence_numbers.append(ms.sequence_number)
## SODIUM POCKET
sodium_pocket_rset = ResiduePositionSet.objects.get(name="Sodium pocket")
sp_label = []
for residue in sodium_pocket_rset.residue_position.all():
sp_label.append(residue.label)
sp_object = Residue.objects.filter(protein_conformation__protein__entry_name=slug, generic_number__label__in=ms_label)
sp_sequence_numbers = []
for sp in sp_object:
sp_sequence_numbers.append(sp.sequence_number)
## G PROTEIN INTERACTION POSITIONS
# THIS SHOULD BE CLASS SPECIFIC (different set)
rset = ResiduePositionSet.objects.get(name='Signalling protein pocket')
gprotein_generic_set = []
for residue in rset.residue_position.all():
gprotein_generic_set.append(residue.label)
### GET LB INTERACTION DATA
# get also ortholog proteins, which might have been crystallised to extract
# interaction data also from those
p = Protein.objects.get(entry_name=slug)
orthologs = Protein.objects.filter(family__slug__startswith=p.family.slug, sequence_type__slug='wt')
interactions = ResidueFragmentInteraction.objects.filter(
structure_ligand_pair__structure__protein_conformation__protein__parent__in=orthologs, structure_ligand_pair__annotated=True).exclude(interaction_type__type ='hidden').order_by('rotamer__residue__sequence_number')
interaction_data = {}
for interaction in interactions:
if interaction.rotamer.residue.generic_number:
sequence_number = interaction.rotamer.residue.sequence_number
# sequence_number = lookup[interaction.rotamer.residue.generic_number.label]
label = interaction.rotamer.residue.generic_number.label
aa = interaction.rotamer.residue.amino_acid
interactiontype = interaction.interaction_type.name
if sequence_number not in interaction_data:
interaction_data[sequence_number] = []
if interactiontype not in interaction_data[sequence_number]:
interaction_data[sequence_number].append(interactiontype)
jsondata = cache.get(name_of_cache)
if jsondata == None:
jsondata = {}
NMs = NaturalMutations.objects.filter(protein__entry_name=slug).prefetch_related('residue')
for NM in NMs:
SN = NM.residue.sequence_number
type = NM.type
if type == 'missense':
effect = 'deleterious' if NM.sift_score <= 0.05 or NM.polyphen_score >= 0.1 else 'tolerated'
color = '#e30e0e' if NM.sift_score <= 0.05 or NM.polyphen_score >= 0.1 else '#70c070'
else:
effect = 'deleterious'
color = '#575c9d'
functional_annotation = ''
SN = NM.residue.sequence_number
if NM.residue.generic_number:
GN = NM.residue.generic_number.label
else:
GN = ''
if SN in sp_sequence_numbers:
functional_annotation += 'SodiumPocket '
if SN in ms_sequence_numbers:
functional_annotation += 'MicroSwitch '
if SN in ptms_dict:
functional_annotation += 'PTM (' + ptms_dict[SN] + ') '
if SN in interaction_data:
functional_annotation += 'LB (' + ', '.join(interaction_data[SN]) + ') '
if GN in gprotein_generic_set:
functional_annotation += 'GP (contact) '
if functional_annotation == '':
functional_annotation = '-'
# account for multiple mutations at this position!
jsondata[SN] = [NM.amino_acid, NM.allele_frequency, NM.allele_count, NM.allele_number, NM.number_homozygotes, NM.type, effect, color, functional_annotation]
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
cache.set(name_of_cache, jsondata, 20) # 60*60*24*2 two days timeout on cache
return HttpResponse(jsondata, **response_kwargs)
def ajaxPTMs(request, slug, **response_kwargs):
name_of_cache = 'ajaxPTMs_'+slug
jsondata = cache.get(name_of_cache)
if jsondata == None:
jsondata = {}
NMs = PTMs.objects.filter(protein__entry_name=slug).prefetch_related('residue')
for NM in NMs:
SN = NM.residue.sequence_number
mod = NM.modification
jsondata[SN] = [mod]
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
cache.set(name_of_cache, jsondata, 20) # 60*60*24*2 two days timeout on cache
return HttpResponse(jsondata, **response_kwargs)
# def ajaxCancerMutation(request, slug, **response_kwargs):
#
# name_of_cache = 'ajaxCancerMutation_'+slug
#
# jsondata = cache.get(name_of_cache)
#
# if jsondata == None:
# jsondata = {}
#
# CMs = CancerMutations.objects.filter(protein__entry_name=slug).prefetch_related('residue')
#
# for CM in CMs:
# SN = CM.residue.sequence_number
# jsondata[SN] = [CM.amino_acid]
#
# jsondata = json.dumps(jsondata)
# response_kwargs['content_type'] = 'application/json'
#
# cache.set(name_of_cache, jsondata, 20) #two days timeout on cache
#
# return HttpResponse(jsondata, **response_kwargs)
#
# def ajaxDiseaseMutation(request, slug, **response_kwargs):
#
# name_of_cache = 'ajaxDiseaseMutation_'+slug
#
# jsondata = cache.get(name_of_cache)
#
# if jsondata == None:
# jsondata = {}
#
# DMs = DiseaseMutations.objects.filter(protein__entry_name=slug).prefetch_related('residue')
#
# for DM in DMs:
# SN = DM.residue.sequence_number
# jsondata[SN] = [DM.amino_acid]
#
# jsondata = json.dumps(jsondata)
# response_kwargs['content_type'] = 'application/json'
#
# cache.set(name_of_cache, jsondata, 20) #two days timeout on cache
#
# return HttpResponse(jsondata, **response_kwargs)
def mutant_extract(request):
import pandas as pd
mutations = MutationExperiment.objects.all().prefetch_related('residue__display_generic_number','protein__family','exp_func','exp_type','ligand','ligand_role','refs','mutation')
# mutations = MutationExperiment.objects.filter(protein__entry_name__startswith=slug_without_species).order_by('residue__sequence_number').prefetch_related('residue')
temp = pd.DataFrame(columns=['EntryName','Family','LigandType','Class','SequenceNumber','GPCRdb','Segment','WTaa','Mutantaa','foldchange','Ligand','LigandRole','ExpQual','ExpWTValue','ExpWTVUnit','ExpMutantValue','ExpMutantSign','ExpType','ExpFunction'])
row = 0
for mutation in mutations:
if mutation.ligand:
ligand = mutation.ligand.name
else:
ligand = 'NaN'
if mutation.exp_qual:
qual = mutation.exp_qual.qual
else:
qual = 'NaN'
if mutation.exp_func_id:
func = mutation.exp_func.func
else:
func = 'NaN'
if mutation.ligand_role_id:
lrole = mutation.ligand_role.name
else:
lrole = 'NaN'
if mutation.exp_type_id:
etype = mutation.exp_type.type
else:
etype = 'NaN'
if mutation.residue.display_generic_number:
gpcrdb = mutation.residue.display_generic_number.label
else:
gpcrdb = 'NaN'
if mutation.foldchange != 0:
# print(mutation.protein.entry_name, mutation.residue.sequence_number, mutation.residue.amino_acid, mutation.mutation.amino_acid, mutation.foldchange,ligand, lrole,qual,mutation.wt_value, mutation.wt_unit, mutation.mu_value, mutation.mu_sign, etype, func)
temp.loc[row] = pd.Series({'EntryName': mutation.protein.entry_name, 'Family': mutation.protein.family.parent.name,'LigandType': mutation.protein.family.parent.parent.name,'Class': mutation.protein.family.parent.parent.parent.name, 'SequenceNumber': int(mutation.residue.sequence_number), 'GPCRdb': gpcrdb, 'Segment': mutation.residue.protein_segment.slug,'WTaa': mutation.residue.amino_acid, 'Mutantaa': mutation.mutation.amino_acid, 'foldchange': mutation.foldchange, 'Ligand': ligand, 'LigandRole': lrole, 'ExpQual': qual, 'ExpWTValue': mutation.wt_value, 'ExpWTVUnit': mutation.wt_unit, 'ExpMutantValue': mutation.mu_value, 'ExpMutantSign': mutation.mu_sign, 'ExpType': etype, 'ExpFunction': func})
row += 1
if row % 200 == 0 and row != 0:
print(row)
temp.to_csv('170125_GPCRdb_mutation.csv')
# jsondata[mutation.residue.sequence_number].append([mutation.foldchange,ligand,qual])
# print(jsondata)
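# statistics() builds a nested coverage tree (class -> ligand type -> receptor
# family -> receptor) from the protein family slugs, accumulating the number of
# missense variants and their per-residue density at every level, plus overall
# counts of missense and loss-of-function variants for the statistics page.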
@cache_page(60*60*24*21)
def statistics(request):
context = dict()
families = ProteinFamily.objects.all()
lookup = {}
for f in families:
lookup[f.slug] = f.name.replace("receptors","").replace(" receptor","").replace(" hormone","").replace("/neuropeptide","/").replace(" (G protein-coupled)","").replace(" factor","").replace(" (LPA)","").replace(" (S1P)","").replace("GPR18, GPR55 and GPR119","GPR18/55/119").replace("-releasing","").replace(" peptide","").replace(" and oxytocin","/Oxytocin").replace("Adhesion class orphans","Adhesion orphans").replace("muscarinic","musc.").replace("-concentrating","-conc.")
class_proteins = Protein.objects.filter(family__slug__startswith="00",source__name='SWISSPROT', species_id=1).prefetch_related('family').order_by('family__slug')
temp = OrderedDict([
('name',''),
('number_of_variants', 0),
('number_of_children', 0),
('receptor_t',0),
('density_of_variants', 0),
('children', OrderedDict())
])
coverage = OrderedDict()
# Make the scaffold
for p in class_proteins:
#print(p,p.family.slug)
fid = p.family.slug.split("_")
if fid[0] not in coverage:
coverage[fid[0]] = deepcopy(temp)
coverage[fid[0]]['name'] = lookup[fid[0]]
if fid[1] not in coverage[fid[0]]['children']:
coverage[fid[0]]['children'][fid[1]] = deepcopy(temp)
coverage[fid[0]]['children'][fid[1]]['name'] = lookup[fid[0]+"_"+fid[1]]
if fid[2] not in coverage[fid[0]]['children'][fid[1]]['children']:
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]] = deepcopy(temp)
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['name'] = lookup[fid[0]+"_"+fid[1]+"_"+fid[2]][:28]
if fid[3] not in coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['children']:
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['children'][fid[3]] = deepcopy(temp)
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['children'][fid[3]]['name'] = p.entry_name.split("_")[0] #[:10]
coverage[fid[0]]['receptor_t'] += 1
coverage[fid[0]]['children'][fid[1]]['receptor_t'] += 1
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['receptor_t'] += 1
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['children'][fid[3]]['receptor_t'] = 1
# # POPULATE WITH DATA
variants_target = Protein.objects.filter(family__slug__startswith="00", entry_name__icontains='_human').values('family_id__slug').annotate(value=Count('naturalmutations__residue_id', distinct = True))
protein_lengths = Protein.objects.filter(family__slug__startswith="00", entry_name__icontains='_human').values('family_id__slug','sequence')
protein_lengths_dict = {}
for i in protein_lengths:
protein_lengths_dict[i['family_id__slug']] = i['sequence']
for i in variants_target:
# print(i)
fid = i['family_id__slug'].split("_")
coverage[fid[0]]['number_of_variants'] += i['value']
coverage[fid[0]]['children'][fid[1]]['number_of_variants'] += i['value']
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['number_of_variants'] += i['value']
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['children'][fid[3]]['number_of_variants'] += i['value']
density = float(i['value'])/len(protein_lengths_dict[i['family_id__slug']])
coverage[fid[0]]['density_of_variants'] += round(density,2)
coverage[fid[0]]['children'][fid[1]]['density_of_variants'] += round(density,2)
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['density_of_variants'] += round(density,2)
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['children'][fid[3]]['density_of_variants'] += round(density,2)
coverage[fid[0]]['number_of_children'] += 1
coverage[fid[0]]['children'][fid[1]]['number_of_children'] += 1
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['number_of_children'] += 1
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['children'][fid[3]]['number_of_children'] += 1
# MAKE THE TREE
tree = OrderedDict({'name':'GPCRs','children':[]})
i = 0
n = 0
for c,c_v in coverage.items():
c_v['name'] = c_v['name'].split("(")[0]
if c_v['name'].strip() in ['Other GPCRs']:
# i += 1
continue
# pass
children = []
for lt,lt_v in c_v['children'].items():
if lt_v['name'].strip() == 'Orphan' and c_v['name'].strip()=="Class A":
# $pass
continue
children_rf = []
for rf,rf_v in lt_v['children'].items():
rf_v['name'] = rf_v['name'].split("<")[0]
children_r = []
for r,r_v in rf_v['children'].items():
r_v['sort'] = n
children_r.append(r_v)
n += 1
rf_v['children'] = children_r
rf_v['sort'] = n
children_rf.append(rf_v)
lt_v['children'] = children_rf
lt_v['sort'] = n
children.append(lt_v)
c_v['children'] = children
c_v['sort'] = n
tree['children'].append(c_v)
#tree = c_v
#break
i += 1
context['tree'] = json.dumps(tree)
## Overview statistics
total_receptors = NaturalMutations.objects.filter(type='missense').values('protein_id').distinct().count()
total_mv = len(NaturalMutations.objects.filter(type='missense'))
total_lof = len(NaturalMutations.objects.exclude(type='missense'))
total_av_rv = round(len(NaturalMutations.objects.filter(type='missense', allele_frequency__lt=0.001))/ total_receptors,1)
total_av_cv = round(len(NaturalMutations.objects.filter(type='missense', allele_frequency__gte=0.001))/ total_receptors,1)
context['stats'] = {'total_mv':total_mv,'total_lof':total_lof,'total_av_rv':total_av_rv, 'total_av_cv':total_av_cv}
return render(request, 'variation_statistics.html', context)
def get_functional_sites(protein):
## PTMs
ptms = list(PTMs.objects.filter(protein=protein).values_list('residue', flat=True).distinct())
## MICROSWITCHES
micro_switches_rset = ResiduePositionSet.objects.get(name="Microswitches")
ms_label = []
for residue in micro_switches_rset.residue_position.all():
ms_label.append(residue.label)
ms_object = list(Residue.objects.filter(protein_conformation__protein=protein, generic_number__label__in=ms_label).values_list('id', flat=True).distinct())
## SODIUM POCKET
sodium_pocket_rset = ResiduePositionSet.objects.get(name="Sodium pocket")
sp_label = []
for residue in sodium_pocket_rset.residue_position.all():
sp_label.append(residue.label)
sp_object = list(Residue.objects.filter(protein_conformation__protein=protein, generic_number__label__in=ms_label).values_list('id', flat=True).distinct())
## G PROTEIN INTERACTION POSITIONS
# THIS SHOULD BE CLASS SPECIFIC (different set)
rset = ResiduePositionSet.objects.get(name='Signalling protein pocket')
gprotein_generic_set = []
for residue in rset.residue_position.all():
gprotein_generic_set.append(residue.label)
GP_object = list(Residue.objects.filter(protein_conformation__protein=protein, generic_number__label__in=gprotein_generic_set).values_list('id', flat=True).distinct())
### GET LB INTERACTION DATA
## get also ortholog proteins, which might have been crystallised to extract
## interaction data also from those
orthologs = Protein.objects.filter(family__slug__startswith=protein.family.slug, sequence_type__slug='wt').prefetch_related('protein__family')
interaction_residues = ResidueFragmentInteraction.objects.filter(
structure_ligand_pair__structure__protein_conformation__protein__parent__in=orthologs, structure_ligand_pair__annotated=True).exclude(interaction_type__type ='hidden').values_list('rotamer__residue_id', flat=True).distinct()
## Get variants of these known residues:
known_function_sites = set(x for l in [GP_object,sp_object,ms_object,ptms,interaction_residues] for x in l)
NMs = NaturalMutations.objects.filter(residue_id__in=known_function_sites)
return len(NMs)
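# get_functional_sites() returns the number of natural variants hitting residues
# with a known functional role (PTMs, microswitches, sodium pocket, G-protein
# contacts, ligand-binding positions); economicburden() combines these counts
# with NHS prescribing data to tabulate cost and variant burden per drug.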
@cache_page(60*60*24*21)
def economicburden(request):
economic_data = [{'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 29574708, 'x': 'putative-homozygous'}, {'y': 186577951, 'x': 'putative-all variants'}], 'key': 'Analgesics'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 14101883, 'x': 'putative-all variants'}], 'key': 'Antidepressant Drugs'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 10637449, 'x': 'putative-all variants'}], 'key': 'Antihist, Hyposensit & Allergic Emergen'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 6633692, 'x': 'putative-all variants'}], 'key': 'Antispasmod.&Other Drgs Alt.Gut Motility'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 8575714, 'x': 'putative-homozygous'}, {'y': 27008513, 'x': 'putative-all variants'}], 'key': 'Beta-Adrenoceptor Blocking Drugs'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 10108322, 'x': 'known-all variants'}, {'y': 25187489, 'x': 'putative-homozygous'}, {'y': 89224667, 'x': 'putative-all variants'}], 'key': 'Bronchodilators'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 5466184, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 10313279, 'x': 'putative-all variants'}], 'key': 'Drugs For Genito-Urinary Disorders'}, {'values': [{'y': 13015487, 'x': 'known-homozygous'}, {'y': 44334808, 'x': 'known-all variants'}, {'y': 13015487, 'x': 'putative-homozygous'}, {'y': 45130626, 'x': 'putative-all variants'}], 'key': 'Drugs Used In Diabetes'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 12168533, 'x': 'putative-all variants'}], 'key': "Drugs Used In Park'ism/Related Disorders"}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 28670250, 'x': 'putative-all variants'}], 'key': 'Drugs Used In Psychoses & Rel.Disorders'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 11069531, 'x': 'putative-all variants'}], 'key': 'Drugs Used In Substance Dependence'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 8694786, 'x': 'putative-all variants'}], 'key': 'Hypothalamic&Pituitary Hormones&Antioest'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 9855456, 'x': 'putative-all variants'}], 'key': 'Sex Hormones & Antag In Malig Disease'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 7848808, 'x': 'putative-homozygous'}, {'y': 25446045, 'x': 'putative-all variants'}], 'key': 'Treatment Of Glaucoma'}, {'values': [{'y': 864112, 'x': 'known-homozygous'}, {'y': 6107013, 'x': 'known-all variants'}, {'y': 19047162, 'x': 'putative-homozygous'}, {'y': 15754588, 'x': 'putative-all variants'}], 'key': 'other'}]
### PER DRUG TABLE
## drug data
nhs_sections = NHSPrescribings.objects.all().values("drugname__name", "bnf_section").distinct()
section_dict = {}
for drug in nhs_sections:
if drug['drugname__name'] in section_dict:
section_dict[drug['drugname__name']].append(drug['bnf_section'])
else:
section_dict[drug['drugname__name']] = [drug['bnf_section']]
nhs_data = NHSPrescribings.objects.all().values('drugname__name').annotate(Avg('actual_cost'), Avg('items'), Avg('quantity'))
drug_data = []
temp = {}
for i in nhs_data:
## druginformation
drugname = i['drugname__name']
average_cost = int(i['actual_cost__avg'])
average_quantity = int(i['quantity__avg'])
average_items = int(i['items__avg'])
section = section_dict[drugname]
if average_items > 0:
item_cost= round(float(average_cost)/average_items,1)
else:
item_cost = 0
## get target information
protein_targets = Protein.objects.filter(drugs__name=drugname).distinct()
targets = [p.entry_name.split('_human')[0].upper() for p in list(protein_targets)]
known_functional = 0
for target in protein_targets:
if target.entry_name in temp:
known_functional += temp[target.entry_name]
else:
function_sites = get_functional_sites(target)
known_functional += function_sites
temp[target.entry_name] = function_sites
putative_func = len(NaturalMutations.objects.filter(Q(protein__in=protein_targets), Q(sift_score__lte=0.05) | Q(polyphen_score__gte=0.1)).annotate(count_putative_func=Count('id')))
jsondata = {'drugname':drugname, 'targets': targets, 'average_cost': average_cost, 'average_quantity': average_quantity, 'average_items':average_items, 'item_cost':item_cost, 'known_func': known_functional, 'putative_func':putative_func, 'section':section}
drug_data.append(jsondata)
return render(request, 'economicburden.html', {'data':economic_data, 'drug_data':drug_data})
| apache-2.0 |
tdgoodrich/mase | models/icse14-v5-min.py | 13 | 51518 |
from __future__ import division
import sys,collections,random
sys.dont_write_bytecode = True
def shuffle(lst):
random.shuffle(lst)
return lst
class Thing():
id = -1
def __init__(i,**fields) :
i.override(fields)
i._id = Thing.id = Thing.id + 1
i.finalize()
def finalize(i): pass
def override(i,d): i.__dict__.update(d); return i
def plus(i,**d): i.override(d)
def __repr__(i):
d = i.__dict__
name = i.__class__.__name__
return name+'{'+' '.join([':%s %s' % (k,pretty(d[k]))
for k in i.show()])+ '}'
def show(i):
return [k for k in sorted(i.__dict__.keys())
if not "_" in k]
def tunings( _ = None):
return dict(
Flex= [5.07, 4.05, 3.04, 2.03, 1.01, _],
Pmat= [7.80, 6.24, 4.68, 3.12, 1.56, _],
Prec= [6.20, 4.96, 3.72, 2.48, 1.24, _],
Resl= [7.07, 5.65, 4.24, 2.83, 1.41, _],
Team= [5.48, 4.38, 3.29, 2.19, 1.01, _],
acap= [1.42, 1.19, 1.00, 0.85, 0.71, _],
aexp= [1.22, 1.10, 1.00, 0.88, 0.81, _],
cplx= [0.73, 0.87, 1.00, 1.17, 1.34, 1.74],
data= [ _, 0.90, 1.00, 1.14, 1.28, _],
docu= [0.81, 0.91, 1.00, 1.11, 1.23, _],
ltex= [1.20, 1.09, 1.00, 0.91, 0.84, _],
pcap= [1.34, 1.15, 1.00, 0.88, 0.76, _],
pcon= [1.29, 1.12, 1.00, 0.90, 0.81, _],
plex= [1.19, 1.09, 1.00, 0.91, 0.85, _],
pvol= [ _, 0.87, 1.00, 1.15, 1.30, _],
rely= [0.82, 0.92, 1.00, 1.10, 1.26, _],
ruse= [ _, 0.95, 1.00, 1.07, 1.15, 1.24],
sced= [1.43, 1.14, 1.00, 1.00, 1.00, _],
site= [1.22, 1.09, 1.00, 0.93, 0.86, 0.80],
stor= [ _, _, 1.00, 1.05, 1.17, 1.46],
time= [ _, _, 1.00, 1.11, 1.29, 1.63],
tool= [1.17, 1.09, 1.00, 0.90, 0.78, _])
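# The tunings table holds the COCOMO-II scale factors (Prec..Pmat) and effort
# multipliers (rely..tool), one value per rating level from very low to extra
# high; '_' (None) marks ratings that are undefined for that attribute.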
Features=dict(Sf=[ 'Prec','Flex','Resl','Team','Pmat'],
Prod=['rely','data','cplx','ruse','docu'],
Platform=['time','stor','pvol'],
Person=['acap','pcap','pcon','aexp','plex','ltex'],
Project=['tool','site','sced'])
def options():
return Thing(levels=10,samples=20,shrink=0.66,round=2,epsilon=0.00,
guesses=1000)
Features=dict(Sf=[ 'Prec','Flex','Resl','Team','Pmat'],
Prod=['rely','data','cplx','ruse','docu'],
Platform=['time','stor','pvol'],
Person=['acap','pcap','pcon','aexp','plex','ltex'],
Project=['tool','site','sced'])
def has(x,lst):
try:
out=lst.index(x)
return out
except ValueError:
return None
def nasa93(opt=options(),tunings=tunings()):
vl=1;l=2;n=3;h=4;vh=5;xh=6
return Thing(
sfem=21,
kloc=22,
effort=23,
names= [
# 0..8
'Prec', 'Flex', 'Resl', 'Team', 'Pmat', 'rely', 'data', 'cplx', 'ruse',
# 9 .. 17
'docu', 'time', 'stor', 'pvol', 'acap', 'pcap', 'pcon', 'aexp', 'plex',
# 18 .. 25
'ltex', 'tool', 'site', 'sced', 'kloc', 'effort', '?defects', '?months'],
projects=[
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,25.9,117.6,808,15.3],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,24.6,117.6,767,15.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,7.7,31.2,240,10.1],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,8.2,36,256,10.4],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,9.7,25.2,302,11.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,2.2,8.4,69,6.6],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,3.5,10.8,109,7.8],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,66.6,352.8,2077,21.0],
[h,h,h,vh,h,h,l,h,n,n,xh,xh,l,h,h,n,h,n,h,h,n,n,7.5,72,226,13.6],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,vh,n,vh,n,h,n,n,n,20,72,566,14.4],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,vh,n,h,n,n,n,6,24,188,9.9],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,vh,n,vh,n,h,n,n,n,100,360,2832,25.2],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,n,n,vh,n,l,n,n,n,11.3,36,456,12.8],
[h,h,h,vh,n,n,l,h,n,n,n,n,h,h,h,n,h,l,vl,n,n,n,100,215,5434,30.1],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,vh,n,h,n,n,n,20,48,626,15.1],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,n,n,n,n,vl,n,n,n,100,360,4342,28.0],
[h,h,h,vh,n,n,l,h,n,n,n,xh,l,h,vh,n,vh,n,h,n,n,n,150,324,4868,32.5],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,h,n,h,n,n,n,31.5,60,986,17.6],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,vh,n,h,n,n,n,15,48,470,13.6],
[h,h,h,vh,n,n,l,h,n,n,n,xh,l,h,n,n,h,n,h,n,n,n,32.5,60,1276,20.8],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,19.7,60,614,13.9],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,66.6,300,2077,21.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,29.5,120,920,16.0],
[h,h,h,vh,n,h,n,n,n,n,h,n,n,n,h,n,h,n,n,n,n,n,15,90,575,15.2],
[h,h,h,vh,n,h,n,h,n,n,n,n,n,n,h,n,h,n,n,n,n,n,38,210,1553,21.3],
[h,h,h,vh,n,n,n,n,n,n,n,n,n,n,h,n,h,n,n,n,n,n,10,48,427,12.4],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,15.4,70,765,14.5],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,48.5,239,2409,21.4],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,16.3,82,810,14.8],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,12.8,62,636,13.6],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,32.6,170,1619,18.7],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,35.5,192,1763,19.3],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,5.5,18,172,9.1],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,10.4,50,324,11.2],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,14,60,437,12.4],
[h,h,h,vh,n,h,n,h,n,n,n,n,n,n,n,n,n,n,n,n,n,n,6.5,42,290,12.0],
[h,h,h,vh,n,n,n,h,n,n,n,n,n,n,n,n,n,n,n,n,n,n,13,60,683,14.8],
[h,h,h,vh,h,n,n,h,n,n,n,n,n,n,h,n,n,n,h,h,n,n,90,444,3343,26.7],
[h,h,h,vh,n,n,n,h,n,n,n,n,n,n,n,n,n,n,n,n,n,n,8,42,420,12.5],
[h,h,h,vh,n,n,n,h,n,n,h,n,n,n,n,n,n,n,n,n,n,n,16,114,887,16.4],
[h,h,h,vh,h,n,h,h,n,n,vh,h,l,h,h,n,n,l,h,n,n,l,177.9,1248,7998,31.5],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,h,n,n,n,n,n,n,n,302,2400,8543,38.4],
[h,h,h,vh,h,n,h,l,n,n,n,n,h,h,n,n,h,n,n,h,n,n,282.1,1368,9820,37.3],
[h,h,h,vh,h,h,h,l,n,n,n,n,n,h,n,n,h,n,n,n,n,n,284.7,973,8518,38.1],
[h,h,h,vh,n,h,h,n,n,n,n,n,l,n,h,n,h,n,h,n,n,n,79,400,2327,26.9],
[h,h,h,vh,l,l,n,n,n,n,n,n,l,h,vh,n,h,n,h,n,n,n,423,2400,18447,41.9],
[h,h,h,vh,h,n,n,n,n,n,n,n,l,h,vh,n,vh,l,h,n,n,n,190,420,5092,30.3],
[h,h,h,vh,h,n,n,h,n,n,n,h,n,h,n,n,h,n,h,n,n,n,47.5,252,2007,22.3],
[h,h,h,vh,l,vh,n,xh,n,n,h,h,l,n,n,n,h,n,n,h,n,n,21,107,1058,21.3],
[h,h,h,vh,l,n,h,h,n,n,vh,n,n,h,h,n,h,n,h,n,n,n,78,571.4,4815,30.5],
[h,h,h,vh,l,n,h,h,n,n,vh,n,n,h,h,n,h,n,h,n,n,n,11.4,98.8,704,15.5],
[h,h,h,vh,l,n,h,h,n,n,vh,n,n,h,h,n,h,n,h,n,n,n,19.3,155,1191,18.6],
[h,h,h,vh,l,h,n,vh,n,n,h,h,l,h,n,n,n,h,h,n,n,n,101,750,4840,32.4],
[h,h,h,vh,l,h,n,h,n,n,h,h,l,n,n,n,h,n,n,n,n,n,219,2120,11761,42.8],
[h,h,h,vh,l,h,n,h,n,n,h,h,l,n,n,n,h,n,n,n,n,n,50,370,2685,25.4],
[h,h,h,vh,h,vh,h,h,n,n,vh,vh,n,vh,vh,n,vh,n,h,h,n,l,227,1181,6293,33.8],
[h,h,h,vh,h,n,h,vh,n,n,n,n,l,h,vh,n,n,l,n,n,n,l,70,278,2950,20.2],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,0.9,8.4,28,4.9],
[h,h,h,vh,l,vh,l,xh,n,n,xh,vh,l,h,h,n,vh,vl,h,n,n,n,980,4560,50961,96.4],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,vh,vh,n,n,h,h,n,n,n,350,720,8547,35.7],
[h,h,h,vh,h,h,n,xh,n,n,h,h,l,h,n,n,n,h,h,h,n,n,70,458,2404,27.5],
[h,h,h,vh,h,h,n,xh,n,n,h,h,l,h,n,n,n,h,h,h,n,n,271,2460,9308,43.4],
[h,h,h,vh,n,n,n,n,n,n,n,n,l,h,h,n,h,n,h,n,n,n,90,162,2743,25.0],
[h,h,h,vh,n,n,n,n,n,n,n,n,l,h,h,n,h,n,h,n,n,n,40,150,1219,18.9],
[h,h,h,vh,n,h,n,h,n,n,h,n,l,h,h,n,h,n,h,n,n,n,137,636,4210,32.2],
[h,h,h,vh,n,h,n,h,n,n,h,n,h,h,h,n,h,n,h,n,n,n,150,882,5848,36.2],
[h,h,h,vh,n,vh,n,h,n,n,h,n,l,h,h,n,h,n,h,n,n,n,339,444,8477,45.9],
[h,h,h,vh,n,l,h,l,n,n,n,n,h,h,h,n,h,n,h,n,n,n,240,192,10313,37.1],
[h,h,h,vh,l,h,n,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,144,576,6129,28.8],
[h,h,h,vh,l,n,l,n,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,151,432,6136,26.2],
[h,h,h,vh,l,n,l,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,34,72,1555,16.2],
[h,h,h,vh,l,n,n,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,98,300,4907,24.4],
[h,h,h,vh,l,n,n,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,85,300,4256,23.2],
[h,h,h,vh,l,n,l,n,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,20,240,813,12.8],
[h,h,h,vh,l,n,l,n,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,111,600,4511,23.5],
[h,h,h,vh,l,h,vh,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,162,756,7553,32.4],
[h,h,h,vh,l,h,h,vh,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,352,1200,17597,42.9],
[h,h,h,vh,l,h,n,vh,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,165,97,7867,31.5],
[h,h,h,vh,h,h,n,vh,n,n,h,h,l,h,n,n,n,h,h,n,n,n,60,409,2004,24.9],
[h,h,h,vh,h,h,n,vh,n,n,h,h,l,h,n,n,n,h,h,n,n,n,100,703,3340,29.6],
[h,h,h,vh,n,h,vh,vh,n,n,xh,xh,h,n,n,n,n,l,l,n,n,n,32,1350,2984,33.6],
[h,h,h,vh,h,h,h,h,n,n,vh,xh,h,h,h,n,h,h,h,n,n,n,53,480,2227,28.8],
[h,h,h,vh,h,h,l,vh,n,n,vh,xh,l,vh,vh,n,vh,vl,vl,h,n,n,41,599,1594,23.0],
[h,h,h,vh,h,h,l,vh,n,n,vh,xh,l,vh,vh,n,vh,vl,vl,h,n,n,24,430,933,19.2],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,165,4178.2,6266,47.3],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,65,1772.5,2468,34.5],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,70,1645.9,2658,35.4],
[h,h,h,vh,h,vh,h,xh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,50,1924.5,2102,34.2],
[h,h,h,vh,l,vh,l,vh,n,n,vh,xh,l,h,n,n,l,vl,l,h,n,n,7.25,648,406,15.6],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,233,8211,8848,53.1],
[h,h,h,vh,n,h,n,vh,n,n,vh,vh,h,n,n,n,n,l,l,n,n,n,16.3,480,1253,21.5],
[h,h,h,vh,n,h,n,vh,n,n,vh,vh,h,n,n,n,n,l,l,n,n,n, 6.2, 12,477,15.4],
[h,h,h,vh,n,h,n,vh,n,n,vh,vh,h,n,n,n,n,l,l,n,n,n, 3.0, 38,231,12.0],
])
def coc81(opt=options(),tunings=tunings()):
vl=1;l=2;n=3;h=4;vh=5;xh=6
return Thing(
sfem=21,
kloc=22,
effort=23,
names= [
'Prec', 'Flex', 'Resl', 'Team', 'Pmat', 'rely', 'data', 'cplx', 'ruse',
'docu', 'time', 'stor', 'pvol', 'acap', 'pcap', 'pcon', 'aexp', 'plex',
'ltex', 'tool', 'site', 'sced', 'kloc', 'effort', '?defects', '?months'],
projects=[
[h,h,h,vh,vl,l,vh,vl,n,n,n,h,h,l,l,n,l,l,n,vl,h,n,113,2040,13027,38.4],
[h,h,h,vh,vl,l,vh,l,n,n,n,h,n,n,n,n,h,h,h,vl,h,n,293,1600,25229,48.6],
[h,h,h,vh,n,n,vh,l,n,n,n,n,l,h,h,n,vh,h,h,l,h,n,132,243,3694,28.7],
[h,h,h,vh,vl,vl,vh,vl,n,n,n,n,l,l,vl,n,h,n,h,vl,h,n,60,240,5688,28.0],
[h,h,h,vh,vl,l,l,n,n,n,n,n,l,n,h,n,n,h,h,vl,h,n,16,33,970,14.3],
[h,h,h,vh,vl,vl,n,l,n,n,n,vh,n,vl,vl,n,n,h,h,vl,h,n,4,43,553,11.6],
[h,h,h,vh,n,vl,n,n,n,n,n,n,l,n,n,n,n,h,h,l,h,n,6.9,8,350,10.3],
[h,h,h,vh,vl,h,l,vh,n,n,xh,xh,vh,vh,n,n,h,vl,vl,vl,h,l,22,1075,3511,24.5],
[h,h,h,vh,n,h,l,vh,n,n,vh,vh,h,h,h,n,n,l,l,vl,h,n,30,423,1989,24.1],
[h,h,h,vh,l,vh,l,vh,n,n,h,xh,n,h,h,n,vh,h,n,vl,h,n,29,321,1496,23.2],
[h,h,h,vh,l,vh,l,vh,n,n,h,xh,n,h,h,n,vh,h,n,vl,h,n,32,218,1651,24.0],
[h,h,h,vh,n,h,l,vh,n,n,h,h,n,h,h,n,vh,n,h,vl,h,l,37,201,1783,19.1],
[h,h,h,vh,n,h,l,vh,n,n,h,h,h,vh,vh,n,n,l,n,vl,h,n,25,79,1138,18.4],
[h,h,h,vh,vl,h,l,xh,n,n,vh,xh,h,h,vh,n,n,l,l,vl,h,vl,3,60,387,9.4],
[h,h,h,vh,n,vh,l,vh,n,n,vh,h,h,h,h,n,l,vl,vl,vl,h,vl,3.9,61,276,9.5],
[h,h,h,vh,l,vh,n,vh,n,n,vh,xh,n,h,h,n,n,n,n,vl,h,n,6.1,40,390,14.9],
[h,h,h,vh,l,vh,n,vh,n,n,vh,xh,n,h,h,n,vh,n,n,vl,h,n,3.6,9,230,12.3],
[h,h,h,vh,vl,h,vh,h,n,n,vh,vh,n,h,n,n,n,n,n,vl,h,l,320,11400,34588,52.4],
[h,h,h,vh,n,h,h,n,n,n,h,vh,l,vh,n,n,h,n,n,l,h,n,1150,6600,41248,67.0],
[h,h,h,vh,vl,vh,h,vh,n,n,h,vh,h,vh,n,n,vh,l,l,vl,h,l,299,6400,30955,53.4],
[h,h,h,vh,n,n,vh,h,n,n,n,n,l,h,n,n,n,n,n,l,h,n,252,2455,11664,40.8],
[h,h,h,vh,n,h,n,n,n,n,n,h,n,h,h,n,vh,h,n,vl,h,vl,118,724,5172,21.7],
[h,h,h,vh,l,h,n,n,n,n,n,h,n,h,h,n,vh,h,n,vl,h,vl,77,539,4362,19.5],
[h,h,h,vh,n,l,n,l,n,n,n,h,n,n,n,n,vl,l,h,n,h,n,90,453,4407,27.1],
[h,h,h,vh,n,h,vh,vh,n,n,n,h,n,h,h,n,n,l,n,l,h,l,38,523,2269,20.2],
[h,h,h,vh,n,n,n,l,n,n,n,h,h,h,h,n,n,l,n,vl,h,l,48,387,2419,18.5],
[h,h,h,vh,n,h,l,h,n,n,n,vh,n,n,n,n,n,n,n,vl,h,l,9.4,88,517,12.1],
[h,h,h,vh,vl,h,h,vh,n,n,h,vh,h,h,h,n,n,l,l,vl,h,n,13,98,1473,19.6],
[h,h,h,vh,n,l,n,n,n,n,n,n,n,n,h,n,vl,n,n,l,h,vl,2.14,7.3,138,5.3],
[h,h,h,vh,n,l,n,n,n,n,n,n,n,n,h,n,vl,n,n,l,h,vl,1.98,5.9,128,5.2],
[h,h,h,vh,l,vh,h,n,n,n,n,xh,h,h,h,n,vh,l,l,vl,h,n,62,1063,3682,32.8],
[h,h,h,vh,vl,l,h,l,n,n,n,n,n,vh,n,n,vh,n,n,vl,h,n,390,702,30484,45.8],
[h,h,h,vh,n,vh,h,vh,n,n,n,xh,h,h,h,n,vh,h,n,l,h,n,42,605,1803,27.1],
[h,h,h,vh,n,h,h,n,n,n,n,n,n,n,n,n,n,n,n,vl,h,vl,23,230,1271,14.2],
[h,h,h,vh,vl,vl,l,vh,n,n,n,vh,h,n,n,n,h,l,n,vl,h,n,13,82,2250,17.2],
[h,h,h,vh,l,l,n,n,n,n,n,n,l,l,l,n,n,h,h,l,h,n,15,55,1004,15.8],
[h,h,h,vh,l,l,l,vl,n,n,n,h,n,h,h,n,vh,n,n,vl,h,n,60,47,2883,20.3],
[h,h,h,vh,n,n,n,h,n,n,n,n,l,vh,n,n,h,h,h,l,h,n,15,12,504,13.5],
[h,h,h,vh,n,n,n,h,n,n,n,n,l,vh,vh,n,vh,n,h,vl,h,n,6.2,8,197,9.6],
[h,h,h,vh,vl,n,l,vh,n,n,n,n,n,h,l,n,vh,n,n,vl,h,n,n,8,294,9.5],
[h,h,h,vh,n,l,l,n,n,n,n,n,l,n,vh,n,vh,h,h,l,h,n,5.3,6,173,8.7],
[h,h,h,vh,l,l,n,n,n,n,n,h,l,h,n,n,n,h,h,vl,h,n,45.5,45,2645,21.0],
[h,h,h,vh,l,n,n,n,n,n,n,vh,l,h,n,n,n,h,h,vl,h,n,28.6,83,1416,18.9],
[h,h,h,vh,vl,l,n,n,n,n,n,vh,l,n,n,n,n,h,h,vl,h,n,30.6,87,2444,20.5],
[h,h,h,vh,l,l,n,n,n,n,n,h,l,n,n,n,n,h,h,vl,h,n,35,106,2198,20.1],
[h,h,h,vh,l,l,n,n,n,n,n,h,l,n,h,n,n,h,h,vl,h,n,73,126,4188,25.1],
[h,h,h,vh,vl,vl,l,vh,n,n,n,n,l,vh,vh,n,vh,l,l,vl,h,n,23,36,2161,15.6],
[h,h,h,vh,vl,l,l,l,n,n,n,n,l,l,l,n,h,h,h,vl,h,n,464,1272,32002,53.4],
[h,h,h,vh,n,n,n,l,n,n,n,n,n,vh,vh,n,n,l,n,l,h,n,91,156,2874,22.6],
[h,h,h,vh,l,h,n,n,n,n,vh,vh,n,h,h,n,n,l,n,vl,h,n,24,176,1541,20.3],
[h,h,h,vh,vl,l,n,n,n,n,n,n,n,l,vl,n,n,n,h,vl,h,n,10,122,1225,16.2],
[h,h,h,vh,vl,l,l,l,n,n,n,h,h,n,n,n,n,l,l,vl,h,n,8.2,41,855,13.1],
[h,h,h,vh,l,l,l,h,n,n,h,vh,vh,vh,vh,n,n,l,l,vl,h,l,5.3,14,533,9.3],
[h,h,h,vh,n,n,l,n,n,n,n,h,h,n,n,n,vh,n,h,vl,h,n,4.4,20,216,10.6],
[h,h,h,vh,vl,l,l,vl,n,n,n,n,l,h,l,n,vh,h,h,vl,h,n,6.3,18,309,9.6],
[h,h,h,vh,vl,h,l,vh,n,n,vh,vh,n,h,n,n,h,l,l,vl,h,l,27,958,3203,21.1],
[h,h,h,vh,vl,n,l,h,n,n,h,vh,vh,n,n,n,n,l,l,vl,h,vl,17,237,2622,16.0],
[h,h,h,vh,n,vh,l,vh,n,n,xh,vh,n,vh,vh,n,vh,h,h,vl,h,n,25,130,813,20.9],
[h,h,h,vh,n,n,l,h,n,n,n,h,n,n,n,n,n,n,n,vl,h,n,23,70,1294,18.2],
[h,h,h,vh,vl,h,l,vh,n,n,h,h,n,h,h,n,l,l,l,vl,h,l,6.7,57,650,11.3],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,n,h,n,vl,h,n,28,50,997,16.4],
[h,h,h,vh,n,l,l,vh,n,n,h,vh,h,n,vh,n,vh,vl,vl,vl,h,n,9.1,38,918,15.3],
[h,h,h,vh,n,n,l,h,n,n,n,n,n,vh,h,n,vh,n,n,vl,h,n,10,15,418,11.6],
])
def sdiv(lst, tiny=3,cohen=0.3,
num1=lambda x:x[0], num2=lambda x:x[1]):
"Divide lst of (num1,num2) using variance of num2."
#----------------------------------------------
class Counts(): # Add/delete counts of numbers.
def __init__(i,inits=[]):
i.zero()
for number in inits: i + number
def zero(i): i.n = i.mu = i.m2 = 0.0
def sd(i) :
if i.n < 2: return i.mu
else:
return (max(0,i.m2)*1.0/(i.n - 1))**0.5
def __add__(i,x):
i.n += 1
delta = x - i.mu
i.mu += delta/(1.0*i.n)
i.m2 += delta*(x - i.mu)
def __sub__(i,x):
if i.n < 2: return i.zero()
i.n -= 1
delta = x - i.mu
i.mu -= delta/(1.0*i.n)
i.m2 -= delta*(x - i.mu)
#----------------------------------------------
def divide(this,small): #Find best divide of 'this'
lhs,rhs = Counts(), Counts(num2(x) for x in this)
n0, least, cut = 1.0*rhs.n, rhs.sd(), None
for j,x in enumerate(this):
if lhs.n > tiny and rhs.n > tiny:
maybe= lhs.n/n0*lhs.sd()+ rhs.n/n0*rhs.sd()
if maybe < least :
if abs(lhs.mu - rhs.mu) >= small:
cut,least = j,maybe
rhs - num2(x)
lhs + num2(x)
return cut,least
#----------------------------------------------
def recurse(this, small,cuts):
cut,sd = divide(this,small)
if cut:
recurse(this[:cut], small, cuts)
recurse(this[cut:], small, cuts)
else:
cuts += [(sd * len(this)/len(lst),this)]
return cuts
#---| main |-----------------------------------
small = Counts(num2(x) for x in lst).sd()*cohen
if lst:
return recurse(sorted(lst,key=num1),small,[])
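# sdiv() is a simple supervised discretizer: it sorts the (num1,num2) pairs by
# num1 and recursively looks for the split that minimizes the weighted standard
# deviation of num2, requiring at least 'tiny' items per side and a mean
# difference of at least cohen * sd(num2); it returns the resulting buckets
# together with their weighted deviations.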
def fss(d=coc81(),want=0.25):
rank=[]
for i in range(d.sfem):
xs=sdiv(d.projects,
num1=lambda x:x[i],
num2=lambda x:x[d.effort])
xpect = sum(map(lambda x: x[0],xs))
rank += [(xpect,i)]
rank = sorted(rank)
keep = int(len(rank)*want)
doomed= map(lambda x:x[1], rank[keep:])
for project in d.projects:
for col in doomed:
project[col] = 3
return d
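# fss() (feature subset selection) scores every attribute by the expected effort
# deviation left after splitting the projects on it with sdiv(), keeps the best
# 'want' fraction of attributes, and neutralizes the rest by overwriting their
# column with the nominal rating 3; less() below prunes in the same way, keeping
# only the first n features of each COCOMO feature group.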
def less(d=coc81(),n=2):
skipped = 0
names0 = d.names
toUse,doomed = [],[]
for v in Features.values():
toUse += v[:n]
for n,name in enumerate(names0):
if n >= d.sfem:
break
if not has(name,toUse):
doomed += [n]
for project in d.projects:
for col in doomed:
project[col] = 3
return d
def meanr(lst):
total=n=0.00001
for x in lst:
if not x == None:
total += x
n += 1
return total/n
def tothree(lst):
below=lst[:2]
above=lst[3:]
m1 = meanr(below)
m2= meanr(above)
below = [m1 for _ in below]
above = [m2 for _ in above]
return below + [lst[2]] + above
def rr3(lst):
#return lst
r = 1
if lst[0]> 2 : r = 0
def rr1(x): return round(x, r) if x else None
tmp= tothree([rr1(x) for x in lst])
return tmp
def rr5(lst):
if lst[0] > 2:
return [6,5,4,3,2,1]
if lst[0] < 0:
return [0.8, 0.9, 1, 1.1, 1.2, 1.3]
return [1.2,1.1,1,0.9,0.8,0.7]
def rrs5(d):
for k in d: d[k] = rr5(d[k])
return d
def rrs3(d):
for k in d: d[k] = rr3(d[k])
return d
def detune(m,tun=tunings()):
def best(at,one,lst):
least,x = 100000,None
for n,item in enumerate(lst):
if item:
tmp = abs(one - item)
if tmp < least:
least = tmp
x = n
return x
def detuned(project):
for n,(name,val) in enumerate(zip(m.names,project)):
if n <= m.sfem:
project[n] = best(n,val,tun[name]) + 1
return project
m.projects = [detuned(project) for
project in m.projects]
for p in m.projects: print p
return m
#########################################
# begin code
## imports
import random,math,sys
r = random.random
any = random.choice
seed = random.seed
exp = lambda n: math.e**n
ln = lambda n: math.log(n,math.e)
g = lambda n: round(n,2)
def say(x):
sys.stdout.write(str(x))
sys.stdout.flush()
def nl(): print ""
## classes
class Score(Thing):
def finalize(i) :
i.all = []
i.residuals=[]
i.raw=[]
i.use=False
def seen(i,got,want):
i.residuals += [abs(got - want)]
i.raw += [got - want]
tmp = i.mre(got,want)
i.all += [tmp]
return tmp
def mar(i):
return median(sorted(i.residuals))
#return sum(i.residuals) / len(i.residuals)
def sanity(i,baseline):
return i.mar()*1.0/baseline
def mre(i,got,want):
return abs(got- want)*1.0/(0.001+want)
def mmre(i):
return sum(i.all)*1.0/len(i.all)
def medre(i):
return median(sorted(i.all))
def pred(i,n=30):
total = 0.0
for val in i.all:
if val <= n*0.01: total += 1
return total*1.0/len(i.all)
## low-level utils
def pretty(s):
if isinstance(s,float):
return '%.3f' % s
else: return '%s' % s
def stats(l,ordered=False):
if not ordered: l= sorted(l)
p25= l[len(l)/4]
p50= l[len(l)/2]
p75= l[len(l)*3/4]
p100= l[-1]
print p50, p75-p25, p100
## model prep
def valued(d,opt,t=tunings()):
for old in d.projects:
for i,name in enumerate(d.names):
if i <= d.sfem:
tmp = old[i]
if not isinstance(tmp,float):
tmp = old[i] - 1
old[i] = round(t[name][tmp],opt.round)
return d
####################################
def median(lst,ordered=False):
if not ordered: lst= sorted(lst)
n = len(lst)
if n==0: return 0
if n==1: return lst[0]
if n==2: return (lst[0] + lst[1])*0.5
if n % 2: return lst[n//2]
n = n//2
return (lst[n] + lst[n+1]) * 0.5
class Count:
def __init__(i,name="counter"):
i.name=name
i.lo = 10**32
i.hi= -1*10**32
i._all = []
i._also = None
def keep(i,n):
i._also= None
if n > i.hi: i.hi = n
if n < i.lo: i.lo = n
i._all += [n]
def centroid(i):return i.also().median
def all(i): return i.also().all
def also(i):
if not i._also:
i._all = sorted(i._all)
if not i._all:
i._also = Thing(all=i._all,
median=0)
else:
i._also = Thing(all=i._all,
median=median(i._all))
return i._also
def norm(i,n):
#return n
return (n - i.lo)*1.0 / (i.hi - i.lo + 0.0001)
def clone(old,data=[]):
return Model(map(lambda x: x.name,old.headers),
data)
class Model:
def __init__(i,names,data=[],indep=0):
i.indep = indep
i.headers = [Count(name) for name in names]
i._also = None
i.rows = []
for row in data: i.keep(row)
def centroid(i): return i.also().centroid
def xy(i) : return i.also().xy
def also(i):
if not i._also:
xs, ys = 0,0
for row in i.rows:
xs += row.x
ys += row.y
n = len(i.rows)+0.0001
i._also= Thing(
centroid= map(lambda x: x.centroid(),
i.headers),
xy = (xs/n, ys/n))
return i._also
def keep(i,row):
i._also = None
if isinstance(row,Row):
content=row.cells
else:
content=row
row = Row(cells=row)
for cell,header in zip(content,i.headers):
header.keep(cell)
i.rows += [row]
class Row(Thing):
def finalize(i):
i.x = i.y = 0
def xy(i,x,y):
if not i.x:
i.x, i.y = x,y
def lo(m,x) : return m.headers[x].lo
def hi(m,x) : return m.headers[x].hi
def norm(m,x,n) : return m.headers[x].norm(n)
def cosineRule(z,m,c,west,east,slots):
a = dist(m,z,west,slots)
b = dist(m,z,east,slots)
x= (a*a + c*c - b*b)/(2*c+0.00001) # cosine rule
y= max(0,a**2 - x**2)**0.5
return x,y
def fastmap(m,data,slots):
"Divide data into two using distance to two distant items."
one = any(data) # 1) pick anything
west = furthest(m,one,data,slots) # 2) west is as far as you can go from anything
east = furthest(m,west,data,slots) # 3) east is as far as you can go from west
c = dist(m,west,east,slots)
# now find everyone's distance
lst = []
for one in data:
x,y= cosineRule(one,m,c,west,east,slots)
one.xy(x,y)
lst += [(x, one)]
lst = sorted(lst)
wests,easts = [], []
cut = len(lst) // 2
cutx = lst[cut][0]
for x,one in lst:
what = wests if x <= cutx else easts
what += [one]
return wests,west, easts,east,cutx,c
def dist(m,i,j,slots):
"Euclidean distance 0 <= d <= 1 between decisions"
d1,d2 = slots.what(i), slots.what(j)
n = len(d1)
deltas = 0
for d in range(n):
n1 = norm(m, d, d1[d])
n2 = norm(m, d, d2[d])
inc = (n1-n2)**2
deltas += inc
return deltas**0.5 / n**0.5
def furthest(m,i,all,slots,
init = 0,
better = lambda x,y: x>y):
"find which of all is furthest from 'i'"
out,d= i,init
for j in all:
if not i == j:
tmp = dist(m,i,j,slots)
if better(tmp,d): out,d = j,tmp
return out
def myCentroid(row,t):
x1,y1=row.x,row.y
out,d=None,10**32
for leaf in leaves(t):
x2,y2=leaf.m.xy()
tmp = ((x2-x1)**2 + (y2-y1)**2)**0.5
if tmp < d:
out,d=leaf,tmp
return out
def centroid2(row,t):
x1,y1=row.x,row.y
out=[]
for leaf in leaves(t):
x2,y2 = leaf.m.xy()
tmp = ((x2-x1)**2 + (y2-y1)**2)**0.5
out += [(tmp,leaf)]
out = sorted(out)
if len(out)==0:
return [(None,None),(None,None)]
if len(out) ==1:
return out[0],out[0]
else:
return out[0],out[1]
def where0(**other):
return Thing(minSize = 10, # min leaf size
depthMin= 2, # no pruning till this depth
depthMax= 10, # max tree depth
b4 = '|.. ', # indent string
verbose = False, # show trace info?
what = lambda x: x.cells
).override(other)
def where(m,data,slots=None):
slots = slots or where0()
return where1(m,data,slots,0,10**32)
def where1(m, data, slots, lvl, sd0,parent=None):
here = Thing(m=clone(m,data),
up=parent,
_west=None,_east=None,leafp=False)
def tooDeep(): return lvl > slots.depthMax
def tooFew() : return len(data) < slots.minSize
def show(suffix):
if slots.verbose:
print slots.b4*lvl + str(len(data)) + suffix
if tooDeep() or tooFew():
show(".")
here.leafp=True
else:
show("1")
wests,west, easts,east,cut,c = fastmap(m,data,slots)
here.plus(c=c, cut=cut, west=west, east=east)
sd1=Num("west",[slots.klass(w) for w in wests]).spread()
sd2=Num("east",[slots.klass(e) for e in easts]).spread()
goWest = goEast = True
if lvl > 0:
goWest = sd1 < sd0
goEast = sd2 < sd0
if goWest:
here._west = where1(m, wests, slots, lvl+1, sd1,here)
if goEast:
here._east = where1(m, easts, slots, lvl+1, sd2,here)
return here
def leaf(t,row,slots,lvl=1):
if t.leafp:
return t
else:
x,_ = cosineRule(row, t.m, t.c,t.west,t.east,slots)
return leaf(t._west if x <= t.cut else t._east,
row,slots,lvl+1)
def preOrder(t):
if t:
yield t
for kid in [t._west,t._east]:
for out in preOrder(kid):
yield out
def leaves(t):
for t1 in preOrder(t):
if t1.leafp:
yield t1
def tprint(t,lvl=0):
if t:
print '|.. '*lvl + str(len(t.m.rows)), '#'+str(t._id)
tprint(t._west,lvl+1)
tprint(t._east,lvl+1)
import sys,math,random
sys.dont_write_bytecode = True
def go(f):
"A decorator that runs code at load time."
print "\n# ---|", f.__name__,"|-----------------"
if f.__doc__: print "#", f.__doc__
f()
# random stuff
seed = random.seed
any = random.choice
# pretty-prints for list
def gs(lst) : return [g(x) for x in lst]
def g(x) : return float('%.4f' % x)
"""
### More interesting, low-level stuff
"""
def timing(f,repeats=10):
"How long does 'f' take to run?"
import time
time1 = time.clock()
for _ in range(repeats):
f()
return (time.clock() - time1)*1.0/repeats
def showd(d):
"Pretty print a dictionary."
def one(k,v):
if isinstance(v,list):
v = gs(v)
if isinstance(v,float):
return ":%s %g" % (k,v)
return ":%s %s" % (k,v)
return ' '.join([one(k,v) for k,v in
sorted(d.items())
if not "_" in k])
####################################
## high-level business knowledge
def effort(d,project, a=2.94,b=0.91):
"Primitive estimation function"
def sf(x) : return x[0].isupper()
sfs , ems = 0.0, 1.0
kloc = project[d.kloc]
i = -1
for name,val in zip(d.names,project):
i += 1
if i > d.sfem : break
if sf(name):
sfs += val
else:
ems *= val
return a*kloc**(b + 0.01*sfs) * ems
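# effort() above is the COCOMO II post-architecture form
#   effort = a * KLOC**(b + 0.01*sum(scale factors)) * product(effort multipliers)
# where upper-case column names are treated as scale factors and the rest as
# effort multipliers (a=2.94, b=0.91 are the published COCOMO II defaults).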
def cart(train,test,most):
from sklearn import tree
indep = map(lambda x: x[:most+1], train)
dep = map(lambda x: x[most+1], train)
t = tree.DecisionTreeRegressor(random_state=1).fit(indep,dep)
return t.predict(test[:most+1])[0]
def nc(n):
return True #say(chr(ord('a') + n))
def loo(s1,s2,s3,s4,s5,s6,s7,s8,s9,s10,s11,
s12,s13,s14,s15,s16,s17,s18,s19,s20,s21,s22,s23,s24,s25,s26,
s27,s28,s29,s30,s31,s32,s33,s34,s35,s36,s37,s38,s39,s40,s41,
s42,s43,s44,s45,s46,s47,s48,s49,
model=nasa93,t=tunings(),opt=None,detuning=True
):
"Leave one-out"
if opt == None: opt=options()
d= model(opt)
for i,project in enumerate(d.projects):
want = project[d.effort]
them = d.projects[:i] + d.projects[i+1:]
if s15.use:
nc(15)
got15=knn(model(),them,project,opt,5); s15.seen(got15,want)
if s16.use:
nc(16)
got16=knn(model(),them,project,opt,3); s16.seen(got16,want)
if s17.use:
nc(17)
got17=knn(model(),them,project,opt,1); s17.seen(got17,want)
#say(0)
if s5.use or s7.use:
nc(5)
got5,got7 = vasil(model,them,project); s5.seen(got5,want); s7.seen(got7,want)
#say(1)
if s1.use:
nc(1)
got1 = wildGuess(d,them,opt); s1.seen(got1,want)
#say(2)
if s4.use:
nc(4)
got4 = cart(them, project,d.kloc); s4.seen(got4,want)
#say(5)
if s8.use:
nc(8)
got8 = loc(d,them,project,3); s8.seen(got8,want)
if s18.use:
nc(18)
got18 = loc(d,them,project,1); s18.seen(got18,want)
#say(6)
if s9.use or s10.use or s19.use or s20.use or s21.use or s22.use:
project1 = project[:]
project1[d.kloc]=0
them1=[]
for one in them:
tmp=one[:]
tmp[d.kloc]=0
them1 += [tmp]
if s9.use or s10.use:
nc(9)
got9,got10 = vasil(model,them1,project1);
s9.seen(got9,want); s10.seen(got10,want)
if s19.use:
nc(19)
got19=knn(model(),them1,project1,opt,5); s19.seen(got19,want)
if s20.use:
nc(20)
got20=knn(model(),them1,project1,opt,3); s20.seen(got20,want)
if s21.use:
nc(21)
got21=knn(model(),them1,project1,opt,1); s21.seen(got21,want)
if s22.use:
nc(22)
got22=cart(them1, project1,d.kloc);s22.seen(got22,want)
if s2.use or s3.use:
d= model(opt)
d = valued(d,opt)
for i,project in enumerate(d.projects):
want = project[d.effort]
them = d.projects[:i] + d.projects[i+1:]
if s2.use:
nc(2)
got2 = effort(d,project,2.94,0.91); s2.seen(got2,want)
if s3.use:
nc(3)
a,b = coconut(d,them,opt);
got3 = effort(d,project,a,b); s3.seen(got3,want)
if s11.use or s12.use:
#if not detuning: return True
t=rrs3(tunings())
d=model()
d = valued(d,opt,t=t)
for i,project in enumerate(d.projects):
want= project[d.effort]
them= d.projects[:i] + d.projects[i+1:]
#say(7)
if s11.use:
nc(11)
got11=effort(d,project,2.94,0.91); s11.seen(got11,want)
if s12.use:
nc(12)
a,b=coconut(d,them,opt)
#say(8)
got12= effort(d,project,a,b); s12.seen(got12,want)
if s23.use or s24.use or s25.use or s26.use:
t = rrs3(tunings())
d = model()
d = valued(d,opt,t=t)
for i,project in enumerate(d.projects):
want= project[d.effort]
them= d.projects[:i] + d.projects[i+1:]
for n,s in [(8,s23), (12,s24), (16,s25),(4,s26)]:
nc(23)
them = shuffle(them)[:n]
a,b = coconut(d,them,opt)
got = effort(d,project,a,b); s.seen(got,want)
if s27.use or s28.use or s29:
for n,s in [(1,s27),(2,s28),(3,s29)]:
t = rrs3(tunings())
d = model()
d = less(d,n)
d = valued(d,opt,t=t)
for i,project in enumerate(d.projects):
nc(28)
want= project[d.effort]
them= d.projects[:i] + d.projects[i+1:]
a,b = coconut(d,them,opt)
got = effort(d,project,a,b); s.seen(got,want)
if s30.use or s31.use or s32.use or s33.use or s34.use or s35.use or s36.use or s37.use or s38.use or s39.use or s40.use or s41.use:
for n1,n2,s in [(0.25,4,s30),(0.25,8,s31),(0.25,12,s32),(0.25,16,s33),
(0.5, 4,s34),(0.5, 8,s35),(0.5, 12,s36),(0.5, 16,s37),
(1,4,s38),(1,8,s39),(1,12,s40),(1,16,s41)]:
t = rrs3(tunings())
d = model()
d.projects = shuffle(d.projects)[:n2]
d = fss(d,n1)
d = valued(d,opt,t=t)
for i,project in enumerate(d.projects):
nc(36)
want= project[d.effort]
them= d.projects[:i] + d.projects[i+1:]
a,b = coconut(d,them,opt)
got = effort(d,project,a,b); s.seen(got,want)
if s13.use or s14.use:
t=rrs5(tunings())
d=model()
d = valued(d,opt,t=t)
for i,project in enumerate(d.projects):
want= project[d.effort]
them= d.projects[:i] + d.projects[i+1:]
#say(9)
if s13.use:
nc(13)
got13=effort(d,project,2.94,0.91); s13.seen(got13,want)
if s14.use:
nc(14)
a,b=coconut(d,them,opt)
#say("+")
got14= effort(d,project,a,b); s14.seen(got14,want)
if s42.use or s43.use or s44.use or s45.use or s46.use or s47.use or s48.use or s49.use:
n1 = 0.5
n2 = 8
for noise,(carts,cocs,nuts,nears) in [
(.25, ( s42, s44, s46, s48)),
(.5, ( s43, s45,s47, s49))
]:
t = rrs3(tunings())
d = model()
d.projects = shuffle(d.projects)[:n2]
d = fss(d,n1)
d = valued(d,opt,t=t)
for project in d.projects:
old = project[d.kloc]
new = old * ((1 - noise) + 2*noise*random.random())
project[d.kloc]= new
for i,project in enumerate(d.projects):
nc(42)
want= project[d.effort]
them= d.projects[:i] + d.projects[i+1:]
a,b=coconut(d,them,opt)
nuts.seen(effort(d,project,a,b) ,want)
carts.seen(cart(them, project,d.kloc),want)
cocs.seen(effort(d,project) ,want)
def loc(d,them,project,n):
me = project[d.kloc]
all= sorted([(abs(me-x[d.kloc]),x[d.effort]) for x in them])
one = two = three = four = five = all[0][1]
if len(them) > 1: two = all[1][1]
if len(them) > 2: three=all[2][1]
if len(them) > 3: four=all[3][1]
if len(them) > 4: five=all[4][1]
# look at that: mean works as well as triangular kernel
if n == 1 : return one
if n == 2 : return (one *2 + two*1)/3
if n == 3 : return (one*3 + two*2+ three*1)/6
if n == 4 : return (one * 4 + two * 3 + three * 2 + four * 1)/10
return (one*5 + two*4 + three*3 + four*2 + five*1)/15
# if n == 1 : return one
# if n == 2 : return (one *1 + two*1)/2
# if n == 3 : return (one*1 + two*1+ three*1)/3
# if n == 4 : return (one * 1 + two * 1 + three * 1 + four * 1)/4
# return (one*1 + two*1 + three*1 + four*1 + five*1)/5
def walk(lst):
lst = sorted([(median(x[1].all),x[0],x[1].all) for x in lst])
say( lst[0][1])
walk1(lst[0],lst[1:])
print ""
def walk1(this,those):
if those:
that=those[0]
_,n1=this[1], this[2]
w2,n2=that[1], that[2]
if mwu(n1,n2) :
say(" < "+ str(w2))
walk1(that,those[1:])
else:
say(" = " + str(w2))
walk1(("","",n1+n2),those[1:])
def a12slow(lst1,lst2,rev=True):
"how often is x in lst1 more than y in lst2?"
more = same = 0.0
for x in lst1:
for y in lst2:
if x==y : same += 1
elif rev and x > y : more += 1
elif not rev and x < y : more += 1
x= (more + 0.5*same) / (len(lst1)*len(lst2))
#if x > 0.71: return g(x),"B"
#if x > 0.64: return g(x),"M"
return x> 0.6 #g(x),"S"
def a12cmp(x,y):
if y - x > 0 : return 1
if y - x < 0 : return -1
else: return 0
a12s=0
def a12(lst1,lst2, gt= a12cmp):
"how often is x in lst1 more than y in lst2?"
global a12s
a12s += 1
def loop(t,t1,t2):
while t1.j < t1.n and t2.j < t2.n:
h1 = t1.l[t1.j]
h2 = t2.l[t2.j]
h3 = t2.l[t2.j+1] if t2.j+1 < t2.n else None
if gt(h1,h2) < 0:
t1.j += 1; t1.gt += t2.n - t2.j
elif h1 == h2:
if h3 and gt(h1,h3) < 0:
t1.gt += t2.n - t2.j - 1
t1.j += 1; t1.eq += 1; t2.eq += 1
else:
t2,t1 = t1,t2
return t.gt*1.0, t.eq*1.0
#--------------------------
lst1 = sorted(lst1, cmp=gt)
lst2 = sorted(lst2, cmp=gt)
n1 = len(lst1)
n2 = len(lst2)
t1 = Thing(l=lst1,j=0,eq=0,gt=0,n=n1)
t2 = Thing(l=lst2,j=0,eq=0,gt=0,n=n2)
gt,eq= loop(t1, t1, t2)
#print gt,eq,n1,n2
return gt/(n1*n2) + eq/2/(n1*n2)
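# a12() computes the Vargha-Delaney A measure: the chance that a value drawn
# from lst1 beats one drawn from lst2, counting ties as half, so identical
# samples sit near 0.5 and fully separated samples at 1.0.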
class Counts(): # Add/delete counts of numbers.
def __init__(i,inits=[]):
i.n = i.mu = i.m2 = 0.0
for number in inits: i + number
def sd(i) :
if i.n < 2: return i.mu
else:
return (i.m2*1.0/(i.n - 1))**0.5
def __add__(i,x):
i.n += 1
delta = x - i.mu
i.mu += delta/(1.0*i.n)
i.m2 += delta*(x - i.mu)
def wildGuess(d,projects,opt):
tally = 0
for _ in xrange(opt.guesses):
project = any(projects)
tally += project[d.effort]
return tally*1.0/opt.guesses
def coconut(d,tests,opt,lvl=None,err=10**6,
a=10,b=1,ar=10,br=0.5):
"Chase good a,b settings"
#return 2.94,0.91
def efforts(a,b):
s=Score()
for project in tests:
got = effort(d,project,a,b)
want = project[d.effort]
s.seen(got,want)
return s.mmre()
if lvl == None: lvl=opt.levels
if lvl < 1 : return a,b
old = err
for _ in range(opt.samples):
a1 = a - ar + 2*ar*r()
b1 = b - br + 2*br*r()
tmp = efforts(a1,b1)
if tmp < err:
a,b,err = a1,b1,tmp
if (old - err)/old < opt.epsilon:
return a,b
else:
return coconut(d,tests,opt,lvl-1,err, a=a,b=b,
ar=ar*opt.shrink,
br=br*opt.shrink)
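# coconut() is a shrinking random search: each level samples opt.samples
# candidate (a,b) pairs in a box of half-width (ar,br) around the current best,
# keeps any improvement, then recurses with the box scaled by opt.shrink until
# opt.levels runs out or the relative gain falls below opt.epsilon.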
## sample main
def main(model=nasa93):
seed(1)
for shrink in [0.66,0.5,0.33]:
for sam in [5,10,20]:
for lvl in [5,10,20]:
for rnd in [0,1,2]:
opt=options()
opt.shrink=shrink
opt.samples=sam
opt.round = rnd
opt.levels = lvl
loo(model=model,opt=opt)
#########################################
# start up code
def mwu(l1,l2):
import numpy as np
from scipy.stats import mannwhitneyu
#print "l1>",map(g,sorted(l1))
#print "l2>",map(g,sorted(l2))
_, p_value = mannwhitneyu(np.array(l1),
np.array(l2))
return p_value <= 0.05
# for e in [1,2,4]:
# print "\n"
# l1 = [r()**e for _ in xrange(100)]
# for y in [1.01,1.1,1.2,1.3,1.4, 1.5]:
# l2 = map(lambda x: x*y,l1)
# print e,y,mwu(l1,l2)
def test1(repeats=10,models=[coc81],what='locOrNot'):
seed(1)
print repeats,what,map(lambda x:x.__name__,models)
#for m in [ newCIIdata, xyz14,nasa93,coc81]:
import time
detune=False
for m in models:
#(newCIIdataDeTune,True),#, #,
# (xyz14deTune,True)
# #(coc81,True),
#(nasa93,True)
# ]:
s1=Score(); s2=Score(); s3=Score(); s4=Score();
s5=Score(); s6=Score(); s7=Score(); s8=Score()
s9=Score(); s10=Score(); s11=Score(); s12=Score();
s13=Score(); s14=Score();
s15=Score(); s16=Score(); s17=Score(); s18=Score()
s19=Score(); s20=Score(); s21=Score();
s22=Score()
s23=Score()
s24=Score(); s25=Score(); s26=Score()
s27=Score(); s28=Score(); s29=Score()
s30=Score(); s31=Score(); s32=Score()
s33=Score(); s34=Score(); s35=Score()
s36=Score(); s37=Score(); s38=Score()
s39=Score(); s40=Score(); s41=Score()
s42=Score(); s43=Score(); s44=Score()
s45=Score(); s46=Score(); s47=Score()
s48=Score(); s49=Score();
# loc or no loc
exps =dict(locOrNot = [("coc2000",s2),("coconut",s3),
("loc(3)",s8), ("loc(1)",s18),
#('knear(3)',s16), ("knear(3) noloc",s20),
#('knear(1)',s17),("knear(1) noloc",s21)
],
basicRun = [("coc2000",s2),("coconut",s3),
('knear(3)',s16),('knear(1)',s17),
#("cluster(1)",s5),
("cluster(2)",s7),
("cart",s4)],
qualitative= [("coc2000",s2),("coconut",s3),
#('knear(3)',s16),('knear(1)',s17),
("coco2000(simp)",s13), ("coconut(simp)",s14),
("coco2000(lmh)",s11), ("coconut(lmh)",s12)],
other = [('(c=1)n-noloc',s9),('(c=2)n-noloc',s10)],
less = [("coc2000",s2),("coconut",s3),
("coco2000(lmh)",s11), ("coconut(lmh)",s12),
('coconut(lmh8)',s23),('coconut(lmh12)',s24),
('coconut(lmh16)',s25),
('coconut(lmh4)',s26)],
lessCols = [("coc2000",s2),("coconut",s3),
('coconut(just5)',s27),
('coconut(just10)',s28),
('coconut(just15)',s29)],
fssCols = [("coc2000",s2),("coconut",s3),
('coconut:c*0.25,r=4',s30),
('coconut:c*0.25,r=8',s31),
#('coconut:c*0.25,r=12',s32),
#('coconut:c*0.25,r=16',s33),
('coconut:c*0.5,r=4',s34),
('coconut:c*0.5,r=8',s35),
#('coconut:c*0.5,r=12',s36),
#('coconut:c*1,r=16',s37),
('coconut:c*1,r=4',s38),
('coconut:c*1,r=8',s39),
#('coconut:c*1,r=12',s40),
#('coconut:c*1,r=16',s41)
],
noise = [ ("cart",s4), ("cart/4",s42), ("cart/2",s43),
("coc2000",s2), ("coc2000n/4",s44), ("coc2000n/2",s45),
('coconut:c*0.5,r=8',s35), ('coconut:c*0.5r=8n/4',s46) , ('coconut:c*0.5,r=8n/2',s47),
('knear(1)',s17), ('knear(1)/4',s48), ('knear(1)/2',s49)
]
)
lst = exps[what]
print '%',what
for _,s in lst: s.use=True
t1=time.clock()
print "\n\\subsection{%s}" % m.__name__
say("%")
for i in range(repeats):
say(' ' + str(i))
loo(s1,s2,s3,s4,s5,s6,s7,s8,s9,s10,s11,s12,s13,
s14,s15,s16,s17,s18,s19,s20,s21,s22,s23,s24,s25,s26,
s27,s28,s29,s30,s31,s32,
s33,s34,s35,s36,s37,s38,s39,s40,s41,s42,s43,s44,s45,s46,s47,s48,s49,
model=m,detuning=detune)
global bs
global a12s
bs = a12s = 0
t2 = time.clock()
print "="
rdivDemo([[x[0]] + x[1].all for x in lst if x[1].all])
t3 = time.clock()
print "\n :learn",t2-t1,":analyze",t3-t2,":boots",bs,"effects",a12s,":conf",0.99**bs
#print 'B>', bootstrap([1,2,3,4,5],[1,2,3,4,5])
def knn(src,them,project,opt,k):
slots = where0(what= lambda x:cocVals(x,src.effort))
m0=Model(src.names,src.projects)
m1=clone(m0,them)
w = [None]*k
ws = 0
for i in range(k): ws += i+1
for i in range(k): w[i] = (i+1)/ws
w.reverse()
#w = [1/k]*k
dists =[(dist(m1,Row(cells=that),Row(cells=project),slots),that[src.effort])
for that in them]
est = 0
for w1,(_,x) in zip(w,sorted(dists)[:k]):
est += w1*x
return est
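# knn() ranks past projects by the normalised distance in dist() and blends the
# efforts of the k closest with linearly decaying weights, heaviest on the
# nearest neighbour.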
def cocVals(row,n):
if isinstance(row,Row):
row=row.cells
return row[:n]
def vasil(src,data,project):
all = src()
m0 = Model(all.names,all.projects)
m1 = clone(m0,data)
e = all.effort
slots = where0(what= lambda x:cocVals(x,e)
,klass=lambda x:x.cells[all.effort])
t = where(m1,m1.rows,slots)
row = Row(cells=project)
got1 = got2 = Num('all', [slots.klass(r) for r in data]).median()
(d1,c1),(d2,c2) = centroid2(row,t)
if c1 or c2:
w1,w2 = 1/(d1+0.0001), 1/(d2+0.0001)
e1 = c1.m.centroid()[e]
e2 = c2.m.centroid()[e]
got2 = (w1*e1 + w2*e2) / (w1+w2)
got1=myCentroid(row,t).m.centroid()[e]
#got1b=leaf(t,row,slots).m.centroid()[e]
return got1,got2
class Num:
"An Accumulator for numbers"
def __init__(i,name,inits=[]):
i.n = i.m2 = i.mu = 0.0
i.all=[]
i._median=None
i.name = name
i.rank = 0
for x in inits: i.add(x)
def s(i) : return (i.m2/(i.n - 1))**0.5
def add(i,x):
i._median=None
i.n += 1
i.all += [x]
delta = x - i.mu
i.mu += delta*1.0/i.n
i.m2 += delta*(x - i.mu)
def __add__(i,j):
return Num(i.name + j.name,i.all + j.all)
def quartiles(i):
def p(x) : return int(100*g(xs[x]))
i.median()
xs = i.all
n = int(len(xs)*0.25)
return p(n) , p(2*n) , p(3*n)
def median(i):
if not i._median:
i.all = sorted(i.all)
i._median=median(i.all)
return i._median
def __lt__(i,j):
return i.median() < j.median()
def spread(i):
i.all=sorted(i.all)
n1=i.n*0.25
n2=i.n*0.75
if len(i.all) <= 1:
return 0
if len(i.all) == 2:
return i.all[1] - i.all[0]
else:
return i.all[int(n2)] - i.all[int(n1)]
def different(l1,l2):
#return bootstrap(l1,l2) and a12(l2,l1)
return a12(l2,l1) and bootstrap(l1,l2)
def scottknott(data,cohen=0.3,small=3, useA12=False,epsilon=0.01):
"""Recursively split data, maximizing delta of
the expected value of the mean before and
after the splits.
Reject splits with under 3 items"""
#data = [d for d in data if d.spread() < 0.75]
all = reduce(lambda x,y:x+y,data)
#print sorted(all.all)
same = lambda l,r: abs(l.median() - r.median()) <= all.s()*cohen
if useA12:
same = lambda l, r: not different(l.all,r.all)
big = lambda n: n > small
return rdiv(data,all,minMu,big,same,epsilon)
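# The commented-out rdiv0()..rdiv6() calls near the end of this file exercise
# scottknott() on small hand-made lists via rdivDemo().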
def rdiv(data, # a list of class Nums
all, # all the data combined into one num
div, # function: find the best split
big, # function: rejects small splits
same, # function: rejects similar splits
epsilon): # small enough to split two parts
"""Looks for ways to split sorted data,
Recurses into each split. Assigns a 'rank' number
to all the leaf splits found in this way.
"""
def recurse(parts,all,rank=0):
"Split, then recurse on each part."
cut,left,right = maybeIgnore(div(parts,all,big,epsilon),
same,parts)
if cut:
# if cut, rank "right" higher than "left"
rank = recurse(parts[:cut],left,rank) + 1
rank = recurse(parts[cut:],right,rank)
else:
# if no cut, then all get same rank
for part in parts:
part.rank = rank
return rank
recurse(sorted(data),all)
return data
def maybeIgnore((cut,left,right), same,parts):
if cut:
if same(sum(parts[:cut],Num('upto')),
sum(parts[cut:],Num('above'))):
cut = left = right = None
return cut,left,right
def minMu(parts,all,big,epsilon):
"""Find a cut in the parts that maximizes
the expected value of the difference in
the mean before and after the cut.
Reject splits that are insignificantly
different or that generate very small subsets.
"""
cut,left,right = None,None,None
before, mu = 0, all.mu
for i,l,r in leftRight(parts,epsilon):
if big(l.n) and big(r.n):
n = all.n * 1.0
now = l.n/n*(mu- l.mu)**2 + r.n/n*(mu- r.mu)**2
if now > before:
before,cut,left,right = now,i,l,r
return cut,left,right
def leftRight(parts,epsilon=0.01):
"""Iterator. For all items in 'parts',
return everything to the left and everything
from here to the end. For reasons of
efficiency, take a first pass over the data
to pre-compute and cache right-hand-sides
"""
rights = {}
n = j = len(parts) - 1
while j > 0:
rights[j] = parts[j]
if j < n: rights[j] += rights[j+1]
j -=1
left = parts[0]
for i,one in enumerate(parts):
if i> 0:
if parts[i]._median - parts[i-1]._median > epsilon:
yield i,left,rights[i]
left += one
bs=0
def bootstrap(y0,z0,conf=0.01,b=1000):
"""The bootstrap hypothesis test from
p220 to 223 of Efron's book 'An
introduction to the bootstrap.'"""
global bs
bs += 1
class total():
"quick and dirty data collector"
def __init__(i,some=[]):
i.sum = i.n = i.mu = 0 ; i.all=[]
for one in some: i.put(one)
def put(i,x):
i.all.append(x);
i.sum +=x; i.n += 1; i.mu = float(i.sum)/i.n
def __add__(i1,i2): return total(i1.all + i2.all)
def testStatistic(y,z):
"""Checks if two means are different, tempered
by the sample size of 'y' and 'z'"""
tmp1 = tmp2 = 0
for y1 in y.all: tmp1 += (y1 - y.mu)**2
for z1 in z.all: tmp2 += (z1 - z.mu)**2
s1 = (float(tmp1)/(y.n - 1))**0.5
s2 = (float(tmp2)/(z.n - 1))**0.5
delta = z.mu - y.mu
if s1+s2:
delta = delta/((s1/y.n + s2/z.n)**0.5)
return delta
def one(lst): return lst[ int(any(len(lst))) ]
def any(n) : return random.uniform(0,n)
y, z = total(y0), total(z0)
x = y + z
tobs = testStatistic(y,z)
yhat = [y1 - y.mu + x.mu for y1 in y.all]
zhat = [z1 - z.mu + x.mu for z1 in z.all]
bigger = 0.0
for i in range(b):
if testStatistic(total([one(yhat) for _ in yhat]),
total([one(zhat) for _ in zhat])) > tobs:
bigger += 1
return bigger / b < conf
def bootstrapd():
def worker(n=30,mu1=10,sigma1=1,mu2=10.2,sigma2=1):
def g(mu,sigma) : return random.gauss(mu,sigma)
x = [g(mu1,sigma1) for i in range(n)]
y = [g(mu2,sigma2) for i in range(n)]
return n,mu1,sigma1,mu2,sigma2,\
'different' if bootstrap(x,y) else 'same'
print worker(30, 10.1, 1, 10.2, 1)
print worker(30, 10.1, 1, 10.8, 1)
print worker(30, 10.1, 10, 10.8, 1)
def rdivDemo(data,max=100):
def z(x):
return int(100 * (x - lo) / (hi - lo + 0.00001))
data = map(lambda lst:Num(lst[0],lst[1:]),
data)
print ""
ranks=[]
for x in scottknott(data,useA12=True):
ranks += [(x.rank,x.median(),x)]
all=[]
for _,__,x in sorted(ranks):
all += x.quartiles()
all = sorted(all)
lo, hi = all[0], all[-1]
print "{\\scriptsize \\begin{tabular}{l@{~~~}l@{~~~}r@{~~~}r@{~~~}c}"
print "\\arrayrulecolor{darkgray}"
print '\\rowcolor[gray]{.9} rank & treatment & median & IQR & \\\\' #min= %s, max= %s\\\\' % (int(lo),int(hi))
last = None
for _,__,x in sorted(ranks):
q1,q2,q3 = x.quartiles()
pre =""
if not last == None and not last == x.rank:
pre= "\\hline"
print pre,'%2s & %12s & %s & %s & \quart{%s}{%s}{%s}{%s} \\\\' % \
(x.rank+1, x.name, q2, q3 - q1, z(q1), z(q3) - z(q1), z(q2),z(100))
last = x.rank
print "\\end{tabular}}"
def rdiv0():
rdivDemo([
["x1",0.34, 0.49, 0.51, 0.6],
["x2",6, 7, 8, 9] ])
def rdiv1():
rdivDemo([
["x1",0.1, 0.2, 0.3, 0.4],
["x2",0.1, 0.2, 0.3, 0.4],
["x3",6, 7, 8, 9] ])
def rdiv2():
rdivDemo([
["x1",0.34, 0.49, 0.51, 0.6],
["x2",0.6, 0.7, 0.8, 0.9],
["x3",0.15, 0.25, 0.4, 0.35],
["x4",0.6, 0.7, 0.8, 0.9],
["x5",0.1, 0.2, 0.3, 0.4] ])
def rdiv3():
rdivDemo([
["x1",101, 100, 99, 101, 99.5],
["x2",101, 100, 99, 101, 100],
["x3",101, 100, 99.5, 101, 99],
["x4",101, 100, 99, 101, 100] ])
def rdiv4():
rdivDemo([
["1",11,12,13],
["2",14,31,22],
["3",23,24,31],
["5",32,33,34]])
def rdiv5():
rdivDemo([
["1",11,11,11],
["2",11,11,11],
["3",11,11,11]])
def rdiv6():
rdivDemo([
["1",11,11,11],
["2",11,11,11],
["4",32,33,34,35]])
#rdiv0(); rdiv1(); rdiv2(); rdiv3(); rdiv4(); rdiv5(); rdiv6()
#exit()
repeats=10
exp='locOrNot'
models=['newCIIdataDeTune',
'xyz14deTune',
'coc81',
'nasa93']
if len(sys.argv)>=2:
repeats=eval(sys.argv[1])
if len(sys.argv)>=3:
exp=sys.argv[2]
if len(sys.argv)>3:
models=sys.argv[3:]
test1(repeats=repeats,models=map(eval,models),what=exp)
| unlicense |
brahmcapoor/naming-changes-complexity | analysis/subject_analysis.py | 1 | 8370 | from random import shuffle
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import csv
import argparse
import os
import shutil
"""
The script required for the data analysis. Requires several python libraries
to function but otherwise isn't too complicated.
"""
def generate_all_graphs():
number_of_subjects = len(os.listdir("../subject logs")) - 1
for i in range(1, number_of_subjects + 1):
try:
os.mkdir("Subject {}".format(i))
except FileExistsError:
shutil.rmtree("Subject {}".format(i))
os.mkdir("Subject {}".format(i))
transparency_log_1, transparency_log_2, transparency_log_3, \
transparency_log_4 = load_subject_data(i)
individual_graph(transparency_log_1, transparency_log_2, "Easy", i,
False)
individual_graph(transparency_log_3, transparency_log_4, "Hard", i,
False)
print("Generated graphs for subject {}".format(i))
def individual_graph(transparencies_1, transparencies_2, condition,
subject_number, display_graph=True):
x = [i for i in range(1, 81)]
sns.pointplot(x, transparencies_1, color='red')
plot = sns.pointplot(x, transparencies_2)
plot.set(xlabel="Trial", ylabel="Contrast",
title="{} Condition".format(condition))
if display_graph:
plt.show()
plot = plot.get_figure()
plot.savefig("Subject {}/{}.png".format(subject_number, condition))
plt.cla()
def find_turning_points(series):
turning_points = []
last_point = len(series) - 1
for i, point in enumerate(series):
if i != 0 and i != last_point:
if (point < series[i - 1] and point < series[i + 1]) or \
(point > series[i - 1] and point > series[i + 1]):
turning_points.append(point)
return turning_points
def find_threshold(log_1, log_2):
average_1 = 0
average_2 = 0
turning_points_1 = find_turning_points(log_1)
if turning_points_1:
average_1 = sum(turning_points_1)/len(turning_points_1)
else:
average_1 = 0
turning_points_2 = find_turning_points(log_2)
if turning_points_2:
average_2 = sum(turning_points_2)/len(turning_points_2)
else:
average_2 = 0
return (average_1 + average_2)/2
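# The threshold is the mean of the two staircases' average reversal (turning)
# points, the usual way to summarise an adaptive up/down procedure.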
def load_subject_data(subject_number):
filename = "../subject logs/subject {}.csv".format(subject_number)
with open(filename, 'r') as f:
reader = csv.reader(f)
data = list(reader)[1:]
transparency_log_1 = [trial[1] for trial in data]
transparency_log_2 = [trial[3] for trial in data]
transparency_log_3 = [trial[5] for trial in data]
transparency_log_4 = [trial[7] for trial in data]
transparency_log_1 = list(map(lambda x: float(x), transparency_log_1))
transparency_log_2 = list(map(lambda x: float(x), transparency_log_2))
transparency_log_3 = list(map(lambda x: float(x), transparency_log_3))
transparency_log_4 = list(map(lambda x: float(x), transparency_log_4))
return (transparency_log_1, transparency_log_2, transparency_log_3,
transparency_log_4)
def check_subject_validity(subject_number):
with open('../subject logs/catch trials.csv', 'r') as f:
reader = csv.reader(f)
data = list(reader)
subject_info = data[subject_number]
catch_trials_valid = int(subject_info[1]) > 29 and int(subject_info[2]) < 5
with open('../memory_results_after.csv', 'r') as f:
reader = csv.reader(f)
data = list(reader)
subject_info = data[subject_number]
remembered_names_correctly = subject_info[2] == subject_info[3] and \
subject_info[4] == subject_info[5] and \
subject_info[6] == '' and \
subject_info[7] == ''
return catch_trials_valid and remembered_names_correctly
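# A subject is kept only if the catch-trial tallies pass both cut-offs above
# and the names recalled after the session match the originals exactly.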
def graph_subject(subject_number):
try:
os.mkdir("Subject {}".format(subject_number))
except FileExistsError:
shutil.rmtree("Subject {}".format(subject_number))
os.mkdir("Subject {}".format(subject_number))
transparency_log_1, transparency_log_2, transparency_log_3, \
transparency_log_4 = load_subject_data(subject_number)
individual_graph(transparency_log_1, transparency_log_2, "Easy",
subject_number)
average_easy = find_threshold(transparency_log_1,
transparency_log_2)
print("Subject average for easy condition is {}".format(average_easy))
individual_graph(transparency_log_3, transparency_log_4, "Hard",
subject_number)
average_hard = find_threshold(transparency_log_3,
transparency_log_4)
print("Subject average for hard condition is {}".format(average_hard))
valid = check_subject_validity(int(subject_number))
if valid:
print("Subject is valid")
else:
print("Subject is invalid")
def show_summary_graph():
number_of_subjects = len(os.listdir("../subject logs")) - 1
subject_data = []
for i in range(1, number_of_subjects + 1):
transparency_log_1, transparency_log_2, transparency_log_3, \
transparency_log_4 = load_subject_data(i)
easy_threshold = find_threshold(transparency_log_1, transparency_log_2)
hard_threshold = find_threshold(transparency_log_3, transparency_log_4)
subject_data.append((i, easy_threshold, "Easy",
check_subject_validity(i)))
subject_data.append((i, hard_threshold, "Hard",
check_subject_validity(i)))
df = pd.DataFrame(subject_data, columns=["Subject", "Threshold",
"Condition", "Valid"])
print(df)
plot = sns.factorplot(data=df,
x="Subject",
y="Threshold",
hue="Condition",
linestyles=[" ", " "],
legend=False,
size=8,
aspect=2)
plt.legend(loc='upper left')
plot.set(xlabel="Subject Number",
ylabel="Contrast",
title="Summary of all subjects")
plt.show()
plot.savefig("Summary.png")
def generate_results_file():
number_of_subjects = len(os.listdir("../subject logs")) - 1
table = []
for i in range(1, number_of_subjects + 1):
transparency_log_1, transparency_log_2, transparency_log_3, \
transparency_log_4 = load_subject_data(i)
easy_threshold = find_threshold(transparency_log_1, transparency_log_2)
hard_threshold = find_threshold(transparency_log_3, transparency_log_4)
valid = check_subject_validity(i)
with open('../memory_results_after.csv', 'r') as f:
reader = csv.reader(f)
data = list(reader)
subject_info = data[i]
round_number = subject_info[1]
table.append([i, round_number, easy_threshold, hard_threshold, valid])
table = pd.DataFrame(table, columns=["Subject", "Round number",
"Easy Threshold", "Hard Threshold",
"Valid"])
table.to_csv("Summary.csv")
print("Results file can be found in Summary.csv")
def main():
plt.rcParams['figure.figsize'] = (18, 8)
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-i", "--individual",
help="Analyze a particular subject",
action="store", metavar='')
group.add_argument("-s", "--summary", help="See a summary graph",
action="store_true")
group.add_argument("-r", "--result", help="Create a results file",
action="store_true")
args = parser.parse_args()
if args.individual:
if args.individual == 'a':
generate_all_graphs()
else:
graph_subject(args.individual)
if args.summary:
show_summary_graph()
if args.result:
generate_results_file()
if __name__ == '__main__':
main()
| mit |
wdurhamh/statsmodels | statsmodels/sandbox/examples/thirdparty/findow_0.py | 33 | 2147 | # -*- coding: utf-8 -*-
"""A quick look at volatility of stock returns for 2009
Just an exercise to find my way around the pandas methods.
Shows the daily rate of return, the square of it (volatility) and
a 5 day moving average of the volatility.
No guarantee for correctness.
Assumes no missing values.
colors of lines in graphs are not great
uses DataFrame and WidePanel to hold data downloaded from yahoo using matplotlib.
I haven't figured out storage, so the download happens at each run
of the script.
getquotes is from pandas\examples\finance.py
Created on Sat Jan 30 16:30:18 2010
Author: josef-pktd
"""
from statsmodels.compat.python import lzip
import numpy as np
import matplotlib.finance as fin
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pa
def getquotes(symbol, start, end):
quotes = fin.quotes_historical_yahoo(symbol, start, end)
dates, open, close, high, low, volume = lzip(*quotes)
data = {
'open' : open,
'close' : close,
'high' : high,
'low' : low,
'volume' : volume
}
dates = pa.Index([dt.datetime.fromordinal(int(d)) for d in dates])
return pa.DataFrame(data, index=dates)
start_date = dt.datetime(2009, 1, 1)
end_date = dt.datetime(2010, 1, 1)
mysym = ['msft', 'ibm', 'goog']
indexsym = ['gspc', 'dji']
# download data
dmall = {}
for sy in mysym:
dmall[sy] = getquotes(sy, start_date, end_date)
# combine into WidePanel
pawp = pa.WidePanel.fromDict(dmall)
print(pawp.values.shape)
# select closing prices
paclose = pawp.getMinorXS('close')
# take log and first difference over time
paclose_ratereturn = paclose.apply(np.log).diff()
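# log(P_t) - log(P_{t-1}) is the daily log return, which is close to the simple
# percentage change for small day-to-day moves.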
plt.figure()
paclose_ratereturn.plot()
plt.title('daily rate of return')
# square the returns
paclose_ratereturn_vol = paclose_ratereturn.apply(lambda x:np.power(x,2))
plt.figure()
plt.title('volatility (with 5 day moving average)')
paclose_ratereturn_vol.plot()
# use convolution to get moving average
paclose_ratereturn_vol_mov = paclose_ratereturn_vol.apply(
lambda x:np.convolve(x,np.ones(5)/5.,'same'))
paclose_ratereturn_vol_mov.plot()
#plt.show()
| bsd-3-clause |
synthicity/pandana | pandana/loaders/pandash5.py | 5 | 2024 | import pandas as pd
def remove_nodes(network, rm_nodes):
"""
Create DataFrames of nodes and edges that do not include specified nodes.
Parameters
----------
network : pandana.Network
rm_nodes : array_like
A list, array, Index, or Series of node IDs that should *not*
be saved as part of the Network.
Returns
-------
nodes, edges : pandas.DataFrame
"""
rm_nodes = set(rm_nodes)
ndf = network.nodes_df
edf = network.edges_df
nodes_to_keep = ~ndf.index.isin(rm_nodes)
edges_to_keep = ~(edf['from'].isin(rm_nodes) | edf['to'].isin(rm_nodes))
return ndf.loc[nodes_to_keep], edf.loc[edges_to_keep]
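# Illustrative use (hypothetical IDs): remove_nodes(net, [12, 99]) returns node
# and edge frames with those two nodes, and every edge touching them, dropped.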
def network_to_pandas_hdf5(network, filename, rm_nodes=None):
"""
Save a Network's data to a Pandas HDFStore.
Parameters
----------
network : pandana.Network
filename : str
rm_nodes : array_like
A list, array, Index, or Series of node IDs that should *not*
be saved as part of the Network.
"""
if rm_nodes is not None:
nodes, edges = remove_nodes(network, rm_nodes)
else:
nodes, edges = network.nodes_df, network.edges_df
with pd.HDFStore(filename, mode='w') as store:
store['nodes'] = nodes
store['edges'] = edges
store['two_way'] = pd.Series([network._twoway])
store['impedance_names'] = pd.Series(network.impedance_names)
def network_from_pandas_hdf5(cls, filename):
"""
Build a Network from data in a Pandas HDFStore.
Parameters
----------
cls : class
Class to instantiate, usually pandana.Network.
filename : str
Returns
-------
network : pandana.Network
"""
with pd.HDFStore(filename) as store:
nodes = store['nodes']
edges = store['edges']
two_way = store['two_way'][0]
imp_names = store['impedance_names'].tolist()
return cls(
nodes['x'], nodes['y'], edges['from'], edges['to'], edges[imp_names],
twoway=two_way)
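# Hedged, illustrative sketch (not part of pandana's API): how the two helpers
# above are typically paired. `net` is assumed to be an already-built
# pandana.Network, and Network is assumed importable from the package root.
def _example_hdf5_roundtrip(net, filename='network.h5', rm_nodes=None):
    from pandana import Network  # assumed import location
    # write nodes/edges (optionally dropping rm_nodes), then rebuild a Network
    network_to_pandas_hdf5(net, filename, rm_nodes=rm_nodes)
    return network_from_pandas_hdf5(Network, filename)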
| agpl-3.0 |
inkenbrandt/loggerloader | loggerloader/llgui.py | 1 | 107310 | import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import NavigationToolbar2Tk, FigureCanvasTkAgg
# Implement the default Matplotlib key bindings.
import matplotlib.pyplot as plt
from matplotlib import style
from matplotlib.backend_bases import key_press_handler
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
import os
import platform
import re
import glob
import pickle
import gzip
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
from tkinter import simpledialog
from tkcalendar import DateEntry
from collections import OrderedDict
from pandastable import Table
from pandastable import MultipleValDialog
from pandastable import plotting
from pandastable import TableModel
from pandastable import Style
from pandastable import dialogs
from pandastable import util
from pandastable import logfile
from pandastable import SimpleEditor
#from tksheet import Sheet
from pandas.plotting import register_matplotlib_converters
import time
register_matplotlib_converters()
style.use('ggplot')
import loggerloader as ll
class Feedback:
def __init__(self, master):
# create main window and configure size and title
# tk.Tk.__init__(self, *args, **kwargs)
master.geometry('1400x800')
master.wm_title("Transducer Processing")
self.root = master
self.main = master
# Get platform into a variable
self.currplatform = platform.system()
self.setConfigDir()
#if not hasattr(self,'defaultsavedir'):
self.defaultsavedir = os.path.join(os.path.expanduser('~'))
#self.sheetframes = {}
self.loadAppOptions()
#start logging
self.start_logging()
try:
self.root.iconbitmap(r'G:\My Drive\Python\Pycharm\loggerloader\data_files\icon.ico')
except:
pass
self.currentdir = os.path.expanduser('~')
#self.dropmenu(master)
self.createMenuBar()
self.datastr, self.data, self.datatable, self.combo = {}, {}, {}, {}
self.entry = {}
self.locidmatch = {}
self.bulktransfilestr = {} # dictionary to store trans file names
# Create side by side panel areas
self.panedwindow = ttk.Panedwindow(master, orient='horizontal')
self.panedwindow.pack(fill='both', expand=True)
self.process_frame = ttk.Frame(self.panedwindow, width=150, height=400, relief='sunken')
self.frame2 = ttk.Frame(self.panedwindow, width=400, height=400, relief='sunken')
self.panedwindow.add(self.process_frame, weight=2)
self.panedwindow.add(self.frame2, weight=3)
# add tabs in the frame to the right
self.notebook = ttk.Notebook(self.frame2)
self.notebook.pack(fill='both', expand=True)
self.notelist = {}
self.projopen = False
self.newProject()
# add tabs in the frame to the left
self.processing_notebook = ttk.Notebook(self.process_frame)
self.processing_notebook.pack(fill='both', expand=True)
#self.onewelltab = ttk.Frame(self.processing_notebook)
#https://stackoverflow.com/questions/3085696/adding-a-scrollbar-to-a-group-of-widgets-in-tkinter
self.frame = ttk.Frame(self.processing_notebook)
self.canvas = tk.Canvas(self.frame, borderwidth=0, width=150, height=800)
self.onewelltab = tk.Frame(self.canvas)
self.vsb = tk.Scrollbar(self.frame, orient="vertical", command=self.canvas.yview)
self.canvas.configure(yscrollcommand=self.vsb.set)
self.vsb.pack(side="right", fill="y")
self.canvas.pack(side="left", fill="both", expand=True)
self.canvas.create_window((4,4), window=self.onewelltab, anchor="nw", tags="self.frame")
self.onewelltab.bind("<Configure>", self.onFrameConfigure)
self.bulkwelltab = ttk.Frame(self.processing_notebook)
self.processing_notebook.add(self.frame, text='Single-Well Process')
self.processing_notebook.add(self.bulkwelltab, text='Bulk Well Process')
self.processing_notebook.bind("<<NotebookTabChanged>>", self.tab_update)
# SINGLE WELL PROCESSING TAB for left side of application ---------------------------------------------
# Header image logo and Description seen by user
frame_header = ttk.Frame(self.onewelltab)
frame_header.pack(pady=5)
# Included because some attachments fail when packaging code
ttk.Label(frame_header, wraplength=450,
text=" Utah Geological Survey Scripts for Processing transducer data").grid(row=0, column=0)
# Data Entry Frame
self.filefinders('well') # Select and import well data
self.outlierremove('well')
self.filefinders('baro') # Select and import baro data
# Align Data
self.add_alignment_interface()
# -----------Manual Data------------------------------------
# Select Manual Table Interface
ttk.Separator(self.onewelltab).pack(fill=tk.X, pady=5)
self.frame_step4 = ttk.Frame(self.onewelltab)
self.frame_step4.pack()
ttk.Label(self.frame_step4, text="4. Select Manual Data:").grid(row=0, column=0, columnspan=3)
self.manbook = ttk.Notebook(self.frame_step4)
self.manbook.grid(row=1, column=0, columnspan=3)
self.manframe = ttk.Frame(self.manbook)
self.manfileframe = ttk.Frame(self.manbook)
self.manbook.add(self.manframe, text='Manual Entry')
self.manbook.add(self.manfileframe, text='Data Import')
# validates time number inputs
self.measvalidation = (self.manframe.register(self.only_meas), '%P')
self.man_date, self.man_hour, self.man_min, self.man_meas, self.man_datetime = {}, {}, {}, {}, {}
# labels and date, time, and measure entry for manual measurements
ttk.Label(self.manframe, text="Date of Measure").grid(row=0, column=1)
ttk.Label(self.manframe, text="HH").grid(row=0, column=2, columnspan=1, sticky='WENS')
ttk.Label(self.manframe, text=":").grid(row=0, column=3, columnspan=1, sticky='WENS')
ttk.Label(self.manframe, text="MM").grid(row=0, column=4, columnspan=1, sticky='WENS')
ttk.Label(self.manframe, text="Measure").grid(row=0, column=5)
ttk.Label(self.manframe, text="Units").grid(row=0, column=6)
self.date_hours_min(0) # 1st manual measure
self.date_hours_min(1) # 2nd manual measure
# units
self.manunits = ttk.Combobox(self.manframe, width=5, values=['ft', 'm'], state="readonly")
self.manunits.grid(row=1, column=6, rowspan=3)
self.manunits.current(0)
# locid
ttk.Label(self.manframe, text="Locationid").grid(row=0, column=6)
self.man_locid = ttk.Entry(self.manframe, width=11)
self.man_locid.grid(row=1, column=6, rowspan=3)
# Tab for entering manual data by file
# TODO Auto align sheet fields to columns
manfileframetext = """File with manual data must have datetime, reading, and locationid fields"""
key = 'manual-single'
ttk.Label(self.manfileframe, text=manfileframetext).grid(row=0, column=0, columnspan=4)
self.datastr[key] = tk.StringVar(self.manfileframe)
self.datastr[key].set('G:/Shared drives/UGS_Groundwater/Projects/Transducers/manmeas.csv')
man_entry = ttk.Entry(self.manfileframe, textvariable=self.datastr[key], width=80, justify='left')
man_entry.grid(row=2, column=0, columnspan=4)
self.fillervals = ['readingdate', 'dtwbelowcasing', 'locid']
man_entry.bind('<Double-ButtonRelease-1>', lambda event: self.mandiag(event, key='manual-single'))
self.scombo, self.scombo_choice, self.scombo_label = {}, {}, {}
self.scombovals = {"Datetime": [3, 0, 15, self.fillervals, 4, 0],
"DTW": [3, 1, 15, self.fillervals, 4, 1],
"locationid": [3, 2, 15, self.fillervals, 4, 2],
"Pick id": [5, 1, 15, [1001, 1002], 5, 2]}
for ky, vals in self.scombovals.items():
self.scombo_choice[ky] = tk.StringVar()
self.scombo_label[ky] = ttk.Label(self.manfileframe, text=ky)
self.scombo_label[ky].grid(row=vals[0], column=vals[1])
self.scombo[ky] = ttk.Combobox(self.manfileframe, width=vals[2], values=self.fillervals,
textvariable=self.scombo_choice[ky],
postcommand=lambda: self.man_col_select_single(self.scombo[ky]))
self.scombo[ky].grid(row=vals[4], column=vals[5])
self.mandiag(False, key='manual-single')
ttk.Label(self.manfileframe, text="units").grid(row=3, column=3)
self.manunits = ttk.Combobox(self.manfileframe, width=5,
values=['ft', 'm'], state="readonly")
self.manunits.grid(row=4, column=3)
self.manunits.current(0)
b = ttk.Button(self.frame_step4,
text='Process Manual Data',
command=self.proc_man)
b.grid(column=0, row=2, columnspan=3)
self.fix_drift_interface() # Fix Drift Button
self.add_elevation_interface(self.onewelltab)
ttk.Separator(self.onewelltab, orient=tk.HORIZONTAL).pack(fill=tk.X)
save_onewell_frame = ttk.Frame(self.onewelltab)
save_onewell_frame.pack()
b = ttk.Button(save_onewell_frame, text='Save csv', command=self.save_one_well)
b.pack()
# BULK UPLOAD TAB of left side of application -------------------------------------------------------
# BulkUploader(self.bulkwelltab)
dirselectframe = ttk.Frame(self.bulkwelltab)
dirselectframe.pack()
self.make_well_info_frame(dirselectframe)
ttk.Separator(dirselectframe, orient=tk.HORIZONTAL).pack(fill=tk.X, pady=5)
# pick directory with transducer files and populate a scrollable window with combobox selections
filefinderframe = ttk.Frame(dirselectframe)
filefinderframe.pack()
ttk.Label(filefinderframe, text='3. Pick directory with relevant well files.').grid(column=1, row=0,
columnspan=2)
ttk.Label(filefinderframe, text='2. Pick Sampling Network').grid(column=0, row=0, columnspan=1)
self.datastr['trans-dir'] = tk.StringVar(filefinderframe, value=f'Double-Click for transducer file directory')
self.filefnd = ttk.Entry(filefinderframe, textvariable=self.datastr['trans-dir'], width=80, state='disabled')
self.filefnd.grid(column=1, row=1, columnspan=2)
self.combo_source = ttk.Combobox(filefinderframe,
values=['Snake Valley Wells', 'Wetlands Piezos', 'WRI', 'Other'],
state='disabled')
self.combo_source.grid(column=0, row=1)
self.combo_source.current(0)
filefoundframe = ttk.Frame(dirselectframe)
self.combo_source.bind("<<ComboboxSelected>>", lambda f: self.grab_trans_dir(filefoundframe))
self.filefnd.bind('<Double-ButtonRelease-1>', lambda f: self.grab_trans_dir(filefoundframe))
filefoundframe.pack()
ttk.Separator(dirselectframe, orient=tk.HORIZONTAL).pack(fill=tk.X, pady=5)
applymatchframe = ttk.Frame(dirselectframe)
applymatchframe.pack()
self.inputforheadertable = {}
self.bulk_match_button = tk.Button(applymatchframe,
text='5. Click when done matching files to well names',
command=lambda: self.make_file_info_table(master),
state='disabled')
self.bulk_match_button.pack()
ttk.Separator(dirselectframe, orient=tk.HORIZONTAL).pack(fill=tk.X, pady=5)
bulk_align_frame = ttk.Frame(dirselectframe)
bulk_align_frame.pack()
self.align_bulk_wb_button = tk.Button(bulk_align_frame,
text='6. Align Well-Baro Data',
command=self.align_well_baro_bulk,
state='disabled')
self.align_bulk_wb_button.grid(row=0, column=0)
self.export_align = tk.IntVar()
self.export_align_check = tk.Checkbutton(bulk_align_frame,
text="Export Aligned Data?",
variable=self.export_align,
state='disabled')
self.export_align_check.grid(row=0, column=1)
self.export_align_check.deselect()
ttk.Separator(dirselectframe, orient=tk.HORIZONTAL).pack(fill=tk.X, pady=5)
ttk.Label(dirselectframe, text='7. Import Manual Data').pack()
# self.manfileframe(dirselectframe).pack()
self.bulk_manfileframe = ttk.Frame(dirselectframe)
self.bulk_manfileframe.pack()
self.man_file_frame(self.bulk_manfileframe,key='bulk-manual')
self.proc_man_bulk_button = ttk.Button(self.bulk_manfileframe, text='Process Manual Data', command=self.proc_man_bulk)
self.proc_man_bulk_button.grid(column=1, row=5, columnspan=2)
self.proc_man_bulk_button['state'] = 'disabled'
ttk.Separator(dirselectframe, orient=tk.HORIZONTAL).pack(fill=tk.X, pady=5)
bulk_drift_frame = ttk.Frame(dirselectframe)
bulk_drift_frame.pack()
self.bfdb = ttk.Button(bulk_drift_frame, text='8. Fix Drift', command=self.bulk_fix_drift)
self.bfdb.grid(column=0, row=0, columnspan=1, rowspan=2)
self.bfdb['state'] = 'disabled'
self.export_drift = tk.IntVar(value=1)
self.export_drift_check = tk.Checkbutton(bulk_drift_frame,
text="Export Drift Data?",
variable=self.export_drift,
state='disabled')
self.export_drift_check.grid(row=0, column=1, sticky=tk.W)
self.export_drift_check.select()
self.export_drift_graph = tk.IntVar(value=1)
self.export_drift_graph_check = tk.Checkbutton(bulk_drift_frame,
text="Graph Data?",
variable=self.export_drift_graph,
state='disabled')
self.export_drift_graph_check.grid(row=1, column=1, sticky=tk.W)
self.export_drift_graph_check.select()
ttk.Label(bulk_drift_frame, text='Max Allowed Drift (ft)').grid(row=0, column=2)
self.max_allowed_drift = tk.DoubleVar(bulk_drift_frame, value=0.3)
ent = ttk.Entry(bulk_drift_frame, textvariable=self.max_allowed_drift, width=10)
ent.grid(row=1, column=2)
def onFrameConfigure(self, event):
'''Reset the scroll region to encompass the inner frame'''
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def man_file_frame(self, master, key='manual'):
# self.manfileframe = ttk.Frame(master)
manfileframetext = """File with manual data must have datetime, reading, and locationid fields"""
ttk.Label(master, text=manfileframetext).grid(row=0, column=0, columnspan=4)
self.datastr[key] = tk.StringVar(master)
self.datastr[key].set('G:/Shared drives/UGS_Groundwater/Projects/Transducers/manmeas.csv')
man_entry = ttk.Entry(master, textvariable=self.datastr[key], width=80, justify='left')
man_entry.grid(row=2, column=0, columnspan=4)
man_entry.bind('<Double-ButtonRelease-1>', lambda e: self.mandiag(e, key=key))
fillervals = ['readingdate', 'dtwbelowcasing', 'locid']
self.combo, self.combo_choice, self.combo_label = {}, {}, {}
self.combovals = {"Datetime": [3, 0, 15, fillervals, 4, 0],
"DTW": [3, 1, 15, fillervals, 4, 1],
"locationid": [3, 2, 15, fillervals, 4, 2],
"Pick id": [5, 1, 15, [1001, 1002], 5, 2]}
for ky, vals in self.combovals.items():
self.man_combos(ky, vals, master)
if self.processing_notebook.index(self.processing_notebook.select()) == 1:
print('bulk')
self.combo["Pick id"]["state"] = "disabled"
self.combo["Pick id"].grid_forget()
ttk.Label(master, text="units").grid(row=3, column=3)
self.manunits = ttk.Combobox(master, width=5,
values=['ft', 'm'], state="readonly")
self.manunits.grid(row=4, column=3)
self.manunits.current(0)
# Populates Comboboxes with default file on G drive; if different drive, then passes
try:
self.data[key] = pd.read_csv(self.datastr[key].get())
mancols = list(self.data[key].columns.values)
for col in mancols:
if col.lower() in ['datetime', 'date', 'readingdate']:
self.combo_choice["Datetime"].set(col)
# self.combo["Datetime"].current(mancols.index(col))
elif col.lower() in ['dtw', 'waterlevel', 'depthtowater', 'water_level',
'level', 'depth_to_water', 'water_depth', 'depth',
'dtwbelowcasing', 'dtw_below_casing']:
self.combo_choice["DTW"].set(col)
elif col.lower() in ['locationid', 'locid', 'id', 'location_id', 'lid']:
self.combo_choice['locationid'].set(col)
except FileNotFoundError:
pass
def tab_update(self, event):
index = event.widget.index('current')
if 'combo_label' in self.__dict__:
if index == 1:
self.combo_label["Pick id"].grid_forget()
self.combo["Pick id"].grid_forget()
elif index == 0:
print("0")
def make_well_info_frame(self, master):
# select file for well-info-table
well_info_frame = ttk.Frame(master)
well_info_frame.pack()
key = 'well-info-table'
self.datastr[key] = tk.StringVar(well_info_frame)
self.datastr[key].set(
"G:/Shared drives/UGS_Groundwater/Projects/Transducers/ugs_ngwmn_monitoring_locations.csv")
ttk.Label(well_info_frame, text='1. Input well info file (must be csv)').grid(row=0, column=0, columnspan=3)
# ttk.Label(well_info_frame, text='must have altlocationid, locationname, stickup, barologgertype, and verticalmeasure').grid(row=1,column=0,columnspan=3)
e = ttk.Entry(well_info_frame, textvariable=self.datastr[key], width=80)
e.grid(row=1, column=0, columnspan=2)
e.bind('<Double-ButtonRelease-1>', lambda f: self.open_file(well_info_frame))
b = ttk.Button(well_info_frame, text='Process Well Info File', command=self.add_well_info_table)
b.grid(row=1, column=2)
def make_file_info_table(self, master):
popup = tk.Toplevel()
popup.geometry("400x100+200+200")
tk.Label(popup, text="Examining Directory...").pack()
pg = ttk.Progressbar(popup, orient=tk.HORIZONTAL, mode='determinate', length=200)
pg.pack()
key = 'file-info-table'
ht = ll.HeaderTable(self.datastr['trans-dir'].get())
filelist = ht.xle_csv_filelist()
pg.config(maximum=len(filelist))
fild = {}
wdf = {}
sv = tk.StringVar(popup, value='')
ttk.Label(popup, textvariable=sv).pack()
for file in filelist:
popup.update()
file_extension = os.path.splitext(file)[1]
base = os.path.basename(file)
if file_extension == '.xle':
fild[file], df = ht.xle_head(file)
elif file_extension == '.csv':
fild[file], df = ht.csv_head(file)
fild[file]['locationid'] = pd.to_numeric(
self.locnametoid.get(self.combo.get(fild[file]['file_name'], None).get(), None), errors="coerce",
downcast="integer")
if pd.isna(fild[file]['locationid']):
pass
print('na worked on file ', file)
else:
wdf[fild[file]['locationid']] = df.sort_index()
sv.set(base)
pg.step()
self.data['bulk-well'] = pd.concat(wdf, axis=0).sort_index()
# concatenate file info
df = pd.DataFrame.from_dict(fild, orient='index')
# df['locationid'] = df['file_name'].apply(lambda x: f"{self.locnametoid.get(self.combo.get(x,None).get(),None)}",1)
df['measuring_medium'] = df[['Model_number', 'Location', 'locationid']].apply(lambda x: self.detect_baro(x), 1)
df = df.reset_index().set_index('file_name').rename(columns={'index': 'full_file_path'})
graphframe, tableframe = self.note_tab_add(key, tabw=4, grph=1)
# add graph and table to new tab
# self.add_graph_table(key, tableframe, graphframe)
self.datatable[key] = Table(tableframe, dataframe=df, showtoolbar=True, showstatusbar=True)
self.datatable[key].show()
self.datatable[key].showIndex()
self.datatable[key].update()
self.align_bulk_wb_button['state'] = 'normal'
self.export_align_check['state'] = 'normal'
# self.bulk_data_file_button['state'] = 'normal'
popup.destroy()
def detect_baro(self, x):
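# x is a row of [Model_number, Location, locationid]; a file is classed as an
# air (barometer) record when the model code is M1.5, the location name
# contains 'baro', or the location id is in the hard-coded barometer list,
# otherwise it is treated as a water-level record.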
if pd.isna(x[1]):
x[1] = 'water'
if x[0] == "M1.5" or 'baro' in x[1].lower() or x[2] in ('9003', '9049', '9024', '9025', '9027', '9063', '9067','9070', '9066'):
return "air"
else:
return "water"
def man_combos(self, lab, vals, master):
"""Generates Comboboxes for the manual file input sections"""
self.combo_choice[lab] = tk.StringVar()
self.combo_label[lab] = ttk.Label(master, text=lab)
self.combo_label[lab].grid(row=vals[0], column=vals[1])
self.combo[lab] = ttk.Combobox(master, width=vals[2],
textvariable=self.combo_choice[lab],
postcommand=lambda: self.man_col_select(self.combo[lab]))
self.combo[lab].grid(row=vals[4], column=vals[5])
def man_col_select(self, cmbo):
if 'manual' in self.data.keys() or 'bulk-manual' in self.data.keys() or 'manual-single' in self.data.keys():
if 'manual-single' in self.data.keys():
key = 'manual-single'
elif 'bulk-manual' in self.data.keys():
key = 'bulk-manual'
else:
key = 'manual'
mancols = list(self.data[key].columns.values)
if cmbo == self.combo['Pick id']:
locids = self.data[key][pd.to_numeric(self.combo['locationid'].get(),
errors='coerce',
downcast='integer')].unique()
# TODO this will cause problems later; change to handle multiple types
cmbo['values'] = list([pd.to_numeric(loc, downcast='integer',errors='coerce') for loc in locids])
else:
cmbo['values'] = mancols
else:
messagebox.showinfo(title='Attention', message='Select a manual file!')
self.mandiag(True)
def man_col_select_single(self, cmbo):
if 'manual' in self.data.keys() or 'bulk-manual' in self.data.keys() or 'manual-single' in self.data.keys():
if 'manual-single' in self.data.keys():
key = 'manual-single'
elif 'bulk-manual' in self.data.keys():
key = 'bulk-manual'
else:
key = 'manual'
mancols = list(self.data[key].columns.values)
print(self.scombo['locationid'].get())
if cmbo == self.scombo['Pick id']:
locids = self.data[key][self.scombo['locationid'].get()].unique()
# TODO this will cause problems later; change to handle multiple types
cmbo['values'] = list([pd.to_numeric(loc, downcast='integer') for loc in locids])
else:
cmbo['values'] = [0]
else:
messagebox.showinfo(title='Attention', message='Select a manual file!')
self.mandiag(True)
def date_hours_min(self, i):
ttk.Label(self.manframe, text=str(i + 1)).grid(row=i + 1, column=0)
# date picker
self.man_date[i] = DateEntry(self.manframe, width=20, locale='en_US', date_pattern='MM/dd/yyyy')
self.man_date[i].grid(row=i + 1, column=1, padx=2)
# time picker
self.man_hour[i] = ttk.Combobox(self.manframe, width=2, values=list([f'{i:02}' for i in range(0, 24)]),
state="readonly")
self.man_hour[i].grid(row=i + 1, column=2)
self.man_hour[i].current(0)
ttk.Label(self.manframe, text=":").grid(row=i + 1, column=3)
self.man_min[i] = ttk.Combobox(self.manframe, width=2,
values=list([f'{i:02}' for i in range(0, 60)]),
state="readonly")
self.man_min[i].grid(row=i + 1, column=4)
self.man_min[i].current(0)
# measure
self.man_meas[i] = ttk.Entry(self.manframe, validate="key", validatecommand=self.measvalidation, width=10)
self.man_meas[i].grid(row=i + 1, column=5, padx=2)
def filefinders(self, key):
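        """Build the labeled file-entry widget for the single-well tab; key is 'well' or
        'baro'. Double-click opens a file dialog and right-click re-imports the file."""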
datasets = {"well": "1. Select Well Data:",
"baro": "2. Select Barometric Data:"}
ttk.Separator(self.onewelltab, orient=tk.HORIZONTAL).pack(fill=tk.X, pady=5)
filefinderframe = ttk.Frame(self.onewelltab)
ttk.Label(filefinderframe, text=datasets[key]).pack()
ttk.Label(filefinderframe, text='(Right click for refresh.)').pack()
self.datastr[key] = tk.StringVar(filefinderframe, value=f'Double-Click for {key} file')
self.entry[key] = ttk.Entry(filefinderframe, textvariable=self.datastr[key], width=80)
self.entry[key].pack()
self.entry[key].bind('<Double-ButtonRelease-1>', lambda k: self.wellbarodiag(key))
self.entry[key].bind('<3>', lambda k: self.wellbaroabb(key))
filefinderframe.pack()
def outlierremove(self, key):
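        """Build the optional 'Fix Jumps and outliers' widgets (min/max trim entries, jump
        tolerance entry, and their buttons) used to clean the single-well record."""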
ttk.Separator(self.onewelltab, orient=tk.HORIZONTAL).pack(fill=tk.X, pady=5)
frame_step1_5 = ttk.Frame(self.onewelltab)
ttk.Label(frame_step1_5, text='1a. Fix Jumps and outliers (optional)').grid(column=0, row=0, columnspan=6)
dataminlab = ttk.Label(frame_step1_5, text='Min. Allowed Value:')
dataminlab.grid(column=0, row=1)
self.dataminvar = tk.DoubleVar(frame_step1_5, value=-10000.0)
self.datamaxvar = tk.DoubleVar(frame_step1_5, value=100000.0)
self.datamin = ttk.Entry(frame_step1_5, textvariable=self.dataminvar, width=10, state='disabled')
self.datamin.grid(column=1, row=1)
dataminlab = ttk.Label(frame_step1_5, text='Max. Allowed Value:')
dataminlab.grid(column=2, row=1)
self.datamax = ttk.Entry(frame_step1_5, textvariable=self.datamaxvar, width=10, state='disabled')
self.datamax.grid(column=3, row=1)
self.trimbutt = ttk.Button(frame_step1_5, text='Trim Extrema', command=self.trimextrema, state='disabled')
self.trimbutt.grid(column=4, row=1)
datajumplab = ttk.Label(frame_step1_5, text='Jump Tolerance:')
datajumplab.grid(column=0, row=2)
self.datajumptol = tk.DoubleVar(frame_step1_5, value=100.0)
self.datajump = ttk.Entry(frame_step1_5, textvariable=self.datajumptol, width=10, state='disabled')
self.datajump.grid(column=1, row=2)
self.jumpbutt = ttk.Button(frame_step1_5, text='Fix Jumps', command=self.fixjumps, state='disabled')
self.jumpbutt.grid(column=2, row=2)
frame_step1_5.pack()
def trimextrema(self):
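        """Drop well 'Level' readings outside the min/max allowed values and refresh the
        'well' tab."""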
if 'well' in self.data.keys():
if 'Level' in self.data['well'].columns:
self.data['well'] = self.data['well'][(self.data['well']['Level']>=self.dataminvar.get())&(self.data['well']['Level']<=self.datamaxvar.get())]
graphframe, tableframe = self.note_tab_add('well')
self.add_graph_table('well', tableframe, graphframe)
#self.datatable['well'].show()
#self.datatable['well'].update()
#self.datatable['well'].show()
else:
print('No column named Level')
pass
#TODO add dialog to select a column to adjust
def fixjumps(self):
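        """Apply ll.jumpfix to the well 'Level' column using the jump tolerance and refresh
        the 'well' tab."""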
if 'well' in self.data.keys():
if 'Level' in self.data['well'].columns:
self.data['well'] = ll.jumpfix(self.data['well'], 'Level', self.datajumptol.get())
graphframe, tableframe = self.note_tab_add('well')
self.add_graph_table('well', tableframe, graphframe)
#self.datatable['well'].show()
#self.datatable['well'].update()
#self.datatable['well'].show()
else:
print('No column named Level')
pass
#TODO add dialog to select a column to adjust
def fix_drift_interface(self):
# Fix Drift Button
ttk.Separator(self.onewelltab, orient=tk.HORIZONTAL).pack(fill=tk.X)
frame_step5 = ttk.Frame(self.onewelltab)
frame_step5.pack()
ttk.Label(frame_step5, text='5. Fix Drift').grid(column=0, row=1, columnspan=3)
self.max_drift = tk.StringVar(frame_step5, value="")
ttk.Button(frame_step5, text='Fix Drift',
command=self.fix_drift).grid(column=0, row=2, columnspan=1)
ttk.Label(frame_step5, text='Drift = ').grid(row=2, column=1)
ttk.Label(frame_step5, textvariable=self.max_drift).grid(row=2, column=2)
# self.locchk = ttk.Entry(self.frame_step5)
# self.locchk.grid(column=1,row=0)
def add_alignment_interface(self):
# Align Manual and Baro Data
ttk.Separator(self.onewelltab, orient=tk.HORIZONTAL).pack(fill=tk.X, pady=5)
frame_step3 = ttk.Frame(self.onewelltab)
frame_step3.pack()
ttk.Label(frame_step3, text="3. Align Baro and Well Data:").grid(row=0, column=0, columnspan=5)
ttk.Label(frame_step3, text='Pref. Data Freq.').grid(row=1, column=0, columnspan=2)
# Boxes for data frequency
self.freqint = ttk.Combobox(frame_step3, width=4, values=list(range(1, 120)))
self.freqint.grid(row=2, column=0)
self.freqint.current(59)
self.freqtype = ttk.Combobox(frame_step3, width=4, values=['min'])
self.freqtype.grid(row=2, column=1)
self.freqtype.current(0)
b = ttk.Button(frame_step3, text='Align Datasets',
command=self.alignedplot)
b.grid(row=2, column=2)
self.export_wb = tk.IntVar(value=1)
self.export_single_well_baro = tk.Checkbutton(frame_step3,
text="Export Well-Baro Data?",
variable=self.export_wb)
self.export_single_well_baro.grid(row=2, column=3, sticky=tk.W)
self.export_single_well_baro.select()
self.is_vented = tk.IntVar(value=0)
self.trans_vented = tk.Checkbutton(frame_step3,
text="Vented?",
variable=self.is_vented)
self.trans_vented.grid(row=2, column=4, sticky=tk.W)
#self.trans_vented.select()
def add_elevation_interface(self, master):
#
# Elevation Correction Interface
ttk.Separator(master, orient=tk.HORIZONTAL).pack(fill=tk.X)
frame_step6 = ttk.Frame(master)
frame_step6.pack()
ttk.Label(frame_step6, text='6. Align Elevation and Offset').grid(row=1, column=0, columnspan=4)
ttk.Label(frame_step6, text='Ground Elev.').grid(row=2, column=0)
ttk.Label(frame_step6, text='Stickup').grid(row=2, column=2)
ttk.Label(frame_step6, text='Elev. Units').grid(row=2, column=1)
ttk.Label(frame_step6, text='Stickup Units').grid(row=2, column=3)
self.wellgroundelev = ttk.Entry(frame_step6, width=6)
self.wellgroundelevunits = ttk.Combobox(frame_step6, width=5,
values=['ft', 'm'], state="readonly")
self.wellgroundelevunits.current(0)
self.wellstickup = ttk.Entry(frame_step6, width=4)
self.wellstickupunits = ttk.Combobox(frame_step6, width=5,
values=['ft', 'm'], state="readonly")
self.wellstickupunits.current(0)
self.wellgroundelev.grid(row=3, column=0)
self.wellgroundelevunits.grid(row=3, column=1)
self.wellstickup.grid(row=3, column=2)
self.wellstickupunits.grid(row=3, column=3)
b = ttk.Button(frame_step6, text='Calculate Elevations', command=self.elevcalc)
b.grid(row=4, column=0, columnspan=4, pady=5)
def elevcalc(self):
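        """Convert stickup and ground elevation to feet if needed, add water-elevation
        columns to the manual and fixed-drift tables, and display the result in a
        'wl-elev' tab."""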
key = 'wl-elev'
mstickup = float(self.wellstickup.get())
melev = float(self.wellgroundelev.get())
        # stickup and ground-elevation units are set independently, so convert each on its own
        if self.wellstickupunits.get() == 'm':
            mstickup = mstickup * 3.2808
        if self.wellgroundelevunits.get() == 'm':
            melev = melev * 3.2808
# TODO Replace these with ElevateWater class
# wlevels = ll.ElevateWater(self.datatable['manual'].model.df, melev, mstickup)
# self.manelevs = wlevels.manual_elevation()
df = self.datatable['fixed-drift'].model.df
# wlevels = ll.ElevateWater(self.datatable['fixed-drift'].model.df, melev, mstickup)
if 'manual-single' in self.data.keys():
key2 = 'manual-single'
elif 'bulk-manual' in self.data.keys():
key2 = 'bulk-manual'
else:
key2 = 'manual'
self.datatable[key2].model.df['waterelevation'] = self.datatable[key2].model.df[
'dtwbelowcasing'] + mstickup + melev
self.datatable[key2].update()
self.manelevs = self.datatable[key2].model.df
df['waterelevation'] = self.datatable['fixed-drift'].model.df['DTW_WL'] + mstickup + melev
self.data[key] = df
graphframe, tableframe = self.note_tab_add(key)
self.add_graph_table(key, tableframe, graphframe)
print(self.manelevs)
def fix_drift(self):
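        """Run ll.Drifting on the aligned well-baro data against the manual measurements
        and display the drift-corrected record in a 'fixed-drift' tab."""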
key = 'fixed-drift'
if 'well-baro' in self.datatable.keys():
if 'manual-single' in self.data.keys():
key2 = 'manual-single'
elif 'bulk-manual' in self.data.keys():
key2 = 'bulk-manual'
else:
key2 = 'manual'
self.datatable[key2].model.df['dtwbelowcasing'] = self.datatable[key2].model.df[
'dtwbelowcasing'] * -1
self.datatable[key2].update()
df, self.drift_info, mxdrft = ll.Drifting(self.datatable[key2].model.df,
self.datatable['well-baro'].model.df,
drifting_field='corrwl',
man_field='dtwbelowcasing',
output_field='DTW_WL').process_drift()
# df, self.drift_info, mxdrft = ll.fix_drift(self.datatable['well-baro'].model.df,
# self.datatable['manual'].model.df,
# manmeas='dtwbelowcasing')
self.max_drift.set(mxdrft)
if 'Temperature' in df.columns:
self.data[key] = df[['barometer', 'corrwl', 'DTW_WL','driftcorrection', 'Temperature']]
else:
self.data[key] = df[['barometer', 'corrwl', 'DTW_WL', 'driftcorrection']]
graphframe, tableframe = self.note_tab_add(key)
self.add_graph_table(key, tableframe, graphframe)
else:
tk.messagebox.showinfo(title='Yo!', message='Align the data first!')
def bulk_fix_drift(self):
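        """Loop over each well in the bulk aligned data, apply drift correction and
        water-elevation calculations, build a 'drift-info' summary tab, and optionally
        export the corrected data to csv and the hydrographs to pdf."""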
popup = tk.Toplevel()
popup.geometry("400x400+200+200")
tk.Label(popup, text="Fixing Drift...").pack()
pg = ttk.Progressbar(popup, orient=tk.HORIZONTAL, mode='determinate', length=200)
pg.pack()
bulkdrift = {}
drift_info = {}
info = self.datatable['well-info-table'].model.df
try:
pg.config(maximum=len(self.data['bulk-well-baro'].index.get_level_values(0).unique()))
except KeyError:
tk.messagebox.showinfo(title='Yo!', message='Align the data first!')
sv = tk.StringVar(popup, value='')
ttk.Label(popup, textvariable=sv).pack()
# self.datatable['manual'].model.df['dtwbelowcasing'] = self.datatable['manual'].model.df['dtwbelowcasing'] *-1
print(self.data['bulk-well-baro'].index.get_level_values(0).unique())
for i in self.data['bulk-well-baro'].index.get_level_values(0).unique():
popup.update()
if pd.notnull(i):
if 'bulk-manual' in self.data.keys():
key2 = 'bulk-manual'
elif 'manual-single' in self.data.keys():
key2 = 'manual-single'
else:
key2 = 'manual'
try:
mandf = self.datatable[key2].model.df.loc[int(i)]
except KeyError:
print('trying manual-single')
mandf = self.datatable['manual-single'].model.df.loc[int(i)]
wellbaro = self.data['bulk-well-baro'].loc[int(i)]
dftcorr, dfrinf, max_drift = ll.Drifting(mandf,
wellbaro,
drifting_field='corrwl',
man_field='dtwbelowcasing',
output_field='DTW_WL').process_drift()
mstickup = info.loc[i, 'stickup']
melev = info.loc[i, 'verticalmeasure']
name = info.loc[i, 'locationname']
dfrinf['name'] = name
#df['name'] = name
drift_info[i] = dfrinf#.reset_index()
if max_drift > self.max_allowed_drift.get():
ttk.Label(popup, text=f'{name} drift too high at {max_drift}!').pack()
pass
else:
dftcorr['waterelevation'] = dftcorr['DTW_WL'] + mstickup + melev
bulkdrift[i] = dftcorr
# bulkdrift[i] = ll.get_trans_gw_elevations(df, mstickup, melev, site_number = i, level='corrwl', dtw='DTW_WL')
sv.set(f"{name} has a max drift of {max_drift}")
pg.step()
self.data['bulk-fix-drift'] = pd.concat(bulkdrift)
#self.data['bulk-fix-drift'] = self.data['bulk-fix-drift']
key = 'drift-info'
self.data[key] = pd.concat(drift_info, sort=True, ignore_index=True).set_index('name')
graphframe, tableframe = self.note_tab_add(key)
self.datatable[key] = Table(tableframe, dataframe=self.data[key], showtoolbar=True, showstatusbar=True)
self.datatable[key].show()
self.datatable[key].showIndex()
self.datatable[key].update()
popup.destroy()
if self.export_drift.get() == 1:
df = self.data['bulk-fix-drift']
if 'level_0' in df.columns:
df = df.drop(['level_0'], axis=1)
print(df.head())
df.index.name = 'locationid'
df = df.reset_index()
if 'level_0' in df.columns:
df = df.rename(columns={'level_0':'locationid'})
df['measureddtw'] = df['DTW_WL']
df = df.rename(columns={'DateTime':'readingdate','Level':'measuredlevel','Temperature':'temperature',
'DTW_WL':'measureddtw'})
df = df[['locationid','readingdate','measuredlevel','temperature',
'measureddtw','driftcorrection','waterelevation']]
file = filedialog.asksaveasfilename(filetypes=[('csv', '.csv')], defaultextension=".csv")
df.to_csv(file)
if self.export_drift_graph.get() == 1:
pdffile = filedialog.asksaveasfilename(filetypes=[('pdf', '.pdf')], defaultextension=".pdf")
with PdfPages(pdffile) as pdf:
popup = tk.Toplevel()
popup.geometry("500x500+200+200")
tk.Label(popup, text="Graphing Data...").pack()
pg = ttk.Progressbar(popup, orient=tk.HORIZONTAL, mode='determinate', length=200)
pg.pack()
pg.config(maximum=len(self.data['bulk-fix-drift'].index.get_level_values(0).unique()))
fig = plt.figure(figsize=(5, 5))
canvas = FigureCanvasTkAgg(fig, master=popup)
for ind in self.data['bulk-fix-drift'].index.get_level_values(0).unique():
popup.update()
if pd.notnull(ind):
ax = fig.add_subplot(111)
fig.canvas.draw()
df = self.data['bulk-fix-drift'].loc[ind]
df = df.dropna(subset=['waterelevation'])
if 'bulk-manual' in self.data.keys():
key2 = 'bulk-manual'
elif 'manual-single' in self.data.keys():
key2 = 'manual-single'
else:
key2 = 'manual'
try:
mandf = self.datatable[key2].model.df.loc[ind]
except KeyError:
mandf = self.datatable['manual-single'].model.df.loc[ind]
mandf = mandf.dropna(subset=['waterelevation'])
if len(df) > 0 and len(mandf) > 0:
title = info.loc[int(ind), 'locationname']
ax.plot(df.index, df['waterelevation'],color='blue')
ax.scatter(mandf.index, mandf['waterelevation'],color='red')
ax.set_ylabel('Water Level Elevation')
ax.set_ylim(min(df['waterelevation'])-0.1, max(df['waterelevation'])+0.1)
ax.set_xlim(df.first_valid_index() - pd.Timedelta(days=3),
df.last_valid_index() + pd.Timedelta(days=3))
#ax.tick_params(axis='x', labelrotation=45)
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
canvas.draw()
plt.title(title)
pdf.savefig(fig)
plt.close()
fig.delaxes(ax)
pg.step()
popup.destroy()
def proc_man(self):
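        """Process single-well manual measurements from either the two data-entry rows or
        an imported file, convert meters to feet if needed, and index the result by
        reading date."""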
nbnum = self.manbook.index(self.manbook.select())
if 'manual-single' in self.data.keys():
key = 'manual-single'
elif 'bulk-manual' in self.data.keys():
key = 'bulk-manual'
else:
key = 'manual'
if nbnum == 0:
for i in [0, 1]:
self.man_datetime[i] = pd.to_datetime(
f'{self.man_date[i].get()} {self.man_hour[i].get()}:{self.man_min[i].get()}',
format='%m/%d/%Y %H:%M')
df = pd.DataFrame({'readingdate': [self.man_datetime[0], self.man_datetime[1]],
'dtwbelowcasing': [float(self.man_meas[0].get()),
float(self.man_meas[1].get())],
'locationid': [self.man_locid.get()] * 2,
'units': [self.manunits.get()] * 2})
if self.manunits.get() == 'm':
df['dtwbelowcasing'] = df['dtwbelowcasing'] * 3.28084
self.data[key] = df.set_index(['readingdate'])
print(self.data[key])
elif nbnum == 1:
df = self.data[key].rename(columns={self.scombo['Datetime'].get(): 'readingdate',
self.scombo['DTW'].get(): 'dtwbelowcasing',
self.scombo['locationid'].get(): 'locationid'})
df['units'] = self.manunits.get()
if self.manunits.get() == 'm':
df['dtwbelowcasing'] = df['dtwbelowcasing'] * 3.28084
df = df.reset_index()
df['readingdate'] = df['readingdate'].apply(lambda x: pd.to_datetime(x, infer_datetime_format=True,
errors='ignore'))
df['dtwbelowcasing'] = df['dtwbelowcasing'].apply(lambda x: pd.to_numeric(x, errors='coerce'))
df = df.set_index(['readingdate'])
df = df[['dtwbelowcasing', 'locationid', 'units']]
if 'well' in self.datatable.keys():
df = df[df.index > self.datatable['well'].model.df.first_valid_index() - pd.DateOffset(days=8)]
self.data[key] = df[df['locationid'] == pd.to_numeric(self.scombo['Pick id'].get(), downcast='integer')]
graphframe, tableframe = self.note_tab_add(key)
self.add_graph_table(key, tableframe, graphframe)
def bulk_wlelev(self, x, inf, pg, pop):
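        """Row-wise helper: add stickup and vertical measure from the well-info table to a
        depth-to-water value, stepping the progress bar as it goes."""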
pop.update()
wl = x[1] + inf.loc[x[0], 'stickup'] + inf.loc[x[0], 'verticalmeasure']
pg.step()
return wl
def proc_man_bulk(self):
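        """Process a bulk manual-measurement file: rename columns, convert units, restrict
        to wells in the well-info table, compute water elevations, and display the result
        in a 'bulk-manual' tab."""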
key = 'bulk-manual'
#if 'bulk-manual' in self.data.keys():
# key = 'bulk-manual'
#elif 'manual-single' in self.data.keys():
# key = 'manual-single'
#else:
# key = 'manual'
try:
df = self.data[key].rename(columns={self.combo['Datetime'].get(): 'readingdate',
self.combo['DTW'].get(): 'dtwbelowcasing',
self.combo['locationid'].get(): 'locationid'})
df['units'] = self.manunits.get()
if self.manunits.get() == 'm':
df['dtwbelowcasing'] = df['dtwbelowcasing'] * 3.28084
df = df.reset_index()
df['readingdate'] = df['readingdate'].apply(lambda x: pd.to_datetime(x, infer_datetime_format=True,
errors='ignore'))
df['dtwbelowcasing'] = df['dtwbelowcasing'].apply(lambda x: -1 * pd.to_numeric(x, errors='coerce'))
# df = df.set_index(['locationid', 'readingdate'])
# df = df['dtwbelowcasing']
info = self.datatable['well-info-table'].model.df
df = df[df['locationid'].isin(info.index)]
popup = tk.Toplevel()
popup.geometry("400x100+200+200")
tk.Label(popup, text="Calculating manual elevations...").pack()
pg = ttk.Progressbar(popup, orient=tk.HORIZONTAL, mode='determinate', length=200)
pg.pack()
pg.config(maximum=len(df.index.get_level_values(0)))
df['waterelevation'] = df[['locationid', 'dtwbelowcasing']].apply(lambda x: self.bulk_wlelev(x, info, pg, popup), 1)
df = df.set_index(['locationid', 'readingdate'])
popup.destroy()
self.data[key] = df
graphframe, tableframe = self.note_tab_add(key)
self.add_graph_table(key, tableframe, graphframe)
self.export_drift_graph_check['state'] = 'normal'
self.export_drift_check['state'] = 'normal'
self.bfdb['state'] = 'normal'
except KeyError:
print()
tk.messagebox.showerror(title='Process Well Info Table First', message="Process Well Info Table First")
def only_meas(self, value_if_allowed):
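        """Entry-validation callback that accepts only values castable to float."""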
        try:
            float(value_if_allowed)
            is_valid = True
        except ValueError:
            is_valid = False
        return is_valid
def decrease(self):
x, y = self.line.get_data()
self.line.set_ydata(y * 0.8)
self.canvas.draw()
def increase(self):
x, y = self.line.get_data()
self.line.set_ydata(y * 1.2)
self.canvas.draw()
def _quit(self):
self.quit() # stops mainloop
self.destroy() # this is necessary on Windows to prevent
# Fatal Python Error: PyEval_RestoreThread: NULL tstate
def on_key_press(self, event):
print("you pressed {}".format(event.key))
key_press_handler(event, self.canvas, self.toolbar)
def note_tab_add(self, key, tabw=1, grph=4):
"""
Args:
key (str): name of dataset; ex 'well','baro','well-baro','manual','fixed-drift'
Returns:
"""
print(key)
if key in self.notelist.keys():
self.notebook.forget(self.notelist[key])
self.notelist[key] = 'old'
new_frame = ttk.Frame(self.notebook)
self.notebook.add(new_frame, text=key)
for t in range(len(self.notebook.tabs())):
self.notelist[self.notebook.tab(t)['text']] = t
self.notebook.select(t)
panedframe = ttk.Panedwindow(new_frame, orient='vertical')
panedframe.pack(fill='both', expand=True)
tableframe = ttk.Frame(panedframe, relief='sunken')
graphframe = ttk.Frame(panedframe, relief='sunken')
panedframe.add(tableframe, weight=tabw)
panedframe.add(graphframe, weight=grph)
labframe = ttk.Frame(graphframe)
labframe.pack()
ttk.Label(labframe, text='Click on column of choice and then the Plot button!').pack()
return graphframe, tableframe
def add_graph_table(self, key, tableframe, graphframe):
"""
Args:
key (str): name of dataset; ex 'well','baro','well-baro','manual','fixed-drift'
tableframe: parent tk frame for pandastable data table
graphframe: parent tk frame for pandastable graph
Returns:
adds pandastable elements to a frame
"""
graph_frame1 = ttk.Frame(graphframe)
self.datatable[key] = Table(tableframe, dataframe=self.data[key], showtoolbar=True, showstatusbar=True)
self.datatable[key].show()
self.datatable[key].showIndex()
self.datatable[key].update()
self.datatable[key].showPlotViewer(parent=graph_frame1)
canvas = self.datatable[key].showPlotViewer(parent=graph_frame1).canvas
if key == 'well-baro':
self.add_baro_axis(graph_frame1)
elif key == 'fixed-drift':
self.add_manual_points(key, graph_frame1)
elif key == 'wl-elev':
self.add_manual_points(key, graph_frame1)
elif key == 'bulk-baro':
self.plot_bulk_baro(graph_frame1)
toolbar = NavigationToolbar2Tk(canvas, graph_frame1)
toolbar.update()
canvas.draw()
canvas.get_tk_widget().pack(side='top', fill='both', expand=1)
canvas.mpl_connect("key_press_event", self.on_key_press)
graph_frame1.pack()
self.sheets[key] = self.datatable[key]
def plot_bulk_baro(self, graph_frame1):
key = 'bulk-baro'
ax = self.datatable[key].showPlotViewer(parent=graph_frame1).ax
for wellid in self.datatable[key].model.df.index.get_level_values(0).unique():
ax.plot(self.datatable[key].model.df.loc[wellid, 'Level'], label=wellid)
ax.set_ylabel('well levels', color='blue')
ax.legend()
def add_baro_axis(self, graph_frame1):
key = 'well-baro'
ax = self.datatable[key].showPlotViewer(parent=graph_frame1).ax
lns1 = ax.plot(self.datatable[key].model.df['Level'], color='green', label='unprocessed')
lns2 = ax.plot(self.datatable[key].model.df['corrwl'], color='blue', label='baro-removed')
ax2 = ax.twinx()
lns3 = ax2.plot(self.datatable[key].model.df['barometer'], color='red', label='baro')
ax2.set_ylabel('barometer', color='red')
ax.set_ylabel('well levels', color='blue')
lns = lns1 + lns2 + lns3
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc=0)
def add_manual_points(self, key, graph_frame1):
ax = self.datatable[key].showPlotViewer(parent=graph_frame1).ax
if 'manual-single' in self.data.keys():
key2 = 'manual-single'
elif 'bulk-manual' in self.data.keys():
key2 = 'bulk-manual'
else:
key2 = 'manual'
if key == 'fixed-drift':
ax.plot(self.datatable[key].model.df['DTW_WL'], color='green', label='unprocessed')
ax.scatter(self.datatable[key2].model.df.index, self.datatable[key2].model.df['dtwbelowcasing'])
ax.set_ylabel(f"Depth to Water (ft)")
elif key == 'wl-elev':
ax.plot(self.datatable[key].model.df['waterelevation'], color='green', label='unprocessed')
ax.scatter(self.datatable[key2].model.df.index, self.datatable[key2].model.df['waterelevation'])
ax.set_ylabel(f"Water Elevation (ft)")
ax.set_xlim(self.datatable[key2].model.df.first_valid_index() - pd.Timedelta('3 days'),
self.datatable[key2].model.df.last_valid_index() + pd.Timedelta('3 days'), )
def wellbaroabb(self, key):
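        """Import the selected well, baro, or manual file into self.data[key], enable the
        outlier-cleanup widgets for well data, and add a notebook tab for the dataset."""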
if self.datastr[key].get() == '' or type(self.datastr[key].get()) == tuple or self.datastr[
key].get() == f'Double-Click for {key} file':
pass
else:
if key in ('well'):
self.data[key] = ll.NewTransImp(self.datastr[key].get()).well.drop(['name'], axis=1)
filenm, self.file_extension = os.path.splitext(self.datastr[key].get())
self.datamin['state'] = 'normal'
self.datamax['state'] = 'normal'
self.trimbutt['state'] = 'normal'
self.datajump['state'] = 'normal'
self.jumpbutt['state'] = 'normal'
if 'Level' in self.data['well'].columns:
self.dataminvar.set(self.data['well']['Level'].min())
self.datamaxvar.set(self.data['well']['Level'].max())
elif key in ('baro'):
self.data[key] = ll.NewTransImp(self.datastr[key].get()).well.drop(['name'], axis=1)
filenm, self.file_extension = os.path.splitext(self.datastr[key].get())
elif key in ('manual','bulk-manual','manual-single'):
filenm, file_extension = os.path.splitext(self.datastr[key].get())
if file_extension in ('.xls', '.xlsx'):
self.data[key] = pd.read_excel(self.datastr[key].get())
elif file_extension == '.csv':
self.data[key] = pd.read_csv(self.datastr[key].get())
# add notepad tab
graphframe, tableframe = self.note_tab_add(key)
# add graph and table to new tab
self.add_graph_table(key, tableframe, graphframe)
def wellbarodiag(self, key):
ftypelist = (("Solinst xle", "*.xle*"), ("Solinst csv", "*.csv"))
self.datastr[key].set(filedialog.askopenfilename(initialdir=self.currentdir,
title=f"Select {key} file",
filetypes=ftypelist))
self.currentdir = os.path.dirname(self.datastr[key].get())
# Action if cancel in file dialog is pressed
self.wellbaroabb(key)
def alignedplot(self):
"""
Returns: notepad tab with aligned data;
TODO Add File type combo to improve csv processing
"""
if 'well' in self.data.keys() and 'baro' in self.data.keys():
key = 'well-baro'
            if self.is_vented.get() == 1:
sol = True
else:
sol = False
self.data[key] = ll.well_baro_merge(self.datatable['well'].model.df,
self.datatable['baro'].model.df,
sampint=self.freqint.get(),
vented = sol)
graphframe, tableframe = self.note_tab_add(key)
self.add_graph_table(key, tableframe, graphframe)
if self.export_wb.get() == 1:
df = self.data[key]
df.index.name = 'locationid'
df = df.reset_index()
file = filedialog.asksaveasfilename(filetypes=[('csv', '.csv')], defaultextension=".csv")
df.to_csv(file)
def align_well_baro_bulk(self):
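        """Merge each bulk well record with its assigned barometer using
        ll.well_baro_merge, store the result in self.data['bulk-well-baro'], and
        optionally export it to csv."""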
# TODO add feature to recognize global water transducers
if 'bulk-well' in self.data.keys():
files = self.datatable['file-info-table'].model.df
info = self.datatable['well-info-table'].model.df
wellids = self.data['bulk-well'].index.get_level_values(0).unique()
mergedf = {}
popup = tk.Toplevel()
popup.geometry("400x100+200+200")
tk.Label(popup, text="Aligning datasets...").pack()
pg = ttk.Progressbar(popup, orient=tk.HORIZONTAL, mode='determinate', length=200)
pg.pack()
pg.config(maximum=len(wellids))
sv = tk.StringVar(popup, value='')
ttk.Label(popup, textvariable=sv).pack()
for wellid in wellids:
popup.update()
if wellid is not None and pd.notna(wellid) and pd.notnull(wellid):
if info.loc[int(wellid), 'barologgertype'] != "None" and info.loc[
int(wellid), 'barologgertype'] != "":
baroid = pd.to_numeric(info.loc[int(wellid), 'barologgertype'],
downcast='integer', errors='coerce')
medium = files[files['locationid'] == wellid]['measuring_medium'].values[0]
name = info.loc[int(wellid), "locationname"]
ttype = files[files['locationid'] == wellid]['trans type'].values[0]
if ttype == 'Solinst':
sol = False
elif ttype == 'Global Water':
sol = True
else:
sol = False
if baroid in files['locationid'].unique() and medium == 'water':
mergedf[int(wellid)] = ll.well_baro_merge(self.data['bulk-well'].loc[int(wellid)],
self.data['bulk-well'].loc[int(baroid)],
vented=sol)
else:
print(f'no baroid for well {wellid}')
name = 'No Name'
sv.set(f"aligning {name} = {wellid}")
pg.step()
popup.destroy()
df = pd.concat(mergedf, names=['locationid'])
df = df.reset_index()
df['DateTime'] = pd.to_datetime(df['DateTime'], errors='coerce')
df = df.set_index(['locationid', 'DateTime'])
df = df[['Level', 'Temperature', 'barometer', 'dbp', 'dwl', 'corrwl']]
self.data['bulk-well-baro'] = df
if self.export_align.get() == 1:
file = filedialog.asksaveasfilename(filetypes=[('csv', '.csv')], defaultextension=".csv")
self.data['bulk-well-baro'].to_csv(file)
def mandiag(self, event, key='manual'):
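        """Open a file dialog for a manual-measurement file, read it into self.data, and
        pre-select likely datetime, depth-to-water, and location id columns in the
        comboboxes."""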
if event:
self.datastr[key].set(filedialog.askopenfilename(initialdir=self.currentdir,
title=f"Select {key} file",
filetypes=[('csv', '.csv')],
defaultextension=".csv"))
self.currentdir = os.path.dirname(self.datastr[key].get())
# https://stackoverflow.com/questions/45357174/tkinter-drop-down-menu-from-excel
# TODO add excel sheet options to file selection
# self.graph_frame1.pack()
if self.datastr[key].get() == '' or self.datastr[key].get() == f'Double-Click for {key} file':
self.datastr[key].set(f'Double-Click for {key} file')
else:
try:
filenm, file_extension = os.path.splitext(self.datastr[key].get())
if file_extension in ('.xls', '.xlsx'):
self.data[key] = pd.read_excel(self.datastr[key].get())
elif file_extension == '.csv':
self.data[key] = pd.read_csv(self.datastr[key].get())
print('file read')
mancols = list(self.data[key].columns.values)
self.fillervals = mancols
for col in mancols:
if col.lower() in ['datetime', 'date', 'readingdate']:
if key == 'manual':
self.combo_choice["Datetime"].set(col)
else:
self.scombo_choice["Datetime"].set(col)
# self.combo["Datetime"].current(mancols.index(col))
elif col.lower() in ['dtw', 'waterlevel', 'depthtowater', 'water_level',
'level', 'depth_to_water', 'water_depth', 'depth',
'dtwbelowcasing', 'dtw_below_casing']:
if key == 'manual' or key=='bulk-manual':
self.combo_choice["DTW"].set(col)
else:
self.scombo_choice["DTW"].set(col)
elif col.lower() in ['locationid', 'locid', 'id', 'location_id', 'lid']:
if key == 'manual' or key =='bulk-manual':
self.combo_choice['locationid'].set(col)
else:
self.scombo_choice['locationid'].set(col)
except FileNotFoundError:
pass
def save_one_well(self):
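        """Write the processed single-well 'wl-elev' table to a user-selected csv file."""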
filename = filedialog.asksaveasfilename(confirmoverwrite=True)
if filename is None:
print('no')
return
else:
df = self.datatable['wl-elev'].model.df
df['measureddtw'] = -1*df['DTW_WL']
df = df.rename(columns={'Temperature':'temperature',
'corrwl':'measuredlevel'})
df = df.drop(['DTW_WL'], axis=1)
df.to_csv(filename)
return
def open_file(self, master):
"""This function creates a file dialog to select the well-info-file and then uses the filename to
make a pandas dataframe; this dataframe is fed to the add_well_info_table function to display it in the
spreadsheet
Args:
master:
Returns:
"""
key = 'well-info-table'
try:
self.datastr[key].set(filedialog.askopenfilename(initialdir=self.currentdir, title="Select well info file"))
if self.datastr[key].get() == '' or type(self.datastr[key].get()) == tuple or \
self.datastr[key].get() == 'Double-Click for transducer file directory':
pass
else:
self.add_well_info_table()
except KeyError:
tk.messagebox.showerror(title='Need to rename columns', message="""This table needs fields with labels
'altlocationid','stickup','locationname','verticalmeasure','barologgertype'. They do not have to be
in order.""")
def add_well_info_table(self):
"""Creates well-info-table tab and table frame for bulk data uploads; this table is used to match filenames to
locationids and is used to get elevation and stickup in bulk data
Returns:
"""
key = 'well-info-table'
self.currentdir = os.path.dirname(self.datastr[key].get())
df = pd.read_csv(self.datastr[key].get())
df = df[df['altlocationid'].notnull()]
df['altlocationid'] = df['altlocationid'].apply(lambda x: pd.to_numeric(x, downcast='integer', errors='coerce'),
1)
df = df.set_index(['altlocationid']).sort_index()
# df.index = df.index.astype('int64')
self.data[key] = df
graphframe, tableframe = self.note_tab_add(key, tabw=5, grph=1)
self.datatable[key] = Table(tableframe, dataframe=self.data[key], showtoolbar=True, showstatusbar=True)
self.datatable[key].show()
self.datatable[key].showIndex()
self.datatable[key].update()
self.filefnd['state'] = 'normal'
self.combo_source['state'] = 'normal'
self.proc_man_bulk_button['state'] = 'normal'
def grab_trans_dir(self, master):
"""grabs directory containing transducer files and inputs filenames into a scrollable canvas with comboboxes to
match up well names with locationids.
Args:
master:
Returns:
Dictionary of matches between files and locationids
TODO make this work for and wri files
#https://stackoverflow.com/questions/28736028/python-tkinter-reference-in-comboboxes-created-in-for-loop
"""
key = 'trans-dir'
self.datastr[key].set(filedialog.askdirectory(initialdir=self.currentdir,
title="Select transducer directory"))
if self.datastr[key].get() == '' or type(self.datastr[key].get()) == tuple or \
self.datastr[key].get() == 'Double-Click for transducer file directory':
pass
else:
ttk.Separator(master, orient=tk.HORIZONTAL).grid(row=0, column=0, columnspan=3, sticky='ew', pady=5)
self.currentdir = os.path.dirname(self.datastr[key].get())
# https://stackoverflow.com/questions/45357174/tkinter-drop-down-menu-from-excel
# TODO add excel sheet options to file selection
filenm, file_extension = os.path.splitext(self.datastr[key].get())
ttk.Label(master, text='4. Match id with list of files.').grid(row=1, column=0, columnspan=3)
ttk.Label(master, text='Filename').grid(row=2, column=0)
ttk.Label(master, text='Match Name').grid(row=2, column=1)
ttk.Label(master, text='Well ID').grid(row=2, column=2, sticky=tk.W)
# https://blog.tecladocode.com/tkinter-scrollable-frames/
container = ttk.Frame(master)
canvas = tk.Canvas(container)
scrollbar = ttk.Scrollbar(container, orient="vertical", command=canvas.yview)
scrollbarx = ttk.Scrollbar(container, orient="horizontal", command=canvas.xview)
scrollable_frame = ttk.Frame(canvas)
if 'well-info-table' in self.datatable.keys():
df = self.datatable['well-info-table'].model.df
if self.combo_source.get() == 'Snake Valley Wells':
df['locationnamelwr'] = df['locationname'].apply(lambda x: x.lower(), 1)
elif self.combo_source.get() == 'Wetlands Piezos':
df['locationnamelwr'] = df.index.map(str)
else:
df['locationnamelwr'] = df['locationname'].apply(lambda x: x.lower(), 1)
self.locdict = df['locationnamelwr'].to_dict()
self.welldict = {y: x for x, y in self.locdict.items()}
self.locnamedict = dict(zip(df['locationnamelwr'].values, df['locationname'].values))
self.locnametoid = dict(zip(df['locationname'].values, df.index.values))
syndict = {73: ['Eskdale MX', ['eskmx', 'eskdalemx', 'edmx']],
69: ['Twin Springs MX', ['tsmx', 'twinmx', 'twin', 'twin springs mx']],
70: ['Snake Valley North MX', ['svnmx', 'snakevnmx', 'northmx']],
71: ['Snake Valley South MX', ['svsmx', 'snakevsmx', 'southmx']],
46: ['Coyote Knolls MX', ['cksmx', 'ckmx', 'coyoteknollsmx', 'pw17mx']],
72: ['Needle Point 23a', ['needle', 'sg23a', 'needpnt']],
74: ['Shell-Baker', ['shell', 'shellbaker']],
9003: ['PW03 Baro', ['pw03baro']],
9027: ['PW10 Baro', ['pw10baro']],
9049: ['PW19 Baro', ['pw19baro']],
68: ['SG27', ['sg27a']],
39: ['AG15', ['pw15', 'ag15', 'pw15a', 'ag15a']],
136: ['Callao C119', ['callao', 'callaoag']],
75: ['Central Tule MX', ['ctvmx', 'centraltulemx', 'ctulemx', 'ctmx']],
51: ['PW20', ['pw20a']]}
for key, value in syndict.items():
for syn in value[1]:
self.welldict[syn] = key
self.locnamedict[syn] = value[0]
i = 0
for file in glob.glob(self.datastr['trans-dir'].get() + '/*'):
filew_ext = os.path.basename(file)
filestr = ll.getfilename(file)
if self.combo_source.get() == 'Snake Valley Wells':
a = re.split('_|\s', filestr)[0].lower()
elif self.combo_source.get() == 'Wetlands Piezos':
try:
a = filestr.replace('-_', '-').split('-')[1].split('_')[0].lower()
except:
a = filestr.lower()
else:
a = filestr.lower()
ttk.Label(scrollable_frame, text=filestr, width=30).grid(row=i, column=0)
self.locidmatch[filestr] = tk.StringVar(scrollable_frame)
self.bulktransfilestr[filestr] = tk.StringVar(scrollable_frame)
self.combo[filestr] = ttk.Combobox(scrollable_frame)
self.combo[filestr].grid(row=i, column=1)
e = ttk.Entry(scrollable_frame, textvariable=self.locidmatch[filestr], width=6)
e.grid(row=i, column=2)
self.combo[filestr]['values'] = list(df.sort_values(['locationname'])['locationname'].unique())
if 'locdict' in self.__dict__.keys():
if a in self.locnamedict.keys():
self.bulktransfilestr[filestr].set(self.locnamedict[a])
self.combo[filestr].set(self.locnamedict[a])
self.locidmatch[filestr].set(self.welldict[a])
self.inputforheadertable[filew_ext] = self.welldict[a]
self.combo[filestr].bind("<<ComboboxSelected>>",
lambda event, filestr=filestr: self.update_location_dicts(filestr))
i += 1
# self.filefnd.bind('<Double-ButtonRelease-1>', lambda f: self.grab_dir(dirselectframe))
self.bulk_match_button["state"] = "normal"
scrollable_frame.bind("<Configure>", lambda e: canvas.configure(scrollregion=canvas.bbox("all")))
# scrollable_frame.pack(fill='both',side='left')
canvas.create_window((0, 0), window=scrollable_frame, anchor="nw")
canvas.configure(yscrollcommand=scrollbar.set, xscrollcommand=scrollbarx.set)
container.grid(row=3, column=0, columnspan=3)
canvas.pack(side="left", fill="both", expand=True)
scrollbar.pack(side="right", fill="y")
scrollbarx.pack(side="bottom", fill="x")
def update_location_dicts(self, filestr):
self.locidmatch[filestr].set(self.locnametoid[self.combo[filestr].get()])
def dropmenu(self, master):
# menu bars at the top of the main window
self.root = master
master.option_add('*tearOff', False)
menubar = tk.Menu(master)
master.config(menu=menubar)
file = tk.Menu(menubar)
edit = tk.Menu(menubar)
help_ = tk.Menu(menubar)
menubar.add_cascade(menu=file, label='File')
menubar.add_cascade(menu=edit, label='Edit')
menubar.add_cascade(menu=help_, label='Help')
file.add_command(label='New', command=lambda: print('New File'))
file.add_separator()
file.add_command(label="Open Config File...", command=self.open)
file.entryconfig('New', accelerator='Ctrl + N')
save = tk.Menu(file)
file.add_cascade(menu=save, label='Save')
save.add_command(label="Save Well Config", command=self.save)
save.add_command(label='Save As', command=lambda: print('save as'))
save.add_command(label='Save All', command=lambda: print('saving'))
file.add_command(label='Quit', command=self.root.destroy)
self.save_obj = {}
def save(self):
file = filedialog.asksaveasfile(mode="w", filetypes=[('text', '.txt')], defaultextension=".txt")
if file is None:
print('No File Selected')
return
else:
file.write("name, key, value\n")
for key, value in self.datastr.items():
file.write(f"datastr,{key},{value}\n")
for key, value in self.combo.items():
file.write(f"combo, {key},{value}\n")
for key, value in self.tabstate.items():
file.write(f"tabstate, {key},{value}\n")
file.close()
return
def open(self):
filename = filedialog.askopenfilename(filetypes=[('text', '.txt')])
if filename is None or filename == '':
return
else:
df = pd.read_csv(filename)
df[['name', 'key', 'value']].apply(lambda x: self.select_type(x), 1)
def select_type(self, x):
name = x[0]
key = x[1]
obj = x[2]
if name == 'datastr':
self.datastr[key] = obj
elif name == 'combo':
self.combo[key] = obj
elif name == 'tabstate':
self.tabstate[key] = obj
else:
pass
#### dataexplore-----------------------------------------------------------------------------------------------------
def currentTablePrefs(self):
"""Preferences dialog"""
table = self.getCurrentTable()
table.showPreferences()
return
def setConfigDir(self):
"""Set up config folder"""
homepath = os.path.join(os.path.expanduser('~'))
path = '.dataexplore'
self.configpath = os.path.join(homepath, path)
self.pluginpath = os.path.join(self.configpath, 'plugins')
if not os.path.exists(self.configpath):
os.mkdir(self.configpath)
os.makedirs(self.pluginpath)
return
def setStyles(self):
"""Set theme and widget styles"""
style = self.style = Style(self)
available_themes = self.style.theme_names()
plf = util.checkOS()
if plf == 'linux':
style.theme_use('default')
elif plf == 'darwin':
style.theme_use('clam')
self.bg = bg = self.style.lookup('TLabel.label', 'background')
style.configure('Horizontal.TScale', background=bg)
#set common background style for all widgets because of color issues
#if plf in ['linux','darwin']:
# self.option_add("*background", bg)
dialogs.applyStyle(self.menu)
return
def start_logging(self):
import logging
logging.basicConfig(filename=logfile,format='%(asctime)s %(message)s')
def createMenuBar(self):
"""Create the menu bar for the application. """
self.menu = tk.Menu(self.main)
file_menu = tk.Menu(self.menu, tearoff=0)
#add recent first
self.createRecentMenu(file_menu)
filemenuitems = {'01New Project':{'cmd': self.newProject},
'02Open Project':{'cmd': lambda: self.loadProject(asksave=True)},
'03Close':{'cmd':self.closeProject},
'04Save':{'cmd':self.saveProject},
'05Save As':{'cmd':self.saveasProject},
'06sep':'',
'07Import CSV':{'cmd':self.importCSV},
'08Import from URL':{'cmd':self.importURL},
'08Import Excel':{'cmd':self.importExcel},
'09Export CSV':{'cmd':self.exportCSV},
'10sep':'',
'11Quit':{'cmd':self.quit}}
self.file_menu = self.createPulldown(self.menu, filemenuitems, var=file_menu)
self.menu.add_cascade(label='File',menu=self.file_menu['var'])
editmenuitems = {'01Undo Last Change':{'cmd': self.undo},
'02Copy Table':{'cmd': self.copyTable},
'03Find/Replace':{'cmd':self.findText},
'04Preferences':{'cmd': self.currentTablePrefs}
}
self.edit_menu = self.createPulldown(self.menu, editmenuitems)
self.menu.add_cascade(label='Edit',menu=self.edit_menu['var'])
self.sheet_menu={'01Add Sheet':{'cmd': lambda: self.addSheet(select=True)},
'02Remove Sheet':{'cmd': lambda: self.deleteSheet(ask=True)},
'03Copy Sheet':{'cmd':self.copySheet},
'04Rename Sheet':{'cmd':self.renameSheet},
#'05Sheet Description':{'cmd':self.editSheetDescription}
}
self.sheet_menu = self.createPulldown(self.menu,self.sheet_menu)
self.menu.add_cascade(label='Sheet',menu=self.sheet_menu['var'])
self.view_menu={'01Zoom In':{'cmd': lambda: self._call('zoomIn')},
'02Zoom Out':{'cmd': lambda: self._call('zoomOut')},
'03Wrap Columns':{'cmd': lambda: self._call('setWrap')},
'04sep':'',
'05Dark Theme':{'cmd': lambda: self._call('setTheme', name='dark')},
'06Bold Theme':{'cmd': lambda: self._call('setTheme', name='bold')},
'07Default Theme':{'cmd': lambda: self._call('setTheme', name='default')},
}
self.view_menu = self.createPulldown(self.menu,self.view_menu)
self.menu.add_cascade(label='View',menu=self.view_menu['var'])
self.table_menu={'01Describe Table':{'cmd':self.describe},
'02Convert Column Names':{'cmd':lambda: self._call('convertColumnNames')},
'03Convert Numeric':{'cmd': lambda: self._call('convertNumeric')},
'04Clean Data': {'cmd': lambda: self._call('cleanData')},
'05Find Duplicates': {'cmd': lambda: self._call('findDuplicates')},
'06Correlation Matrix':{'cmd': lambda: self._call('corrMatrix')},
'07Concatenate Tables':{'cmd':self.concat},
'08Table to Text':{'cmd': lambda: self._call('showasText')},
'09Table Info':{'cmd': lambda: self._call('showInfo')},
'10sep':'',
'11Transform Values':{'cmd': lambda: self._call('transform')},
'12Group-Aggregate':{'cmd': lambda: self._call('aggregate')},
'13Cross Tabulation':{'cmd': lambda: self._call('crosstab')},
'14Merge/Concat Tables': {'cmd': lambda: self._call('doCombine')},
'15Pivot Table':{'cmd': lambda: self._call('pivot')},
'16Melt Table':{'cmd': lambda: self._call('melt')},
'17Time Series Resampling':{'cmd': lambda: self._call('resample')}
}
self.table_menu = self.createPulldown(self.menu,self.table_menu)
self.menu.add_cascade(label='Tools',menu=self.table_menu['var'])
self.plots_menu={'01Store plot':{'cmd':self.addPlot},
'02Clear plots':{'cmd':self.updatePlotsMenu},
'03PDF report':{'cmd':self.pdfReport},
'04sep':''}
self.plots_menu = self.createPulldown(self.menu,self.plots_menu)
self.menu.add_cascade(label='Plots',menu=self.plots_menu['var'])
self.help_menu={'01Online Help':{'cmd':self.online_documentation},
'02View Error Log':{'cmd':self.showErrorLog},
'03About':{'cmd':self.about}}
self.help_menu=self.createPulldown(self.menu,self.help_menu)
self.menu.add_cascade(label='Help',menu=self.help_menu['var'])
self.main.config(menu=self.menu)
return
def showErrorLog(self):
"""Open log file"""
        with open(logfile, 'r') as f:
            s = ''.join(f.readlines())
w = tk.Toplevel(self)
w.grab_set()
w.transient(self)
ed = SimpleEditor(w)
ed.pack(in_=w, fill=tk.BOTH, expand=tk.Y)
ed.text.insert(tk.END, s)
return
def createRecentMenu(self, menu):
"""Recent projects menu"""
from functools import partial
recent = self.appoptions['recent']
recentmenu = tk.Menu(menu)
menu.add_cascade(label="Open Recent", menu=recentmenu)
for r in recent:
recentmenu.add_command(label=r, command=partial(self.loadProject, r))
return
def bring_to_foreground(self, set_focus=False):
self.main.deiconify()
self.main.attributes('-topmost', True)
self.main.after_idle(self.main.attributes, '-topmost', False)
self.main.lift()
if set_focus:
#Looks like at least on Windows the following is required for the window
#to also get focus (deiconify, ..., iconify, deiconify)
import platform
if platform.system() != "Linux":
# http://stackoverflow.com/a/13867710/261181
self.main.iconify()
self.main.deiconify()
return
def getBestGeometry(self):
"""Calculate optimal geometry from screen size"""
ws = self.main.winfo_screenwidth()
hs = self.main.winfo_screenheight()
        self.w = w = ws / 1.4
        h = hs * 0.7
        x = (ws / 2) - (w / 2)
        y = (hs / 2) - (h / 2)
g = '%dx%d+%d+%d' % (w,h,x,y)
return g
def setGeometry(self):
self.winsize = self.getBestGeometry()
self.main.geometry(self.winsize)
return
def createPulldown(self, menu, dict, var=None):
"""Create pulldown menu, returns a dict.
Args:
menu: parent menu bar
dict: dictionary of the form -
{'01item name':{'cmd':function name, 'sc': shortcut key}}
var: an already created menu
"""
if var is None:
var = tk.Menu(menu,tearoff=0)
items = list(dict.keys())
items.sort()
for item in items:
if item[-3:] == 'sep':
var.add_separator()
else:
command = dict[item]['cmd']
label = '%-25s' %(item[2:])
if 'img' in dict[item]:
img = dict[item]['img']
else:
img = None
if 'sc' in dict[item]:
sc = dict[item]['sc']
#bind command
#self.main.bind(sc, command)
else:
sc = None
var.add('command', label=label, command=command, image=img,
compound="left")#, accelerator=sc)
dict['var'] = var
return dict
def progressDialog(self):
t = tk.Toplevel(self)
        pb = ttk.Progressbar(t, mode="indeterminate")
pb.pack(side="bottom", fill=tk.X)
t.title('Progress')
t.transient(self)
t.grab_set()
t.resizable(width=False, height=False)
return pb
def loadMeta(self, table, meta):
"""Load meta data for a sheet, this includes plot options and
table selections"""
tablesettings = meta['table']
if 'childtable' in meta:
childtable = meta['childtable']
childsettings = meta['childselected']
else:
childtable = None
#load plot options
opts = {'mplopts': table.pf.mplopts,
'mplopts3d': table.pf.mplopts3d,
'labelopts': table.pf.labelopts
}
for m in opts:
if m in meta and meta[m] is not None:
#util.setAttributes(opts[m], meta[m])
opts[m].updateFromOptions(meta[m])
#check options loaded for missing values
#avoids breaking file saves when options changed
defaults = plotting.get_defaults(m)
for key in defaults:
if key not in opts[m].opts:
opts[m].opts[key] = defaults[key]
#load table settings
util.setAttributes(table, tablesettings)
#load plotviewer
if 'plotviewer' in meta:
#print (meta['plotviewer'])
util.setAttributes(table.pf, meta['plotviewer'])
table.pf.updateWidgets()
if childtable is not None:
table.createChildTable(df=childtable)
util.setAttributes(table.child, childsettings)
#redraw col selections
if type(table.multiplecollist) is tuple:
table.multiplecollist = list(table.multiplecollist)
table.drawMultipleCols()
return
def saveMeta(self, table):
"""Save meta data such as current plot options"""
meta = {}
#save plot options
meta['mplopts'] = table.pf.mplopts.kwds
meta['mplopts3d'] = table.pf.mplopts3d.kwds
meta['labelopts'] = table.pf.labelopts.kwds
#print (table.pf.mplopts.kwds)
#save table selections
meta['table'] = util.getAttributes(table)
meta['plotviewer'] = util.getAttributes(table.pf)
#print (meta['plotviewer'])
#save row colors since its a dataframe and isn't picked up by getattributes currently
meta['table']['rowcolors'] = table.rowcolors
#save child table if present
if table.child != None:
meta['childtable'] = table.child.model.df
meta['childselected'] = util.getAttributes(table.child)
return meta
def saveAppOptions(self):
"""Save global app options to config dir"""
appfile = os.path.join(self.configpath, 'app.p')
file = open(appfile,'wb')
pickle.dump(self.appoptions, file, protocol=2)
file.close()
return
def loadAppOptions(self):
"""Load global app options if present"""
appfile = os.path.join(self.configpath, 'app.p')
if os.path.exists(appfile):
self.appoptions = pickle.load(open(appfile,'rb'))
else:
self.appoptions = {}
self.appoptions['recent'] = []
return
def newProject(self, data=None, df=None):
"""Create a new project from data or empty"""
w = self.closeProject()
if w == None:
return
self.sheets = OrderedDict()
self.sheetframes = {} #store references to enclosing widgets
self.openplugins = {} #refs to running plugins
self.updatePlotsMenu()
for n in self.notebook.tabs():
self.notebook.forget(n)
if data != None:
for s in sorted(data.keys()):
if s == 'meta':
continue
df = data[s]['table']
if 'meta' in data[s]:
meta = data[s]['meta']
else:
meta=None
#try:
self.addSheet(s, df, meta)
'''except Exception as e:
print ('error reading in options?')
print (e)'''
else:
pass
self.filename = None
self.projopen = True
self.main.title('DataExplore')
return
def loadProject(self, filename=None, asksave=False):
"""Open project file"""
w=True
if asksave == True:
w = self.closeProject()
if w == None:
return
if filename == None:
            filename = filedialog.askopenfilename(defaultextension='.dexpl',
initialdir=self.defaultsavedir,
filetypes=[("project","*.dexpl"),
("All files","*.*")],
parent=self.main)
if not filename:
return
if not os.path.exists(filename):
print ('no such file')
self.removeRecent(filename)
return
ext = os.path.splitext(filename)[1]
if ext != '.dexpl':
print ('does not appear to be a project file')
return
if os.path.isfile(filename):
#new format uses pickle
try:
data = pickle.load(gzip.GzipFile(filename, 'r'))
except OSError as oe:
msg = 'DataExplore can no longer open the old format project files.\n'\
'if you really need the file revert to pandastable<=0.12.1 and save the data.'
messagebox.showwarning("Project open error", msg)
return
#create backup file before we change anything
#backupfile = filename+'.bak'
#pd.to_msgpack(backupfile, data, encoding='utf-8')
else:
print ('no such file')
self.quit()
return
self.newProject(data)
self.filename = filename
self.main.title('%s - DataExplore' %filename)
self.projopen = True
self.defaultsavedir = os.path.dirname(os.path.abspath(filename))
self.addRecent(filename)
return
def removeRecent(self, filename):
"""Remove file from recent list"""
recent = self.appoptions['recent']
if filename in recent:
recent.remove(filename)
self.saveAppOptions()
return
def addRecent(self, filename):
"""Add file name to recent projects"""
recent = self.appoptions['recent']
if not os.path.abspath(filename) in recent:
if len(recent)>=5:
recent.pop(0)
recent.append(os.path.abspath(filename))
self.saveAppOptions()
return
def saveProject(self, filename=None):
"""Save project"""
if filename != None:
self.filename = filename
if not hasattr(self, 'filename') or self.filename == None:
self.saveasProject()
else:
self.doSaveProject(self.filename)
return
def saveasProject(self):
"""Save as a new filename"""
filename = filedialog.asksaveasfilename(parent=self.main,
defaultextension='.dexpl',
initialdir=self.defaultsavedir,
filetypes=[("project","*.dexpl")])
if not filename:
return
self.filename = filename
self.defaultsavedir = os.path.dirname(os.path.abspath(filename))
self.doSaveProject(self.filename)
self.addRecent(filename)
return
def doSaveProject(self, filename):
"""Save sheets as dict in msgpack"""
self._checkTables()
data={}
for i in self.sheets:
table = self.sheets[i]
data[i] = {}
data[i]['table'] = table.model.df
data[i]['meta'] = self.saveMeta(table)
#pd.to_msgpack(filename, data, encoding='utf-8')
#changed to pickle format
        file = gzip.GzipFile(filename, 'w')
        pickle.dump(data, file)
        file.close()
        return
def _checkTables(self):
"""Check tables before saving that so we are not saving
filtered copies"""
for s in self.sheets:
t=self.sheets[s]
if t.filtered==True:
t.showAll()
return
def closeProject(self):
"""Close"""
if self.projopen == False:
w = False
else:
w = messagebox.askyesnocancel("Close Project",
"Save this project?",
parent=self.master)
if w==None:
return
elif w==True:
self.saveProject()
else:
pass
for n in self.notebook.tabs():
self.notebook.forget(n)
self.filename = None
self.projopen = False
self.main.title('DataExplore')
return w
def importCSV(self):
"""Import csv to a new sheet"""
self.addSheet(select=True)
table = self.getCurrentTable()
table.importCSV(dialog=True)
return
def importURL(self):
"""Import CSV from URL"""
url = simpledialog.askstring("Import url", "Input CSV URL",
parent=self.master)
if url is not None:
name = os.path.basename(url)
df = pd.read_csv(url)
self.addSheet(sheetname=name, df=df, select=True)
return
def exportCSV(self):
"""Import csv to a new sheet"""
table = self.getCurrentTable()
table.doExport()
return
def importExcel(self, filename=None):
if filename is None:
filename = filedialog.askopenfilename(parent=self.master,
defaultextension='.xls',
initialdir=os.getcwd(),
filetypes=[("xls","*.xls"),
("xlsx","*.xlsx"),
("All files","*.*")])
        data = pd.read_excel(filename, sheet_name=None)
for n in data:
self.addSheet(n, df=data[n], select=True)
return
def load_dataframe(self, df, name=None, select=False):
"""Load a DataFrame into a new sheet
Args:
df: dataframe
name: name of new sheet
select: set new sheet as selected
"""
if hasattr(self,'sheets'):
self.addSheet(sheetname=name, df=df, select=select)
else:
data = {name:{'table':df}}
self.newProject(data)
return
def load_msgpack(self, filename):
"""Load a msgpack file"""
size = round((os.path.getsize(filename)/1.0485e6),2)
print(size)
df = pd.read_msgpack(filename)
name = os.path.splitext(os.path.basename(filename))[0]
self.load_dataframe(df, name)
return
def load_pickle(self, filename):
"""Load a pickle file"""
df = pd.read_pickle(filename)
name = os.path.splitext(os.path.basename(filename))[0]
self.load_dataframe(df, name)
return
def getData(self, name):
"""Get predefined data from dataset folder"""
filename = os.path.join(self.modulepath, 'datasets', name)
df = pd.read_csv(filename, index_col=0)
name = os.path.splitext(os.path.basename(filename))[0]
self.load_dataframe(df, name, select=True)
return
def addSheet(self, sheetname=None, df=None, meta=None, select=False):
"""Add a sheet with new or existing data"""
names = [self.notebook.tab(i, "text") for i in self.notebook.tabs()]
def checkName(name):
if name == '':
messagebox.showwarning("Whoops", "Name should not be blank.")
return 0
if name in names:
messagebox.showwarning("Name exists", "Sheet name already exists!")
return 0
noshts = len(self.notebook.tabs())
if sheetname == None:
sheetname = simpledialog.askstring("New sheet name?", "Enter sheet name:",
initialvalue='sheet'+str(noshts+1))
if sheetname == None:
return
if checkName(sheetname) == 0:
return
#Create the table
main = ttk.PanedWindow(orient=tk.HORIZONTAL)
self.sheetframes[sheetname] = main
self.notebook.add(main, text=sheetname)
f1 = ttk.Frame(main)
table = Table(f1, dataframe=df, showtoolbar=1, showstatusbar=1)
f2 = ttk.Frame(main)
#show the plot frame
pf = table.showPlotViewer(f2, layout='horizontal')
#load meta data
if meta != None:
self.loadMeta(table, meta)
#add table last so we have save options loaded already
main.add(f1,weight=3)
table.show()
main.add(f2,weight=4)
if table.plotted == 'main':
table.plotSelected()
elif table.plotted == 'child' and table.child != None:
table.child.plotSelected()
self.saved = 0
self.currenttable = table
#attach menu state of undo item so that it's disabled after an undo
#table.undo_callback = lambda: self.toggleUndoMenu('active')
self.sheets[sheetname] = table
if select == True:
ind = self.notebook.index('end')-1
s = self.notebook.tabs()[ind]
self.notebook.select(s)
return sheetname
def deleteSheet(self, ask=False):
"""Delete a sheet"""
s = self.notebook.index(self.notebook.select())
name = self.notebook.tab(s, 'text')
w=True
if ask == True:
w = messagebox.askyesno("Delete Sheet",
"Remove this sheet?",
parent=self.master)
if w==False:
return
self.notebook.forget(s)
del self.sheets[name]
del self.sheetframes[name]
return
def copySheet(self, newname=None):
"""Copy a sheet"""
currenttable = self.getCurrentTable()
newdata = currenttable.model.df
meta = self.saveMeta(currenttable)
self.addSheet(newname, df=newdata, meta=meta)
return
def renameSheet(self):
"""Rename a sheet"""
s = self.notebook.tab(self.notebook.select(), 'text')
newname = simpledialog.askstring("New sheet name?",
"Enter new sheet name:",
initialvalue=s)
if newname == None:
return
self.copySheet(newname)
self.deleteSheet()
return
def editSheetDescription(self):
"""Add some meta data about the sheet"""
w = tk.Toplevel(self.main)
w.grab_set()
w.transient(self)
ed = SimpleEditor(w, height=25)
ed.pack(in_=w, fill=tk.BOTH, expand=tk.Y)
#ed.text.insert(END, buf.getvalue())
return
def getCurrentSheet(self):
"""Get current sheet name"""
s = self.notebook.index(self.notebook.select())
name = self.notebook.tab(s, 'text')
return name
def getCurrentTable(self):
s = self.notebook.index(self.notebook.select())
name = self.notebook.tab(s, 'text')
table = self.sheets[name]
return table
def getSheetList(self):
return list(self.sheets.keys())
def describe(self):
"""Describe dataframe"""
table = self.getCurrentTable()
df = table.model.df
d = df.describe()
table.createChildTable(d,index=True)
return
def findText(self):
table = self.getCurrentTable()
table.findText()
return
def concat(self):
"""Concat 2 tables"""
vals = list(self.sheets.keys())
if len(vals)<=1:
return
d = MultipleValDialog(title='Concat',
initialvalues=(vals,vals),
labels=('Table 1','Table 2'),
types=('combobox','combobox'),
parent = self.master)
if d.result == None:
return
else:
s1 = d.results[0]
s2 = d.results[1]
if s1 == s2:
return
df1 = self.sheets[s1].model.df
df2 = self.sheets[s2].model.df
m = pd.concat([df1,df2])
self.addSheet('concat-%s-%s' %(s1,s2),m)
return
def getStackedData(self):
df = TableModel.getStackedData()
self.addSheet(sheetname='stacked-data', df=df)
return
def copyTable(self, subtable=False):
"""Copy current table dataframe"""
table = self.getCurrentTable()
table.model.df.to_clipboard()
return
def pasteTable(self, subtable=False):
"""Paste copied dataframe into current table"""
#add warning?
if self.clipboarddf is None:
return
df = self.clipboarddf
table = self.getCurrentTable()
if subtable == True:
table.createChildTable(df)
else:
model = TableModel(df)
table.updateModel(model)
return
def hidePlot(self):
name = self.getCurrentSheet()
pw = self.sheetframes[name]
pw.forget(1)
return
def showPlot(self):
name = self.getCurrentSheet()
table = self.sheets[name]
pw = self.sheetframes[name]
pw.add(table.pf, weight=2)
return
def addPlot(self):
"""Store the current plot so it can be re-loaded"""
import pickle
name = self.getCurrentSheet()
table = self.sheets[name]
fig = table.pf.fig
t = time.strftime("%H:%M:%S")
label = name+'-'+t
#dump and reload the figure to get a new object
p = pickle.dumps(fig)
fig = pickle.loads(p)
self.plots[label] = fig
def func(label):
fig = self.plots[label]
win = tk.Toplevel()
win.title(label)
plotting.addFigure(win, fig)
menu = self.plots_menu['var']
menu.add_command(label=label, command=lambda: func(label))
return
def updatePlotsMenu(self, clear=True):
"""Clear stored plots"""
if clear == True:
self.plots = {}
menu = self.plots_menu['var']
menu.delete(4, menu.index(tk.END))
return
def pdfReport(self):
"""Create pdf report from stored plots"""
from matplotlib.backends.backend_pdf import PdfPages
filename = filedialog.asksaveasfilename(parent=self.main,
defaultextension='.pdf',
initialdir=self.defaultsavedir,
filetypes=[("pdf","*.pdf")])
if not filename:
return
pdf_pages = PdfPages(filename)
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
for p in self.plots:
fig = self.plots[p]
canvas = FigureCanvasTkAgg(fig, master=self)
pdf_pages.savefig(fig)
pdf_pages.close()
return
def undo(self):
"""Restores last version of current table"""
table = self.getCurrentTable()
table.undo()
#self.toggleUndoMenu('disabled')
return
def toggleUndoMenu(self, state='active'):
menu = self.edit_menu['var']
menu.entryconfigure(0, state=state)
return
def _call(self, func, **args):
"""Call a table function from it's string name"""
table = self.getCurrentTable()
getattr(table, func)(**args)
return
def about(self):
"""About dialog"""
abwin = tk.Toplevel()
x = 400
y = 400
w = 600
h = 600
abwin.geometry('+%d+%d' %(x+w/2-200,y+h/2-200))
abwin.title('About')
abwin.transient(self)
abwin.grab_set()
abwin.resizable(width=False, height=False)
abwin.configure(background=self.bg)
        logo = "G:/My Drive/Python/Pycharm/loggerloader/data_files/GeologicalSurvey.png"
        logoimg = tk.PhotoImage(file=logo)  # tk.Label expects an image object, not a path string
        label = tk.Label(abwin, image=logoimg, anchor=tk.CENTER)
        label.image = logoimg  # keep a reference so the image is not garbage collected
label.grid(row=0,column=0,sticky='ew',padx=4,pady=4)
pandasver = pd.__version__
pythonver = platform.python_version()
mplver = matplotlib.__version__
text='Logger Loader\n'\
        +'Processing scripts Written By Paul Inkenbrandt, Utah Geological Survey\n' \
+'Graphing and Table functions from pandastable by Damien Farrell 2014-\n'\
+'This program is free software; you can redistribute it and/or\n'\
+'modify it under the terms of the GNU General Public License\n'\
+'as published by the Free Software Foundation; either version 3\n'\
+'of the License, or (at your option) any later version.\n'\
+f'Using Python v{pythonver}\n'\
+f'pandas v{pandasver}, matplotlib v{mplver}'
row=1
#for line in text:
        tmp = ttk.Label(abwin, text=text, style="BW.TLabel")  # style= is a ttk option, so use ttk.Label
tmp.grid(row=row,column=0,sticky='news',pady=2,padx=4)
return
def online_documentation(self,event=None):
"""Open the online documentation"""
import webbrowser
link='https://pandastable.readthedocs.io/en/latest/'
webbrowser.open(link,autoraise=1)
return
def quit(self):
self.main.destroy()
return
### dataexplore
def main():
root = tk.Tk()
feedback = Feedback(root)
root.mainloop()
# tkinter.mainloop()
# If you put root.destroy() here, it will cause an error if the window is
# closed with the window manager.
if __name__ == "__main__": main()
| mit |
sbg2133/miscellaneous_projects | carina/lic_thesis_highSL.py | 1 | 6145 | from getIQU import IQU
from subprocess import call
import sys,os
import numpy as np
import glob
import matplotlib
import matplotlib.pyplot as plt
from astropy.convolution import convolve, Gaussian2DKernel
from astropy.io import fits
import scipy.ndimage
from makePretty import pretty
import aplpy
from skimage import filters
#matplotlib.rcParams.update({'font.size':16})
save_files_here = "/home/wizwit/SESE_dissertation/figures/chapter6"
plt.ion()
if len(sys.argv) < 2:
print "You must supply a band, 'e.g., carina_lic.py 250'"
sys.exit()
else:
band = sys.argv[1]
# Call this function with desired band as first argument, e.g.:
# python carina_lic.py 250
# lists over which to iterate:
bands = ['250', '350', '500']
stokes = ['I', 'Q', 'U']
pol_eff = [0.81, 0.79, 0.82]
# define file paths
blastpol_dir = './carinaData/smooth/3.0_arcmin'
filename = glob.glob(blastpol_dir + '/carinaneb_' + band + '_smoothed_3.0_rl.fits')[0]
# load in I, Q, U for desired band
Ivals, Qvals, Uvals, __, wcs = IQU(filename)
I = Ivals[30:-30,260:-260]
Q = Qvals[30:-30,260:-260]
U = Uvals[30:-30,260:-260]
Pvals = np.sqrt(Q**2 + U**2)
pvals = Pvals/I
# Correct pvals as in Jamil's thesis, 5.7
pvals[pvals > 0.5] = np.nan
pvals[pvals < 0] = np.nan
pvals /= pol_eff[bands.index(band)]
phi = 0.5*np.arctan2(U,Q)
#dx = np.cos(phi)
#dy = np.sin(phi)
dx = pvals*np.cos(phi)
dy = pvals*np.sin(phi)
mag = np.sqrt(dx**2 + dy**2)
X = np.linspace(0, I.shape[1], I.shape[1])
Y = np.linspace(0, I.shape[0], I.shape[0])
xs, ys = np.meshgrid(X,Y)
"""
plt.figure()
nskip = 2
skip = (slice(None, None, nskip), slice(None, None, nskip))
#f = aplpy.FITSFigure(I, figsize = (10.24,7.68), dpi = 100)
ax = plt.gca()
ax.imshow(I, cmap = "gist_heat")
#f.tick_labels.set_font(size='small')
#f.show_colorscale(cmap='gist_heat')
# Add polarization vectors
ax.quiver(xs[skip],ys[skip],(dx/mag)[skip],(dy/mag)[skip], color = "white", angles = 'xy', units = 'xy', scale_units = 'xy', scale = 0.3)
#f.show_vectors(pvals, phi, color = 'white', rotate = 90., scale = 50, step = 10)
ax.set_facecolor('black')
plt.tight_layout()
"""
xsize, ysize = len(X), len(Y)
vectors = np.array([dx,dy])
#white = np.random.rand(xsize, ysize)
#white = np.random.uniform(low = 0., high = 1., size = (xsize, ysize))
white = np.random.normal(0., 1., size = (xsize,ysize))
sigma = 1.2
white = scipy.ndimage.gaussian_filter(white, sigma)
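# Overview of the LIC pipeline below (descriptive note): the smoothed white-noise
# texture and the dx/dy polarization vector field are written out as plain-text
# files, the external ./carina_lic binary (invoked further down via subprocess)
# performs the line integral convolution, and its result is read back from
# lic.dat for display over the intensity map.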
with file('texture.dat', 'w') as outfile:
for row in white:
np.savetxt(outfile, row, newline = " ")
outfile.write('\n')
with file('dx.dat', 'w') as outfile:
for row in dx:
np.savetxt(outfile, row, newline = " ")
outfile.write('\n')
with file('dy.dat', 'w') as outfile:
for row in dy:
np.savetxt(outfile, row, newline = " ")
outfile.write('\n')
if len(sys.argv) > 2:
print
print "Doing LIC"
print
command = ["./carina_lic", str(xsize), str(ysize)]
call(command)
lic = np.loadtxt("./lic.dat")
lic = np.transpose(lic)
#np.save('lic.npy', lic)
#lic2 = np.load("lic.npy")
#lic = scipy.ndimage.gaussian_filter(lic, 1.01)
mult = lic * I
"""
blur_size = 8
unsharp_strength = 0.8
blurred = filter.gaussian_filter(lic, blur_size)
highpass = lic - unsharp_strength*blurred
sharp = lic + highpass
lowpass = scipy.ndimage.gaussian_filter(lic, 5)
highpass = lic - lowpass
highpass += lic
"""
"""
hdu2 = fits.PrimaryHDU(data=np.zeros_like(I), header=wcs.to_header())
f2 = aplpy.FITSFigure(hdu2, figsize = (10,10))
f2.set_theme('publication')
ax = plt.gca()
ax.set_facecolor("k")
f2.add_scalebar(15/60.) # arcmin
f2.scalebar.set_label('0.5 deg')
f2.scalebar.set_color('white')
f2.scalebar.set_corner('bottom right')
f2.scalebar.set_label('10 pc')
f2.tick_labels.set_yformat('dd.dd')
f2.tick_labels.set_xformat('dd.dd')
f2.axis_labels.set_font(size=16)
f2.tick_labels.set_font(size=14)
plt.imshow(I, origin = 'lower', cmap = "inferno", alpha = 1)
#plt.imshow(lic, vmin = -0.07, vmax = 0.3, origin = 'lower', cmap = "gray", alpha = 0.4, interpolation = "bilinear")
plt.imshow(lic, vmin = -0.06, vmax = 0.25, origin = 'lower', cmap = "gray", alpha = 0.5, interpolation = "bilinear")
plt.tight_layout()
#f2.savefig(os.path.join(save_files_here, 'lic_han_51.eps'), format='eps', dpi=1000, transparent = True)
plt.savefig(os.path.join(save_files_here, 'lic_han_51.png'), format='png', bbox_inches = 'tight')
##################################################
# For 250 um: v = 1000
# For 350 um: v = 500
# For 500 um: v = 200
hdu3 = fits.PrimaryHDU(data=np.zeros_like(I), header=wcs.to_header())
f3 = aplpy.FITSFigure(hdu3, figsize = (10,10))
f3.set_theme('publication')
# scalebar
ax = plt.gca()
ax.set_facecolor("k")
f3.add_scalebar(15/60.) # arcmin
f3.scalebar.set_label('0.5 deg')
f3.scalebar.set_color('white')
f3.scalebar.set_corner('bottom right')
f3.scalebar.set_label('10 pc')
f3.tick_labels.set_yformat('dd.dd')
f3.tick_labels.set_xformat('dd.dd')
f3.axis_labels.set_font(size=16)
f3.tick_labels.set_font(size=14)
vmin = [-0.000007, 0.4, 0.5]
vmax = [0.00007, 0.4, 0.5]
plt.imshow(mult, origin = 'lower',\
cmap = "inferno", vmin = vmin[bands.index(band)],\
vmax = vmax[bands.index(band)], interpolation = 'bilinear')
#plt.tight_layout()
plt.savefig(os.path.join(save_files_here, 'lic2_han51.png'), format='png', bbox_inches = 'tight')
"""
hdu4 = fits.PrimaryHDU(data=np.zeros_like(I), header=wcs.to_header())
f4 = aplpy.FITSFigure(hdu4, figsize = (10,10))
f4.set_theme('publication')
# scalebar
ax = plt.gca()
ax.set_facecolor("k")
f4.add_scalebar(15/60.) # arcmin
f4.scalebar.set_label('0.5 deg')
f4.scalebar.set_color('white')
f4.scalebar.set_corner('bottom right')
f4.scalebar.set_label('10 pc')
f4.tick_labels.set_yformat('dd.dd')
f4.tick_labels.set_xformat('dd.dd')
f4.axis_labels.set_font(size=16)
f4.tick_labels.set_font(size=14)
vmin = [-0.000007, 0.4, 0.5]
vmax = [0.00007, 0.4, 0.5]
plt.imshow(mult, origin = 'lower',\
cmap = "viridis", vmin = vmin[bands.index(band)],\
vmax = vmax[bands.index(band)], interpolation = 'bilinear')
#plt.tight_layout()
plt.savefig(os.path.join(save_files_here, 'lic2_viridis.png'), format='png', bbox_inches = 'tight')
| gpl-3.0 |
466152112/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
rtrwalker/geotecha | geotecha/consolidation/xieandleo2004.py | 1 | 32571 | # geotecha - A software suite for geotechncial engineering
# Copyright (C) 2018 Rohan T. Walker (rtrwalker@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/gpl.html.
"""
Xie and Leo (2004) "Analytical solutions of one-dimensional large strain
consolidation of saturated and homogeneous clays".
"""
from __future__ import print_function, division
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import scipy.integrate as integrate
class XieAndLeo2004(object):
"""Large strain analytical one-dimensional consolidation
Implementation of Xie and Leo (2004)[1]_.
Features:
- Single layer, vertical flow.
- Large strain.
- Instant applied load uniform with depth
    - Vertical permeability kv = kv0 * (1 + e)**2 / (1 + e0)**2.
    - Large strain volume compressibility is constant over time.
Parameters
----------
    qu : float
        Instant applied uniform load.
    qp : float
        Existing load.
H : float
Initial thickness of clay layer.
Hw : float
Height of water surface above initial surface.
kv0 : float
Coefficient of vertical permeability.
mvl : float
Coefficient of volume compressibility.
e00 : float
Initial void ratio at surface.
Gs : float
Specific gravity of solids.
gamw : float, optional
Unit weight of water. Default gamw=10
drn : [0,1]
Drainage condition. drn=0 is PTPB, drn=1 is PTIB, default=0.
nterms : int, optional
Number of summation terms. Default nterms=100.
Notes
-----
    Basically initialize the XieAndLeo2004 object, then use its individual
    methods to extract data at particular depths and times.
    The most common error is input data that is not a numpy array.
See Also
--------
xie_and_leo2004_figure_4 : example of use.
xie_and_leo2004_figure_5 : example of use.
xie_and_leo2004_figure_6 : example of use.
References
----------
.. [1] Xie, K. H., and C. J. Leo. "Analytical Solutions of
One-Dimensional Large Strain Consolidation of Saturated and
Homogeneous Clays". Computers and Geotechnics 31,
no. 4 (June 2004): 301-14.
doi:10.1016/j.compgeo.2004.02.006.
"""
def __init__(self, qu, qp, H, Hw, kv0, mvl, e00, Gs,
gamw=10, drn=0, nterms=100):
self.qu = qu
self.qp = qp
self.H = H
self.Hw = Hw
self.kv0 = kv0
self.mvl = mvl
self.e00 = e00
self.Gs = Gs
self.gamw = gamw
self.drn = drn
self.nterms = nterms
self._derived_parameters()
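        # Minimal usage sketch (values mirror the __main__ example at the bottom
        # of this module; illustrative only):
        #   obj = XieAndLeo2004(qu=100, qp=10, H=10, Hw=1.0, kv0=1e-9, mvl=4e-3,
        #                       e00=3, Gs=2.75, gamw=10, drn=0, nterms=100)
        #   a = np.linspace(0, 10, 50)                 # Lagrangian depth coords
        #   t = obj.t_from_Us_PTPB(np.array([0.5]))    # time at Us = 50%
        #   u = obj.u_PTPB(a, t)                       # excess pore pressure
        #   s = obj.settlement_PTPB(a, t)              # settlement profile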
def _derived_parameters(self):
"""calculate parameters that derive from the input parameters
"""
self.M = np.pi * (np.arange(self.nterms) + 0.5)
# if drn==1:
# self.M = np.pi * (np.arange(self.nterms) + 0.5)
# else:
# self.M = np.pi * np.arange(1, self.nterms + 1.0)
self.cv0 = self.kv0 / (self.mvl * self.gamw)
self.dTv = self.cv0 / self.H**2
def Tv(self, t):
"""Calculate vertical time factor
Parameters
----------
t : array-like of float
Time(s).
Returns
-------
Tv : float
Time factor
"""
return self.dTv * t
def e0(self, a):
"""Initial void ratio at depth
Parameters
----------
a : array like of float
Depth coord.
Returns
-------
e0 : float
Initial void ratio at depth a.
"""
e0 = self.e00 -self.mvl * self.gamw * (self.Gs - 1) * a
return e0
    def efinal(self, a):
        """Final void ratio at depth
Parameters
----------
a : array like of float
Depth coord.
Returns
-------
efinal : float
Final void ratio at depth a.
"""
mvl = self.mvl
qu = self.qu
e0 = self.e0(a)
efinal = (1 + e0) * np.exp(-mvl * qu) - 1
return efinal
def settlement_final(self):
"""Final settlement of clay layer"""
return self.H * (1 - np.exp(-self.mvl * self.qu))
def initial_effective_stress(self, a):
"""Initial effective stress
Parameters
----------
a : array like of float
Depth coord
Returns
-------
eff0 : float
Initial effective stress at depth a
"""
qp = self.qp
e00 = self.e00
mvl = self.mvl
Gs = self.Gs
gamw = self.gamw
f = qp + 1/mvl * np.log((1+e00)/(1+e00 - mvl * gamw * (Gs-1) * a))
return f
def u_PTIB(self, a, t):
"""Pore pressure for PTIB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
u : float of size (len(a), len(t))
Excess pore pressure at depth a, time t.
"""
# a = np.atleast_1d(a)
Tv = self.Tv(t)[None, :, None]
a = (a/ self.H)[:, None, None]
mvl = self.mvl
qu = self.qu
M = self.M[None, None, :]
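        # Sketch of the closed form evaluated below (read directly off the code):
        #   u(a,t) = 1/mvl * ln(1 + (exp(mvl*qu) - 1)
        #                          * sum_m 2/M_m * sin(M_m*a/H) * exp(-M_m**2*Tv))
        # with M_m = pi*(m + 0.5), so u decays to zero as Tv becomes large.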
f = 2 / M * np.sin(M * a) * np.exp(-M**2 * Tv)
f = np.sum(f, axis=2)
f *= np.exp(mvl * qu) - 1
f += 1
np.log(f, out=f)
f /= mvl
return f
def u_PTPB(self, a, t):
"""Pore pressure for PTPB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
u : float of size (len(a), len(t))
Excess pore pressure at depth a, time t.
"""
Tv = self.Tv(t)[None, :, None]
a = (a/ self.H)[:, None, None]
mvl = self.mvl
qu = self.qu
M = self.M[None, None, :]
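        # Same series structure as u_PTIB, but for two-way drainage the terms
        # below use sin(2*M_m*a/H) and exp(-4*M_m**2*Tv).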
f = 2 / M * np.sin(2 * M * a) * np.exp(-4 * M**2 * Tv)
f = np.sum(f, axis=2)
f *= np.exp(mvl * qu) - 1
f += 1
np.log(f, out=f)
f /= mvl
return f
def settlement_PTIB(self, a, t):
"""Settlement for PTIB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
settlement : float of size (len(a), len(t))
Settlement at depth a, time t.
"""
Tv = self.Tv(t)[None, :, None]
a_ = (a /self.H)[:, None]
a = (a/ self.H)[:, None, None]
mvl = self.mvl
qu = self.qu
M = self.M[None, None, :]
H = self.H
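        # Sketch of the evaluation below:
        #   S(a,t) = H*(1 - exp(-mvl*qu))
        #            * [1 - a/H - sum_m 2/M_m**2 * cos(M_m*a/H) * exp(-M_m**2*Tv)]
        # which approaches the final settlement profile as Tv -> infinity.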
f = 2 / M**2 * np.cos(M * a) * np.exp(-M**2 * Tv)
f = -np.sum(f, axis=2)
f += 1 - a_
f *= 1 - np.exp(-mvl * qu)
f *= H
return f
def settlement_PTPB(self, a, t):
"""Settlement for PTPB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
settlement : float of size (len(a), len(t))
Settlement at depth a, time t.
"""
Tv = self.Tv(t)[None, :, None]
a_ = (a /self.H)[:, None]
a = (a/ self.H)[:, None, None]
mvl = self.mvl
qu = self.qu
M = self.M[None, None, :]
H = self.H
f = 1 / M**2 * (1 + np.cos(2 * M * a)) * np.exp(-4 * M**2 * Tv)
f = -np.sum(f, axis=2)
f += 1 - a_
f *= 1 - np.exp(-mvl * qu)
f *= H
return f
def Us_PTIB(self, t):
"""Average degree of consolidation from settlement for PTIB drainage
Parameters
----------
t : array like of float
Time coord.
Returns
-------
Us : array of float of size len(t)
Settlement degree of consolidation at time t
"""
Tv = self.Tv(t)[:, None]
mvl = self.mvl
qu = self.qu
M = self.M[None, :]
f = 2 / M**2 * np.exp(-M**2 * Tv)
f = np.sum(f, axis=1)
f*=-1
f +=1
return f
def Us_PTPB(self, t):
"""Average degree of consolidation from settlement for PTPB drainage
Parameters
----------
t : array like of float
Time coord.
Returns
-------
Us : array of float of size len(t)
Settlement degree of consolidation at time t.
"""
Tv = self.Tv(t)[:, None]
mvl = self.mvl
qu = self.qu
M = self.M[None, :]
f = 2 / M**2 * np.exp(-4 * M**2 * Tv)
f = np.sum(f, axis=1)
f*=-1
f +=1
return f
def Up_PTIB(self, t):
"""Average degree of consolidation from p.press for PTIB drainage
Parameters
----------
t : array like of float
Time coord.
Returns
-------
Up : array of float of size len(t)
            Pore pressure average degree of consolidation at time t.
"""
def u(a, t):
"""wrapper for self.u_PTIB for scalar args"""
a = np.atleast_1d(a)
t = np.atleast_1d(t)
return self.u_PTIB(a,t)[0, 0]
qu = self.qu
H = self.H
f = np.empty(len(t), dtype=float)
#TODO: replace call to quad with my own numerical integrations to avoid scipy license
for i, t_ in enumerate(t):
f[i] = 1 - 1.0 / (H * qu) * integrate.quad(u, 0, H, (t_,))[0]
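        # A possible way to address the TODO above without scipy (a sketch, not
        # part of the original implementation): evaluate u on a dense depth grid
        # and integrate with the trapezoidal rule, e.g.
        #   a_grid = np.linspace(0, H, 200)
        #   ua = self.u_PTIB(a_grid, np.array([t_]))[:, 0]
        #   f[i] = 1 - np.trapz(ua, a_grid) / (H * qu)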
return f
def Up_PTPB(self, t):
"""Average degree of consolidation from p.press for PTPB drainage
Parameters
----------
t : array like of float
Time coord.
Returns
-------
Up : array of float of size len(t)
Pore pressure average degree of consolidation at time t.
"""
def u(a, t):
"""wrapper for self.u_PTPB for scalar args"""
a = np.atleast_1d(a)
t = np.atleast_1d(t)
return self.u_PTPB(a,t)[0, 0]
qu = self.qu
H = self.H
f = np.empty(len(t), dtype=float)
#TODO: replace call to quad with my own numerical integrations to avoid scipy license
for i, t_ in enumerate(t):
f[i] = 1 - 1.0 / (H * qu) * integrate.quad(u, 0, H, (t_,))[0]
return f
def effective_stress_PTIB(self, a, t):
"""Effective stress for PTIB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
effective_stress : float of size (len(a), len(t))
Effective stress at depth a, time t.
"""
u = self.u_PTIB(a,t)
sig_0 = self.initial_effective_stress(a)[:, None]
        sig_ = sig_0 + self.qu - u
return sig_
def effective_stress_PTPB(self, a, t):
"""Effective stress for PTPB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
effective_stress : float of size (len(a), len(t))
Effective stress at depth a, time t.
"""
u = self.u_PTPB(a,t)
sig_0 = self.initial_effective_stress(a)[:, None]
        sig_ = sig_0 + self.qu - u
return sig_
def total_stress_PTIB(self, a, t):
"""Total stress for PTIB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
total_stress : float of size (len(a), len(t))
Total stress at depth a, time t.
"""
gamw = self.gamw
Hw = self.Hw
S = self.settlement_PTIB(a, t)
sig_0 = self.initial_effective_stress(a)[:, None]
a = a[:, None]
        sig = sig_0 + self.qu + gamw * (Hw + a + S)
return sig
def total_stress_PTPB(self, a, t):
"""Total stress for PTPB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
total_stress : float of size (len(a), len(t))
Total stress at depth a, time t.
"""
gamw = self.gamw
Hw = self.Hw
S = self.settlement_PTPB(a, t)
sig_0 = self.initial_effective_stress(a)[:, None]
a = a[:, None]
        sig = sig_0 + self.qu + gamw * (Hw + a + S)
return sig
def total_pore_pressure_PTIB(self, a, t):
"""Total pore pressure for PTIB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
total_pore_pressure : float of size (len(a), len(t))
Total pore pressure at depth a, time t.
"""
gamw = self.gamw
Hw = self.Hw
u = self.u_PTIB(a,t)
S = self.settlement_PTIB(a, t)
a = a[:, None]
p = u + gamw * (Hw + a + S)
return p
def total_pore_pressure_PTPB(self, a, t):
"""Total pore pressure for PTPB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
total_pore_pressure : float of size (len(a), len(t))
Total pore pressure at depth a, time t.
"""
gamw = self.gamw
Hw = self.Hw
u = self.u_PTPB(a,t)
S = self.settlement_PTPB(a, t)
a = a[:, None]
p = u + gamw * (Hw + a + S)
return p
    def e_PTIB(self, a, t):
        """Void ratio for PTIB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
e : float of size (len(a), len(t))
            Void ratio at depth a, time t.
"""
e0 = self.e0(a)[:, None]
efinal = self.efinal(a)[:, None]
Tv = self.Tv(t)[None, :, None]
a = (a/ self.H)[:, None, None]
M = self.M[None, None, :]
f = 2 / M * np.sin(M * a) * np.exp(-M**2 * Tv)
f = np.sum(f, axis=2)
f *= e0 - efinal
f += efinal
return f
    def e_PTPB(self, a, t):
        """Void ratio for PTPB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
e : float of size (len(a), len(t))
            Void ratio at depth a, time t.
"""
e0 = self.e0(a)[:, None]
efinal = self.efinal(a)[:, None]
Tv = self.Tv(t)[None, :, None]
a = (a/ self.H)[:, None, None]
M = self.M[None, None, :]
f = 2 / M * np.sin(2 * M * a) * np.exp(-4 * M**2 * Tv)
f = np.sum(f, axis=2)
f *= e0 - efinal
f += efinal
return f
def vs_PTIB(self, a, t):
"""Velocity of soil particles for PTIB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
vs : float of size (len(a), len(t))
Velocity of soil particles at depth a, time t.
"""
mvl = self.mvl
qu = self.qu
cv0 = self.cv0
H = self.H
Tv = self.Tv(t)[None, :, None]
a = (a / self.H)[:, None, None]
M = self.M[None, None, :]
f = np.cos(M * a) * np.exp(-M**2 * Tv)
f = np.sum(f, axis=2)
f *= 1 - np.exp(-mvl * qu)
f *= 2 * cv0/H
return f
def vs_PTPB(self, a, t):
"""Velocity of soil particles for PTPB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
vs : float of size (len(a), len(t))
Velocity of soil particles at depth a, time t.
"""
mvl = self.mvl
qu = self.qu
cv0 = self.cv0
H = self.H
Tv = self.Tv(t)[None, :, None]
a = (a / self.H)[:, None, None]
M = self.M[None, None, :]
f = (1 + np.cos(2 * M * a)) * np.exp(-4 * M**2 * Tv)
f = np.sum(f, axis=2)
f *= 1 - np.exp(-mvl * qu)
f *= 4 * cv0/H
return f
def vw_PTIB(self, a, t):
"""Velocity of fluid for PTIB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
vw : float of size (len(a), len(t))
Velocity of fluid at depth a, time t.
"""
mvl = self.mvl
qu = self.qu
cv0 = self.cv0
H = self.H
e = self.e_PTIB(a, t)
Tv = self.Tv(t)[None, :, None]
a = (a / self.H)[:, None, None]
M = self.M[None, None, :]
f = np.cos(M * a) * np.exp(-M**2 * Tv)
f = np.sum(f, axis=2)
f *= 1 - np.exp(-mvl * qu)
f *= 2 * cv0 / H
f /=e
return f
def vw_PTPB(self, a, t):
"""Velocity of fluid for PTPB drainage
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
vw : float of size (len(a), len(t))
Velocity of fluid at depth a, time t.
"""
mvl = self.mvl
qu = self.qu
cv0 = self.cv0
H = self.H
e = self.e_PTPB(a, t)
Tv = self.Tv(t)[None, :, None]
a = (a / self.H)[:, None, None]
M = self.M[None, None, :]
f1 = np.exp(-4 * M**2 * Tv)
f1 = np.sum(f1, axis=2)
f1 *= 1 - np.exp(-mvl * qu)
f1 *= 4 * cv0 / H
f1=f1.ravel()[None,:]*(1+e)/e
# f1 *= 1.0 + e
# f1 /= e
f2 = (1 + np.cos(2 * M * a)) * np.exp(-4 * M**2 * Tv)
f2 = np.sum(f2, axis=2)
f2 *= 1 - np.exp(-mvl * qu)
f2 *= 2 * cv0/H
f2 /=e
return f1-f2
    def xi_PTIB(self, a, t):
        """Convective coordinate from the Lagrangian coordinate
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
xi : float of size (len(a), len(t))
Convective coordinate at depth a, time t.
"""
S = self.settlement_PTIB(a,t)
f = a[:,None] + S
return f
    def xi_PTPB(self, a, t):
        """Convective coordinate from the Lagrangian coordinate
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
Returns
-------
xi : float of size (len(a), len(t))
Convective coordinate at depth a, time t.
"""
S = self.settlement_PTPB(a,t)
f = a[:,None] + S
return f
def plot_all(self, a=None, t=None, figsize=(15,10)):
"""produce generic plots of all analysis variables
Parameters
----------
a : array like of float
Depth coord.
t : array like of float
Time coord.
figsize : 2-element tuple, optional
Width and height of figure in inches. Default figsize=(15, 10)
Returns
-------
fig : matplotlib.Figure
figure with all properties.
"""
if self.drn==1:
u_ = self.u_PTIB
settlement_ = self.settlement_PTIB
Us_ = self.Us_PTIB
Up_ = self.Up_PTIB
effective_stress_ = self.effective_stress_PTIB
total_stress_ = self.total_stress_PTIB
total_pore_pressure_ = self.total_pore_pressure_PTIB
e_ = self.e_PTIB
vs_ = self.vs_PTIB
vw_ = self.vw_PTIB
xi_ = self.xi_PTIB
else:
u_ = self.u_PTPB
settlement_ = self.settlement_PTPB
Us_ = self.Us_PTPB
Up_ = self.Up_PTPB
effective_stress_ = self.effective_stress_PTPB
total_stress_ = self.total_stress_PTPB
total_pore_pressure_ = self.total_pore_pressure_PTPB
e_ = self.e_PTPB
vs_ = self.vs_PTPB
vw_ = self.vw_PTPB
xi_ = self.xi_PTPB
t_interp = np.logspace(np.log10(0.0001/self.dTv),np.log10(3/self.dTv),100)
Us_interp = Us_(t_interp)
Tv_interp = self.Tv(t_interp)
# determine times to plot
if t is None:
Us_plot = np.linspace(0,1,11)
Us_plot[-1] = 0.99
t = np.interp(Us_plot, Us_interp, t_interp)
Tv = self.Tv(t)
if a is None:
a = np.linspace(0, self.H, 100)
a = np.asarray(a)
u = u_(a,t)
settlement = settlement_(a, t)
Up_interp = Up_(t_interp)
effective_stress = effective_stress_(a, t)
total_stress = total_stress_(a, t)
total_pore_pressure = total_pore_pressure_(a, t)
e = e_(a, t)
vs = vs_(a, t)
vw = vw_(a, t)
xi = xi_(a, t)
matplotlib.rcParams['font.size'] = 10
fig = plt.figure(figsize = figsize)
# Us and Up vs time
ax = fig.add_subplot(2,4,1)
ax.plot(Tv_interp, Us_interp, label="$U_s$")
ax.plot(Tv_interp, Up_interp, label="$U_p$")
ax.set_xlabel('$T_v,\, dT_v=${:6.2g}'.format(self.dTv))
ax.set_ylabel('U')
ax.set_ylim(0,1)
ax.invert_yaxis()
ax.set_xscale('log')
ax.grid()
leg = plt.legend(loc=3 )
leg.draggable()
#u vs depth
ax = fig.add_subplot(2,4,2)
ax.plot(u, xi)
ax.set_xlabel("$u$")
ax.set_ylabel(r'$\xi$')
ax.set_ylim(0,self.H)
ax.invert_yaxis()
ax.grid()
for line, t_ in zip(ax.get_lines(), t):
Us = Us_(np.array([t_]))[0]
plt.setp(line,
label='$U_s={Us:6.3g}$\n$T_v={Tv:6.3g}$\n'
'$t={t:6.3g}$'.format(Tv=self.dTv*t_, t=t_, Us=Us))
loc = 'lower center'
bbox_transform = fig.transFigure
bbox_to_anchor = (0.5, 0)
leg = fig.legend(ax.get_lines(),
[v.get_label() for v in ax.get_lines()], loc=loc, bbox_transform=bbox_transform,
bbox_to_anchor=bbox_to_anchor,
ncol=len(t))
leg.draggable()
#total pore pressure vs depth
ax = fig.add_subplot(2,4,6)
ax.plot(total_pore_pressure, xi)
ax.set_xlabel("$p$")
ax.set_ylabel(r'$\xi$')
ax.set_ylim(0,self.H)
ax.invert_yaxis()
ax.grid()
#effective stress vs depth
ax = fig.add_subplot(2,4,3)
ax.plot(effective_stress, xi)
ax.set_xlabel("$\sigma'$")
ax.set_ylabel(r'$\xi$')
ax.set_ylim(0,self.H)
ax.invert_yaxis()
ax.grid()
#total stress vs depth
ax = fig.add_subplot(2,4,7)
ax.plot(total_stress, xi)
ax.set_xlabel("$\sigma$")
ax.set_ylabel(r'$\xi$')
ax.set_ylim(0,self.H)
ax.invert_yaxis()
ax.grid()
#velocity of solids vs depth
ax = fig.add_subplot(2,4,4)
ax.plot(vs, xi)
ax.set_xlabel("$v_s$")
ax.set_ylabel(r'$\xi$')
ax.set_ylim(0, self.H)
ax.invert_yaxis()
ax.grid()
#velocity of water vs depth
ax = fig.add_subplot(2,4,8)
ax.plot(vw, xi)
ax.set_xlabel("$v_w$")
ax.set_ylabel(r'$\xi$')
ax.set_ylim(0, self.H)
ax.invert_yaxis()
ax.grid()
#void ratio vs depth
ax = fig.add_subplot(2,4,5)
ax.plot(e, xi)
ax.set_xlabel("$e$")
ax.set_ylabel(r'$\xi$')
ax.set_ylim(0, self.H)
ax.invert_yaxis()
ax.grid()
# fig.tight_layout()
# fig.tight_layout()
# bbox = leg.get_frame().get_bbox()
# print(bbox)
# plt.Figure.legend()
# a=plt.getp(fig.legend, 'bbox')
# print(a)
# bbox = fig.legend.get_window_extent()
# print(bbox)
# bbox2 = bbox.transformed(fig.transFigure.inverted())
# bbox2.width,bbox2.height
# print(bbox2)
#
fig.subplots_adjust(top=0.97, bottom=0.15, left=0.05, right=0.97)
return fig
    def t_from_Us_PTIB(self, Us):
        """Back calculate t from specified settlement degree of consolidation, PTIB
Parameters
----------
Us : 1d array
            Values of degree of consolidation by settlement at which to calculate t.
Returns
-------
t : 1d array
            Times corresponding to Us.
"""
t_interp = np.logspace(np.log10(0.0001/self.dTv),np.log10(10/self.dTv), 500)
Us_interp = self.Us_PTIB(t_interp)
t = np.interp(Us, Us_interp, t_interp)
return t
    def t_from_Us_PTPB(self, Us):
        """Back calculate t from specified settlement degree of consolidation, PTPB
Parameters
----------
Us : 1d array
            Values of degree of consolidation by settlement at which to calculate t.
Returns
-------
t : 1d array
Times coresponding to Us.
"""
t_interp = np.logspace(np.log10(0.0001/self.dTv),np.log10(10/self.dTv), 500)
Us_interp = self.Us_PTPB(t_interp)
t = np.interp(Us, Us_interp, t_interp)
return t
def xie_and_leo_2004_figure_4(ax=None):
"""Reproduce figure 4 from article by Xie and Leo 2004
Pore pressure vs xi plot for various degrees of consolidation PTIB
Parameters
----------
ax : matplotlib.Axes
Axes object to plot on. If ax=None. plt.gca() will be used.
"""
qu=100
qp=10
H=10
Hw=1.0
kv0=1e-9
mvl=4e-3
e00=3
Gs=2.75
gamw=10 #N
drn=1
nterms=100
obj = XieAndLeo2004(qu=qu, qp=qp, H=H, Hw=Hw,
kv0=kv0, mvl=mvl,e00=e00, Gs=Gs, gamw=gamw,
drn=drn, nterms=nterms)
a = np.linspace(0,H, 100)
Us = np.array([0.3, 0.5, 0.7, 0.9])
t = obj.t_from_Us_PTIB(Us)
u = obj.u_PTIB(a, t)
xi = obj.xi_PTIB(a, t)
if ax is None:
ax = plt.gca()
ax.plot(u, xi)
ax.set_xlabel("$u$ Pore water pressure, PTPB")
ax.set_ylabel(r'$\xi$, depth from initial top surface')
ax.set_title("Figure 4 from Xie and Leo 2004")
ax.set_ylim(0, H)
ax.invert_yaxis()
ax.grid()
for line, t_, Us_ in zip(ax.get_lines(), t, Us):
plt.setp(line,
label='$U_s={Us:6.3g},\hspace{{0.5}}T_v={Tv:6.3g},\hspace{{0.5}}'
't={t:6.3g}$'.format(Tv=obj.dTv*t_, t=t_, Us=Us_))
leg = ax.legend(loc=1, labelspacing=0.0, fontsize=11)
leg.draggable()
return
def xie_and_leo_2004_figure_5(ax=None):
"""Reproduce fig 5 from article by Xie and Leo 2004
Pore pressure vs xi plot for various degrees of consolidation PTPB.
Parameters
----------
ax : matplotlib.Axes
Axes object to plot on. If ax=None. plt.gca() will be used.
"""
qu=100
qp=10
H=10
Hw=1.0
kv0=1e-9
mvl=4e-3
e00=3
Gs=2.75
gamw=10 #N
drn=0
nterms=100
obj = XieAndLeo2004(qu=qu, qp=qp, H=H, Hw=Hw,
kv0=kv0, mvl=mvl,e00=e00, Gs=Gs, gamw=gamw,
drn=drn, nterms=nterms)
a = np.linspace(0,H, 100)
Us = np.array([0.3, 0.5, 0.7, 0.9])
t = obj.t_from_Us_PTPB(Us)
u = obj.u_PTPB(a, t)
xi = obj.xi_PTPB(a, t)
if ax is None:
ax = plt.gca()
ax.plot(u, xi)
ax.set_xlabel("$u$ Pore water pressure, PTPB")
ax.set_ylabel(r'$\xi$, depth from initial top surface')
ax.set_title("Figure 5 from Xie and Leo 2004")
ax.set_ylim(0, H)
ax.invert_yaxis()
ax.grid()
for line, t_, Us_ in zip(ax.get_lines(), t, Us):
plt.setp(line,
label='$U_s={Us:6.3g},\hspace{{0.5}}T_v={Tv:6.3g},\hspace{{0.5}}'
't={t:6.3g}$'.format(Tv=obj.dTv*t_, t=t_, Us=Us_))
leg = ax.legend(loc=1, labelspacing=0.0, fontsize=11)
leg.draggable()
return
def xie_and_leo_2004_figure_6(ax=None):
"""Reproduce fig 6 from article by Xie and Leo 2004
Settlement vs time and degree of consolidation vs time.
Parameters
----------
ax : matplotlib.Axes
Axes object to plot on. If ax=None. plt.gca() will be used.
"""
qu=100
qp=10
H=10
Hw=1.0
kv0=1e-9
mvl=4e-3
e00=3
Gs=2.75
gamw=10 #N
drn=0
nterms=100
obj = XieAndLeo2004(qu=qu, qp=qp, H=H, Hw=Hw,
kv0=kv0, mvl=mvl,e00=e00, Gs=Gs, gamw=gamw,
drn=drn, nterms=nterms)
Tv = np.logspace(-3,np.log10(6), 200)
t = Tv / obj.dTv
Up_PTPB = obj.Up_PTPB(t)
Us_PTPB = obj.Us_PTPB(t)
S_PTPB = obj.settlement_PTPB(np.array([0.0]), t)
Up_PTIB = obj.Up_PTIB(t)
Us_PTIB = obj.Us_PTIB(t)
S_PTIB = obj.settlement_PTIB(np.array([0.0]), t)
if ax is None:
ax = plt.gca()
ax.plot(Tv, Us_PTPB, label="$U_s,\hspace{0.5}PTPB$", color='b', ls="-")
ax.plot(Tv, Up_PTPB, label="$U_p,\hspace{0.5}PTPB$", color='b', ls="--")
ax.plot(Tv, Us_PTIB, label="$U_s,\hspace{0.5}PTIB$", color='g', ls="-")
ax.plot(Tv, Up_PTIB, label="$U_p,\hspace{0.5}PTIB$", color='g', ls="--")
ax.set_xlabel("$T_v$")
ax.set_ylabel(r'degree of consolidation')
ax.set_title("Figure 6 from Xie and Leo 2004")
ax.set_ylim(0, 1)
ax.invert_yaxis()
ax.set_xscale('log')
ticks11a = matplotlib.ticker.LinearLocator(11)
ax.yaxis.set_major_locator(ticks11a)
# ax.locator_params(axis='y', nbins=16)
ax.grid(ls="-", which="major")
ax.grid(which="minor")
ax2 = ax.twinx()
ax2.plot(Tv, S_PTPB[0], label="$settlement,\hspace{0.5}PTPB$",color='b', dashes = [3,2,6,2])
ax2.plot(Tv, S_PTIB[0], label=r"$settlement,\hspace{0.5}PTIB$",color='g', dashes = [3,2,6,2])
ax2.set_ylabel(r'Settlement (m)')
ax2.set_ylim(0, 4)
ax2.invert_yaxis()
ticks11b = matplotlib.ticker.LinearLocator(11)
ax2.yaxis.set_major_locator(ticks11b)
lines=[]
labels=[]
for i in ax.get_lines():
lines.append(i)
labels.append(i.get_label())
for i in ax2.get_lines():
lines.append(i)
labels.append(i.get_label())
leg = ax.legend(lines, labels,loc=1, labelspacing=0.0, fontsize=12)
leg.draggable()
return
if __name__ == '__main__':
if 0:
xie_and_leo_2004_figure_4()
plt.show()
if 0:
xie_and_leo_2004_figure_5()
plt.show()
if 0:
xie_and_leo_2004_figure_6()
plt.show()
if 0:
# plot all
qu=100
qp=10
H=10
Hw=1.0
kv0=1e-9
mvl=4e-3
e00=3
Gs=2.75
gamw=10 #N
drn=1
nterms=100
obj = XieAndLeo2004(qu=qu, qp=qp, H=H, Hw=Hw,
kv0=kv0, mvl=mvl,e00=e00, Gs=Gs, gamw=gamw,
drn=drn, nterms=nterms)
fig = obj.plot_all()
plt.show()
| gpl-3.0 |
jjs0sbw/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/testing/jpl_units/__init__.py | 3 | 3027 | #=======================================================================
"""
This is a sample set of units for use with testing unit conversion
of matplotlib routines. These are used because they use very strict
enforcement of unitized data which will test the entire spectrum of how
unitized data might be used (it is not always meaningful to convert to
a float without specific units given).
UnitDbl is essentially a unitized floating point number. It has a
minimal set of supported units (enough for testing purposes). All
of the mathematical operations are provided to fully test any behaviour
that might occur with unitized data. Remember that unitized data has
rules as to how it can be applied to one another (a value of distance
cannot be added to a value of time). Thus we need to guard against any
accidental "default" conversion that will strip away the meaning of the
data and render it neutered.
Epoch is different than a UnitDbl of time. Time is something that can be
measured where an Epoch is a specific moment in time. Epochs are typically
referenced as an offset from some predetermined epoch.
A difference of two epochs is a Duration. The distinction between a
Duration and a UnitDbl of time is made because an Epoch can have different
frames (or units). In the case of our test Epoch class the two allowed
frames are 'UTC' and 'ET' (Note that these are rough estimates provided for
testing purposes and should not be used in production code where accuracy
of time frames is desired). As such a Duration also has a frame of
reference and therefore needs to be called out as different that a simple
measurement of time since a delta-t in one frame may not be the same in another.
"""
#=======================================================================
from Duration import Duration
from Epoch import Epoch
from UnitDbl import UnitDbl
from StrConverter import StrConverter
from EpochConverter import EpochConverter
from UnitDblConverter import UnitDblConverter
from UnitDblFormatter import UnitDblFormatter
#=======================================================================
__version__ = "1.0"
__all__ = [
'register',
'Duration',
'Epoch',
'UnitDbl',
'UnitDblFormatter',
]
#=======================================================================
def register():
"""Register the unit conversion classes with matplotlib."""
import matplotlib.units as mplU
mplU.registry[ str ] = StrConverter()
mplU.registry[ Epoch ] = EpochConverter()
mplU.registry[ UnitDbl ] = UnitDblConverter()
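# Usage sketch (not part of the original module): after calling register(),
# matplotlib can plot unitized values directly, e.g.
#   register()
#   x = [ UnitDbl( 1.0, "sec" ), UnitDbl( 2.0, "sec" ), UnitDbl( 3.0, "sec" ) ]
#   y = [ UnitDbl( 1.0, "km" ), UnitDbl( 2.0, "km" ), UnitDbl( 3.0, "km" ) ]
#   plt.plot( x, y )   # UnitDblConverter handles the axis conversion
# Mixing incompatible units (e.g. adding a distance to a time) is rejected,
# per the rules described in the module docstring.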
#=======================================================================
# Some default unit instances
# Distances
m = UnitDbl( 1.0, "m" )
km = UnitDbl( 1.0, "km" )
mile = UnitDbl( 1.0, "mile" )
# Angles
deg = UnitDbl( 1.0, "deg" )
rad = UnitDbl( 1.0, "rad" )
# Time
sec = UnitDbl( 1.0, "sec" )
min = UnitDbl( 1.0, "min" )
hr = UnitDbl( 1.0, "hour" )
day = UnitDbl( 24.0, "hour" )
sec = UnitDbl( 1.0, "sec" )
| gpl-3.0 |
nesterione/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
Myasuka/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
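        # Sketch of the one-hot behaviour described in the class docstring
        # (illustrative only):
        #   v = DictVectorizer(sparse=False)
        #   v.fit_transform([{"f": "ham"}, {"f": "spam"}])  # -> [[1., 0.], [0., 1.]]
        #   v.get_feature_names()                           # -> ['f=ham', 'f=spam']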
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/api/legend_demo.py | 6 | 1083 | import numpy as np
import matplotlib.pyplot as plt
a = np.arange(0,3,.02)
b = np.arange(0,3,.02)
c = np.exp(a)
d = c[::-1]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(a,c,'k--',a,d,'k:',a,c+d,'k')
leg = ax.legend(('Model length', 'Data length', 'Total message length'),
'upper center', shadow=True)
ax.set_ylim([-1,20])
ax.grid(False)
ax.set_xlabel('Model complexity --->')
ax.set_ylabel('Message length --->')
ax.set_title('Minimum Message Length')
ax.set_yticklabels([])
ax.set_xticklabels([])
# set some legend properties. All the code below is optional. The
# defaults are usually sensible but if you need more control, this
# shows you how
# the matplotlib.patches.Rectangle instance surrounding the legend
frame = leg.get_frame()
frame.set_facecolor('0.80') # set the frame face color to light gray
# matplotlib.text.Text instances
for t in leg.get_texts():
t.set_fontsize('small') # the legend text fontsize
# matplotlib.lines.Line2D instances
for l in leg.get_lines():
l.set_linewidth(1.5) # the legend line width
plt.show()
| mit |
Ziqi-Li/bknqgis | pandas/pandas/tests/scalar/test_nat.py | 2 | 7458 | import pytest
from datetime import datetime, timedelta
import pytz
import numpy as np
from pandas import (NaT, Index, Timestamp, Timedelta, Period,
DatetimeIndex, PeriodIndex,
TimedeltaIndex, Series, isna)
from pandas.util import testing as tm
from pandas._libs.tslib import iNaT
@pytest.mark.parametrize('nat, idx', [(Timestamp('NaT'), DatetimeIndex),
(Timedelta('NaT'), TimedeltaIndex),
(Period('NaT', freq='M'), PeriodIndex)])
def test_nat_fields(nat, idx):
for field in idx._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == 'weekday':
continue
result = getattr(NaT, field)
assert np.isnan(result)
result = getattr(nat, field)
assert np.isnan(result)
for field in idx._bool_ops:
result = getattr(NaT, field)
assert result is False
result = getattr(nat, field)
assert result is False
def test_nat_vector_field_access():
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == 'weekday':
continue
result = getattr(idx, field)
expected = Index([getattr(x, field) for x in idx])
tm.assert_index_equal(result, expected)
s = Series(idx)
for field in DatetimeIndex._field_ops:
# weekday is a property of DTI, but a method
# on NaT/Timestamp for compat with datetime
if field == 'weekday':
continue
result = getattr(s.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
for field in DatetimeIndex._bool_ops:
result = getattr(s.dt, field)
expected = [getattr(x, field) for x in idx]
tm.assert_series_equal(result, Series(expected))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta, Period])
def test_identity(klass):
assert klass(None) is NaT
result = klass(np.nan)
assert result is NaT
result = klass(None)
assert result is NaT
result = klass(iNaT)
assert result is NaT
result = klass(np.nan)
assert result is NaT
result = klass(float('nan'))
assert result is NaT
result = klass(NaT)
assert result is NaT
result = klass('NaT')
assert result is NaT
assert isna(klass('nat'))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta, Period])
def test_equality(klass):
# nat
if klass is not Period:
klass('').value == iNaT
klass('nat').value == iNaT
klass('NAT').value == iNaT
klass(None).value == iNaT
klass(np.nan).value == iNaT
assert isna(klass('nat'))
@pytest.mark.parametrize('klass', [Timestamp, Timedelta])
def test_round_nat(klass):
# GH14940
ts = klass('nat')
for method in ["round", "floor", "ceil"]:
round_method = getattr(ts, method)
for freq in ["s", "5s", "min", "5min", "h", "5h"]:
assert round_method(freq) is ts
def test_NaT_methods():
# GH 9513
raise_methods = ['astimezone', 'combine', 'ctime', 'dst',
'fromordinal', 'fromtimestamp', 'isocalendar',
'strftime', 'strptime', 'time', 'timestamp',
'timetuple', 'timetz', 'toordinal', 'tzname',
'utcfromtimestamp', 'utcnow', 'utcoffset',
'utctimetuple']
nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today',
'tz_convert', 'tz_localize']
nan_methods = ['weekday', 'isoweekday']
for method in raise_methods:
if hasattr(NaT, method):
with pytest.raises(ValueError):
getattr(NaT, method)()
for method in nan_methods:
if hasattr(NaT, method):
assert np.isnan(getattr(NaT, method)())
for method in nat_methods:
if hasattr(NaT, method):
# see gh-8254
exp_warning = None
if method == 'to_datetime':
exp_warning = FutureWarning
with tm.assert_produces_warning(
exp_warning, check_stacklevel=False):
assert getattr(NaT, method)() is NaT
# GH 12300
assert NaT.isoformat() == 'NaT'
@pytest.mark.parametrize('klass', [Timestamp, Timedelta])
def test_isoformat(klass):
result = klass('NaT').isoformat()
expected = 'NaT'
assert result == expected
def test_nat_arithmetic():
# GH 6873
i = 2
f = 1.5
for (left, right) in [(NaT, i), (NaT, f), (NaT, np.nan)]:
assert left / right is NaT
assert left * right is NaT
assert right * left is NaT
with pytest.raises(TypeError):
right / left
# Timestamp / datetime
t = Timestamp('2014-01-01')
dt = datetime(2014, 1, 1)
for (left, right) in [(NaT, NaT), (NaT, t), (NaT, dt)]:
# NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
assert right + left is NaT
assert left + right is NaT
assert left - right is NaT
assert right - left is NaT
# timedelta-like
# offsets are tested in test_offsets.py
delta = timedelta(3600)
td = Timedelta('5s')
for (left, right) in [(NaT, delta), (NaT, td)]:
# NaT + timedelta-like returns NaT
assert right + left is NaT
assert left + right is NaT
assert right - left is NaT
assert left - right is NaT
# GH 11718
t_utc = Timestamp('2014-01-01', tz='UTC')
t_tz = Timestamp('2014-01-01', tz='US/Eastern')
dt_tz = pytz.timezone('Asia/Tokyo').localize(dt)
for (left, right) in [(NaT, t_utc), (NaT, t_tz),
(NaT, dt_tz)]:
# NaT __add__ or __sub__ Timestamp-like (or inverse) returns NaT
assert right + left is NaT
assert left + right is NaT
assert left - right is NaT
assert right - left is NaT
# int addition / subtraction
for (left, right) in [(NaT, 2), (NaT, 0), (NaT, -3)]:
assert right + left is NaT
assert left + right is NaT
assert left - right is NaT
assert right - left is NaT
def test_nat_arithmetic_index():
# GH 11718
dti = DatetimeIndex(['2011-01-01', '2011-01-02'], name='x')
exp = DatetimeIndex([NaT, NaT], name='x')
tm.assert_index_equal(dti + NaT, exp)
tm.assert_index_equal(NaT + dti, exp)
dti_tz = DatetimeIndex(['2011-01-01', '2011-01-02'],
tz='US/Eastern', name='x')
exp = DatetimeIndex([NaT, NaT], name='x', tz='US/Eastern')
tm.assert_index_equal(dti_tz + NaT, exp)
tm.assert_index_equal(NaT + dti_tz, exp)
exp = TimedeltaIndex([NaT, NaT], name='x')
for (left, right) in [(NaT, dti), (NaT, dti_tz)]:
tm.assert_index_equal(left - right, exp)
tm.assert_index_equal(right - left, exp)
# timedelta
tdi = TimedeltaIndex(['1 day', '2 day'], name='x')
exp = DatetimeIndex([NaT, NaT], name='x')
for (left, right) in [(NaT, tdi)]:
tm.assert_index_equal(left + right, exp)
tm.assert_index_equal(right + left, exp)
tm.assert_index_equal(left - right, exp)
tm.assert_index_equal(right - left, exp)
| gpl-2.0 |
Ziqi-Li/bknqgis | pandas/pandas/core/reshape/tile.py | 1 | 13771 | """
Quantilization functions and related stuff
"""
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.common import (
is_integer,
is_scalar,
is_categorical_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
_ensure_int64)
import pandas.core.algorithms as algos
import pandas.core.nanops as nanops
from pandas._libs.lib import infer_dtype
from pandas import (to_timedelta, to_datetime,
Categorical, Timestamp, Timedelta,
Series, Interval, IntervalIndex)
import numpy as np
def cut(x, bins, right=True, labels=None, retbins=False, precision=3,
include_lowest=False):
"""
Return indices of half-open bins to which each value of `x` belongs.
Parameters
----------
x : array-like
Input array to be binned. It has to be 1-dimensional.
bins : int, sequence of scalars, or IntervalIndex
If `bins` is an int, it defines the number of equal-width bins in the
range of `x`. However, in this case, the range of `x` is extended
by .1% on each side to include the min or max values of `x`. If
`bins` is a sequence it defines the bin edges allowing for
non-uniform bin width. No extension of the range of `x` is done in
this case.
right : bool, optional
Indicates whether the bins include the rightmost edge or not. If
right == True (the default), then the bins [1,2,3,4] indicate
(1,2], (2,3], (3,4].
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the bins or not. Can be useful if bins is given
as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
include_lowest : bool, optional
Whether the first interval should be left-inclusive or not.
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
The `cut` function can be useful for going from a continuous variable to
a categorical variable. For example, `cut` could convert ages to groups
of age ranges.
Any NA values will be NA in the result. Out of bounds values will be NA in
the resulting Categorical object
Examples
--------
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3, retbins=True)
... # doctest: +ELLIPSIS
([(0.19, 3.367], (0.19, 3.367], (0.19, 3.367], (3.367, 6.533], ...
Categories (3, interval[float64]): [(0.19, 3.367] < (3.367, 6.533] ...
>>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]),
... 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, good, medium, bad, good]
Categories (3, object): [good < medium < bad]
>>> pd.cut(np.ones(5), 4, labels=False)
array([1, 1, 1, 1, 1])
"""
# NOTE: this binning code is changed a bit from histogram for var(x) == 0
# for handling the cut for datetime and timedelta objects
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if not np.iterable(bins):
if is_scalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
try: # for array-like
sz = x.size
except AttributeError:
x = np.asarray(x)
sz = x.size
if sz == 0:
raise ValueError('Cannot cut empty array')
rng = (nanops.nanmin(x), nanops.nanmax(x))
mn, mx = [mi + 0.0 for mi in rng]
if mn == mx: # adjust end points before binning
mn -= .001 * abs(mn) if mn != 0 else .001
mx += .001 * abs(mx) if mx != 0 else .001
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else: # adjust end points after binning
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
adj = (mx - mn) * 0.001 # 0.1% of the range
if right:
bins[0] -= adj
else:
bins[-1] += adj
elif isinstance(bins, IntervalIndex):
pass
else:
bins = np.asarray(bins)
bins = _convert_bin_to_numeric_type(bins, dtype)
if (np.diff(bins) < 0).any():
raise ValueError('bins must increase monotonically.')
fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels,
precision=precision,
include_lowest=include_lowest,
dtype=dtype)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name)
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'):
"""
Quantile-based discretization function. Discretize variable into
equal-sized buckets based on rank or based on sample quantiles. For example
1000 values for 10 quantiles would produce a Categorical object indicating
quantile membership for each data point.
Parameters
----------
x : ndarray or Series
q : integer or array of quantiles
Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, return only integer indicators of the
bins.
retbins : bool, optional
Whether to return the (bins, labels) or not. Can be useful if bins
is given as a scalar.
precision : int, optional
The precision at which to store and display the bins labels
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
.. versionadded:: 0.20.0
Returns
-------
out : Categorical or Series or array of integers if labels is False
The return type (Categorical or Series) depends on the input: a Series
of type category if input is a Series else Categorical. Bins are
represented as categories when categorical data is returned.
bins : ndarray of floats
Returned only if `retbins` is True.
Notes
-----
Out of bounds values will be NA in the resulting Categorical object
Examples
--------
>>> pd.qcut(range(5), 4)
... # doctest: +ELLIPSIS
[(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ...
>>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
... # doctest: +SKIP
[good, good, medium, bad, bad]
Categories (3, object): [good < medium < bad]
>>> pd.qcut(range(5), 4, labels=False)
array([0, 0, 1, 2, 3])
"""
x_is_series, series_index, name, x = _preprocess_for_cut(x)
x, dtype = _coerce_to_type(x)
if is_integer(q):
quantiles = np.linspace(0, 1, q + 1)
else:
quantiles = q
bins = algos.quantile(x, quantiles)
fac, bins = _bins_to_cuts(x, bins, labels=labels,
precision=precision, include_lowest=True,
dtype=dtype, duplicates=duplicates)
return _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name)
def _bins_to_cuts(x, bins, right=True, labels=None,
precision=3, include_lowest=False,
dtype=None, duplicates='raise'):
if duplicates not in ['raise', 'drop']:
raise ValueError("invalid value for 'duplicates' parameter, "
"valid options are: raise, drop")
if isinstance(bins, IntervalIndex):
# we have a fast-path here
ids = bins.get_indexer(x)
result = algos.take_nd(bins, ids)
result = Categorical(result, categories=bins, ordered=True)
return result, bins
unique_bins = algos.unique(bins)
if len(unique_bins) < len(bins) and len(bins) != 2:
if duplicates == 'raise':
raise ValueError("Bin edges must be unique: {}.\nYou "
"can drop duplicate edges by setting "
"the 'duplicates' kwarg".format(repr(bins)))
else:
bins = unique_bins
side = 'left' if right else 'right'
ids = _ensure_int64(bins.searchsorted(x, side=side))
if include_lowest:
ids[x == bins[0]] = 1
na_mask = isna(x) | (ids == len(bins)) | (ids == 0)
has_nas = na_mask.any()
if labels is not False:
if labels is None:
labels = _format_labels(bins, precision, right=right,
include_lowest=include_lowest,
dtype=dtype)
else:
if len(labels) != len(bins) - 1:
raise ValueError('Bin labels must be one fewer than '
'the number of bin edges')
if not is_categorical_dtype(labels):
labels = Categorical(labels, categories=labels, ordered=True)
np.putmask(ids, na_mask, 0)
result = algos.take_nd(labels, ids - 1)
else:
result = ids - 1
if has_nas:
result = result.astype(np.float64)
np.putmask(result, na_mask, np.nan)
return result, bins
def _trim_zeros(x):
while len(x) > 1 and x[-1] == '0':
x = x[:-1]
if len(x) > 1 and x[-1] == '.':
x = x[:-1]
return x
def _coerce_to_type(x):
"""
    If the passed data is of datetime/timedelta type,
    this method converts it to integer so that the cut
    method can handle it
"""
dtype = None
if is_timedelta64_dtype(x):
x = to_timedelta(x).view(np.int64)
dtype = np.timedelta64
elif is_datetime64_dtype(x):
x = to_datetime(x).view(np.int64)
dtype = np.datetime64
return x, dtype
def _convert_bin_to_numeric_type(bins, dtype):
"""
    If the passed bins are of datetime/timedelta type,
    this method converts them to integer
    Parameters
    ----------
    bins : list-like of bins
    dtype : dtype of data
    Raises
    ------
    ValueError if bins are not of a dtype compatible with dtype
"""
bins_dtype = infer_dtype(bins)
if is_timedelta64_dtype(dtype):
if bins_dtype in ['timedelta', 'timedelta64']:
bins = to_timedelta(bins).view(np.int64)
else:
raise ValueError("bins must be of timedelta64 dtype")
elif is_datetime64_dtype(dtype):
if bins_dtype in ['datetime', 'datetime64']:
bins = to_datetime(bins).view(np.int64)
else:
raise ValueError("bins must be of datetime64 dtype")
return bins
def _format_labels(bins, precision, right=True,
include_lowest=False, dtype=None):
""" based on the dtype, return our labels """
closed = 'right' if right else 'left'
if is_datetime64_dtype(dtype):
formatter = Timestamp
adjust = lambda x: x - Timedelta('1ns')
elif is_timedelta64_dtype(dtype):
formatter = Timedelta
adjust = lambda x: x - Timedelta('1ns')
else:
precision = _infer_precision(precision, bins)
formatter = lambda x: _round_frac(x, precision)
adjust = lambda x: x - 10 ** (-precision)
breaks = [formatter(b) for b in bins]
labels = IntervalIndex.from_breaks(breaks, closed=closed)
if right and include_lowest:
# we will adjust the left hand side by precision to
# account that we are all right closed
v = adjust(labels[0].left)
i = IntervalIndex.from_intervals(
[Interval(v, labels[0].right, closed='right')])
labels = i.append(labels[1:])
return labels
def _preprocess_for_cut(x):
"""
    handles preprocessing for cut where we convert the passed
    input to an array, strip the index information and store it
    separately
"""
x_is_series = isinstance(x, Series)
series_index = None
name = None
if x_is_series:
series_index = x.index
name = x.name
x = np.asarray(x)
return x_is_series, series_index, name, x
def _postprocess_for_cut(fac, bins, retbins, x_is_series,
series_index, name):
"""
    handles post-processing for the cut method where
    we re-attach the index information if the originally passed
    datatype was a Series
"""
if x_is_series:
fac = Series(fac, index=series_index, name=name)
if not retbins:
return fac
return fac, bins
def _round_frac(x, precision):
"""
Round the fractional part of the given number
"""
if not np.isfinite(x) or x == 0:
return x
else:
frac, whole = np.modf(x)
if whole == 0:
digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
else:
digits = precision
return np.around(x, digits)
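# Worked example (illustrative values): _round_frac(0.000123456, 3) computes
# digits = -floor(log10(0.000123456)) - 1 + 3 = 6 and returns 0.000123, i.e. it
# keeps `precision` significant digits of the fractional part.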
def _infer_precision(base_precision, bins):
"""Infer an appropriate precision for _round_frac
"""
for precision in range(base_precision, 20):
levels = [_round_frac(b, precision) for b in bins]
if algos.unique(levels).size == bins.size:
return precision
return base_precision # default
| gpl-2.0 |
mattjj/pybasicbayes | pybasicbayes/distributions/gaussian.py | 1 | 53626 | from __future__ import division
from builtins import map
from builtins import zip
from builtins import range
from builtins import object
__all__ = \
['Gaussian', 'GaussianFixedMean', 'GaussianFixedCov', 'GaussianFixed',
'GaussianNonConj', 'DiagonalGaussian', 'DiagonalGaussianNonconjNIG',
'IsotropicGaussian', 'ScalarGaussianNIX', 'ScalarGaussianNonconjNIX',
'ScalarGaussianNonconjNIG', 'ScalarGaussianFixedvar']
import numpy as np
from numpy import newaxis as na
from numpy.core.umath_tests import inner1d
import scipy.linalg
import scipy.stats as stats
import scipy.special as special
import copy
from pybasicbayes.abstractions import GibbsSampling, MeanField, \
MeanFieldSVI, Collapsed, MaxLikelihood, MAP, Tempering
from pybasicbayes.distributions.meta import _FixedParamsMixin
from pybasicbayes.util.stats import sample_niw, invwishart_entropy, \
sample_invwishart, invwishart_log_partitionfunction, \
getdatasize, flattendata, getdatadimension, \
combinedata, multivariate_t_loglik, gi, niw_expectedstats
weps = 1e-12
class _GaussianBase(object):
@property
def params(self):
return dict(mu=self.mu, sigma=self.sigma)
@property
def D(self):
return self.mu.shape[0]
### internals
def getsigma(self):
return self._sigma
def setsigma(self,sigma):
self._sigma = sigma
self._sigma_chol = None
sigma = property(getsigma,setsigma)
@property
def sigma_chol(self):
if not hasattr(self,'_sigma_chol') or self._sigma_chol is None:
self._sigma_chol = np.linalg.cholesky(self.sigma)
return self._sigma_chol
### distribution stuff
def rvs(self,size=None):
size = 1 if size is None else size
size = size + (self.mu.shape[0],) if isinstance(size,tuple) \
else (size,self.mu.shape[0])
return self.mu + np.random.normal(size=size).dot(self.sigma_chol.T)
def log_likelihood(self,x):
try:
mu, D = self.mu, self.D
sigma_chol = self.sigma_chol
bads = np.isnan(np.atleast_2d(x)).any(axis=1)
x = np.nan_to_num(x).reshape((-1,D)) - mu
xs = scipy.linalg.solve_triangular(sigma_chol,x.T,lower=True)
out = -1./2. * inner1d(xs.T,xs.T) - D/2*np.log(2*np.pi) \
- np.log(sigma_chol.diagonal()).sum()
out[bads] = 0
return out
except np.linalg.LinAlgError:
# NOTE: degenerate distribution doesn't have a density
return np.repeat(-np.inf,x.shape[0])
### plotting
# TODO making animations, this seems to generate an extra notebook figure
_scatterplot = None
_parameterplot = None
def plot(self,ax=None,data=None,indices=None,color='b',
plot_params=True,label='',alpha=1.,
update=False,draw=True):
import matplotlib.pyplot as plt
from pybasicbayes.util.plot import project_data, \
plot_gaussian_projection, plot_gaussian_2D
ax = ax if ax else plt.gca()
D = self.D
if data is not None:
data = flattendata(data)
if data is not None:
if D > 2:
plot_basis = np.random.RandomState(seed=0).randn(2,D)
data = project_data(data,plot_basis)
if update and self._scatterplot is not None:
self._scatterplot.set_offsets(data)
self._scatterplot.set_color(color)
else:
self._scatterplot = ax.scatter(
data[:,0],data[:,1],marker='.',color=color)
if plot_params:
if D > 2:
plot_basis = np.random.RandomState(seed=0).randn(2,D)
self._parameterplot = \
plot_gaussian_projection(
self.mu,self.sigma,plot_basis,
color=color,label=label,alpha=min(1-1e-3,alpha),
ax=ax, artists=self._parameterplot if update else None)
else:
self._parameterplot = \
plot_gaussian_2D(
self.mu,self.sigma,color=color,label=label,
alpha=min(1-1e-3,alpha), ax=ax,
artists=self._parameterplot if update else None)
if draw:
plt.draw()
return [self._scatterplot] + list(self._parameterplot)
def to_json_dict(self):
D = self.mu.shape[0]
assert D == 2
U,s,_ = np.linalg.svd(self.sigma)
U /= np.linalg.det(U)
theta = np.arctan2(U[0,0],U[0,1])*180/np.pi
return {'x':self.mu[0],'y':self.mu[1],'rx':np.sqrt(s[0]),
'ry':np.sqrt(s[1]), 'theta':theta}
class Gaussian(
_GaussianBase, GibbsSampling, MeanField, MeanFieldSVI,
Collapsed, MAP, MaxLikelihood):
'''
Multivariate Gaussian distribution class.
NOTE: Only works for 2 or more dimensions. For a scalar Gaussian, use a
scalar class. Uses a conjugate Normal/Inverse-Wishart prior.
Hyperparameters mostly follow Gelman et al.'s notation in Bayesian Data
Analysis:
nu_0, sigma_0, mu_0, kappa_0
Parameters are mean and covariance matrix:
mu, sigma
'''
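    # A minimal usage sketch (the hyperparameter values below are illustrative,
    # not recommendations):
    #   prior = dict(mu_0=np.zeros(2), sigma_0=np.eye(2), kappa_0=0.05, nu_0=4)
    #   g = Gaussian(**prior)         # samples (mu, sigma) from the NIW prior
    #   g.resample(data)              # one Gibbs update given an (N, 2) array
    #   lls = g.log_likelihood(data)  # per-row log densities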
def __init__(
self, mu=None, sigma=None,
mu_0=None, sigma_0=None, kappa_0=None, nu_0=None):
self.mu = mu
self.sigma = sigma
self.mu_0 = self.mu_mf = mu_0
self.sigma_0 = self.sigma_mf = sigma_0
self.kappa_0 = self.kappa_mf = kappa_0
self.nu_0 = self.nu_mf = nu_0
# NOTE: resampling will set mu_mf and sigma_mf if necessary
if mu is sigma is None \
and not any(_ is None for _ in (mu_0,sigma_0,kappa_0,nu_0)):
self.resample() # initialize from prior
if mu is not None and sigma is not None \
and not any(_ is None for _ in (mu_0,sigma_0,kappa_0,nu_0)):
self.mu_mf = mu
self.sigma_mf = sigma * (self.nu_0 - self.mu_mf.shape[0] - 1)
@property
def hypparams(self):
return dict(
mu_0=self.mu_0,sigma_0=self.sigma_0,
kappa_0=self.kappa_0,nu_0=self.nu_0)
@property
def natural_hypparam(self):
return self._standard_to_natural(
self.mu_0,self.sigma_0,self.kappa_0,self.nu_0)
@natural_hypparam.setter
def natural_hypparam(self,natparam):
self.mu_0, self.sigma_0, self.kappa_0, self.nu_0 = \
self._natural_to_standard(natparam)
def _standard_to_natural(self,mu_mf,sigma_mf,kappa_mf,nu_mf):
D = sigma_mf.shape[0]
out = np.zeros((D+2,D+2))
out[:D,:D] = sigma_mf + kappa_mf * np.outer(mu_mf,mu_mf)
out[:D,-2] = out[-2,:D] = kappa_mf * mu_mf
out[-2,-2] = kappa_mf
out[-1,-1] = nu_mf + 2 + D
return out
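    # The natural hyperparameter built above is the (D+2) x (D+2) block matrix
    #   [ sigma + kappa*mu*mu^T   kappa*mu   0          ]
    #   [ kappa*mu^T              kappa      0          ]
    #   [ 0                       0          nu + 2 + D ]
    # and _natural_to_standard below just inverts this packing.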
def _natural_to_standard(self,natparam):
D = natparam.shape[0]-2
A = natparam[:D,:D]
b = natparam[:D,-2]
c = natparam[-2,-2]
d = natparam[-1,-1]
return b/c, A - np.outer(b,b)/c, c, d - 2 - D
@property
def num_parameters(self):
D = self.D
return D*(D+1)/2
@property
def D(self):
if self.mu is not None:
return self.mu.shape[0]
elif self.mu_0 is not None:
return self.mu_0.shape[0]
def _get_statistics(self,data,D=None):
if D is None:
D = self.D if self.D is not None else getdatadimension(data)
out = np.zeros((D+2,D+2))
if isinstance(data,np.ndarray):
out[:D,:D] = data.T.dot(data)
out[-2,:D] = out[:D,-2] = data.sum(0)
out[-2,-2] = out[-1,-1] = data.shape[0]
return out
else:
return sum(list(map(self._get_statistics,data)),out)
def _get_weighted_statistics(self,data,weights,D=None):
D = getdatadimension(data) if D is None else D
out = np.zeros((D+2,D+2))
if isinstance(data,np.ndarray):
out[:D,:D] = data.T.dot(weights[:,na]*data)
out[-2,:D] = out[:D,-2] = weights.dot(data)
out[-2,-2] = out[-1,-1] = weights.sum()
return out
else:
return sum(list(map(self._get_weighted_statistics,data,weights)),out)
def _get_empty_statistics(self, D):
out = np.zeros((D+2,D+2))
return out
def empirical_bayes(self,data):
self.natural_hypparam = self._get_statistics(data)
        self.resample()  # initialize from prior given new hyperparameters
return self
@staticmethod
def _stats_ensure_array(stats):
if isinstance(stats, np.ndarray):
return stats
x, xxT, n = stats
D = x.shape[-1]
out = np.zeros((D+2,D+2))
out[:D,:D] = xxT
out[-2,:D] = out[:D,-2] = x
out[-2,-2] = out[-1,-1] = n
return out
### Gibbs sampling
def resample(self,data=[]):
D = len(self.mu_0)
self.mu, self.sigma = \
sample_niw(*self._natural_to_standard(
self.natural_hypparam + self._get_statistics(data,D)))
        # NOTE: next lines let Gibbs sampling initialize the mean field parameters
nu = self.nu_mf if hasattr(self,'nu_mf') and self.nu_mf \
else self.nu_0
self.mu_mf, self._sigma_mf = self.mu, self.sigma * (nu - D - 1)
return self
def copy_sample(self):
new = copy.copy(self)
new.mu = self.mu.copy()
new.sigma = self.sigma.copy()
return new
### Mean Field
def _resample_from_mf(self):
self.mu, self.sigma = \
sample_niw(*self._natural_to_standard(
self.mf_natural_hypparam))
return self
def meanfieldupdate(self, data=None, weights=None, stats=None):
assert (data is not None and weights is not None) ^ (stats is not None)
stats = self._stats_ensure_array(stats) if stats is not None else \
self._get_weighted_statistics(data, weights, self.mu_0.shape[0])
self.mf_natural_hypparam = \
self.natural_hypparam + stats
def meanfield_sgdstep(self,data,weights,prob,stepsize):
D = len(self.mu_0)
self.mf_natural_hypparam = \
(1-stepsize) * self.mf_natural_hypparam + stepsize * (
self.natural_hypparam
+ 1./prob
* self._get_weighted_statistics(data,weights,D))
@property
def mf_natural_hypparam(self):
return self._standard_to_natural(
self.mu_mf,self.sigma_mf,self.kappa_mf,self.nu_mf)
@mf_natural_hypparam.setter
def mf_natural_hypparam(self,natparam):
self.mu_mf, self.sigma_mf, self.kappa_mf, self.nu_mf = \
self._natural_to_standard(natparam)
# NOTE: next line is for plotting
self.mu, self.sigma = \
self.mu_mf, self.sigma_mf/(self.nu_mf - self.mu_mf.shape[0] - 1)
@property
def sigma_mf(self):
return self._sigma_mf
@sigma_mf.setter
def sigma_mf(self,val):
self._sigma_mf = val
self._sigma_mf_chol = None
@property
def sigma_mf_chol(self):
if self._sigma_mf_chol is None:
self._sigma_mf_chol = np.linalg.cholesky(self.sigma_mf)
return self._sigma_mf_chol
def get_vlb(self):
D = len(self.mu_0)
loglmbdatilde = self._loglmbdatilde()
# see Eq. 10.77 in Bishop
q_entropy = -0.5 * (loglmbdatilde + D * (np.log(self.kappa_mf/(2*np.pi))-1)) \
+ invwishart_entropy(self.sigma_mf,self.nu_mf)
# see Eq. 10.74 in Bishop, we aren't summing over K
p_avgengy = 0.5 * (D * np.log(self.kappa_0/(2*np.pi)) + loglmbdatilde
- D*self.kappa_0/self.kappa_mf - self.kappa_0*self.nu_mf*
np.dot(self.mu_mf -
self.mu_0,np.linalg.solve(self.sigma_mf,self.mu_mf - self.mu_0))) \
- invwishart_log_partitionfunction(self.sigma_0,self.nu_0) \
+ (self.nu_0 - D - 1)/2*loglmbdatilde - 1/2*self.nu_mf \
* np.linalg.solve(self.sigma_mf,self.sigma_0).trace()
return p_avgengy + q_entropy
def expected_log_likelihood(self, x=None, stats=None):
assert (x is not None) ^ isinstance(stats, (tuple, np.ndarray))
if x is not None:
mu_n, kappa_n, nu_n = self.mu_mf, self.kappa_mf, self.nu_mf
D = len(mu_n)
x = np.reshape(x,(-1,D)) - mu_n # x is now centered
xs = np.linalg.solve(self.sigma_mf_chol,x.T)
# see Eqs. 10.64, 10.67, and 10.71 in Bishop
return self._loglmbdatilde()/2 - D/(2*kappa_n) - nu_n/2 * \
inner1d(xs.T,xs.T) - D/2*np.log(2*np.pi)
else:
D = self.mu_mf.shape[0]
E_J, E_h, E_muJmuT, E_logdetJ = \
niw_expectedstats(
self.nu_mf, self.sigma_mf, self.mu_mf, self.kappa_mf)
if isinstance(stats, np.ndarray):
parammat = np.zeros((D+2,D+2))
parammat[:D,:D] = E_J
parammat[:D,-2] = parammat[-2,:D] = -E_h
parammat[-2,-2] = E_muJmuT
parammat[-1,-1] = -E_logdetJ
contract = 'ij,nij->n' if stats.ndim == 3 else 'ij,ij->'
return -1./2*np.einsum(contract, parammat, stats) \
- D/2.*np.log(2*np.pi)
else:
x, xxT, n = stats
c1, c2 = ('i,i->', 'ij,ij->') if x.ndim == 1 \
else ('i,ni->n', 'ij,nij->n')
out = -1./2 * np.einsum(c2, E_J, xxT)
out += np.einsum(c1, E_h, x)
out += -n/2.*E_muJmuT
out += -D/2.*np.log(2*np.pi) + n/2.*E_logdetJ
return out
def _loglmbdatilde(self):
# see Eq. 10.65 in Bishop
D = len(self.mu_0)
chol = self.sigma_mf_chol
return special.digamma((self.nu_mf-np.arange(D))/2.).sum() \
+ D*np.log(2) - 2*np.log(chol.diagonal()).sum()
### Collapsed
def log_marginal_likelihood(self,data):
n, D = getdatasize(data), len(self.mu_0)
return self._log_partition_function(
*self._natural_to_standard(
self.natural_hypparam + self._get_statistics(data,D))) \
- self._log_partition_function(self.mu_0,self.sigma_0,self.kappa_0,self.nu_0) \
- n*D/2 * np.log(2*np.pi)
def _log_partition_function(self,mu,sigma,kappa,nu):
D = len(mu)
chol = np.linalg.cholesky(sigma)
return nu*D/2*np.log(2) + special.multigammaln(nu/2,D) + D/2*np.log(2*np.pi/kappa) \
- nu*np.log(chol.diagonal()).sum()
def log_predictive_studentt_datapoints(self,datapoints,olddata):
D = len(self.mu_0)
mu_n, sigma_n, kappa_n, nu_n = \
self._natural_to_standard(
self.natural_hypparam + self._get_statistics(olddata,D))
return multivariate_t_loglik(
datapoints,nu_n-D+1,mu_n,(kappa_n+1)/(kappa_n*(nu_n-D+1))*sigma_n)
def log_predictive_studentt(self,newdata,olddata):
newdata = np.atleast_2d(newdata)
return sum(self.log_predictive_studentt_datapoints(
d,combinedata((olddata,newdata[:i])))[0] for i,d in enumerate(newdata))
### Max likelihood
def max_likelihood(self,data,weights=None):
D = getdatadimension(data)
if weights is None:
statmat = self._get_statistics(data,D)
else:
statmat = self._get_weighted_statistics(data,weights,D)
n, x, xxt = statmat[-1,-1], statmat[-2,:D], statmat[:D,:D]
# this SVD is necessary to check if the max likelihood solution is
# degenerate, which can happen in the EM algorithm
if n < D or (np.linalg.svd(xxt,compute_uv=False) > 1e-6).sum() < D:
self.broken = True
self.mu = 99999999*np.ones(D)
self.sigma = np.eye(D)
else:
self.mu = x/n
self.sigma = xxt/n - np.outer(self.mu,self.mu)
return self
def MAP(self,data,weights=None):
D = getdatadimension(data)
# max likelihood with prior pseudocounts included in data
if weights is None:
statmat = self._get_statistics(data)
else:
statmat = self._get_weighted_statistics(data,weights)
statmat += self.natural_hypparam
n, x, xxt = statmat[-1,-1], statmat[-2,:D], statmat[:D,:D]
self.mu = x/n
self.sigma = xxt/n - np.outer(self.mu,self.mu)
return self
class GaussianFixedMean(_GaussianBase, GibbsSampling, MaxLikelihood):
def __init__(self,mu=None,sigma=None,nu_0=None,lmbda_0=None):
self.sigma = sigma
self.mu = mu
self.nu_0 = nu_0
self.lmbda_0 = lmbda_0
if sigma is None and not any(_ is None for _ in (nu_0,lmbda_0)):
self.resample() # initialize from prior
@property
def hypparams(self):
return dict(nu_0=self.nu_0,lmbda_0=self.lmbda_0)
@property
def num_parameters(self):
D = len(self.mu)
return D*(D+1)/2
def _get_statistics(self,data):
n = getdatasize(data)
if n > 1e-4:
if isinstance(data,np.ndarray):
centered = data[gi(data)] - self.mu
sumsq = centered.T.dot(centered)
n = len(centered)
else:
sumsq = sum((d[gi(d)]-self.mu).T.dot(d[gi(d)]-self.mu) for d in data)
else:
sumsq = None
return n, sumsq
def _get_weighted_statistics(self,data,weights):
if isinstance(data,np.ndarray):
neff = weights.sum()
if neff > weps:
centered = data - self.mu
sumsq = centered.T.dot(weights[:,na]*centered)
else:
sumsq = None
else:
neff = sum(w.sum() for w in weights)
if neff > weps:
sumsq = sum((d-self.mu).T.dot(w[:,na]*(d-self.mu)) for w,d in zip(weights,data))
else:
sumsq = None
return neff, sumsq
def _posterior_hypparams(self,n,sumsq):
nu_0, lmbda_0 = self.nu_0, self.lmbda_0
if n > 1e-4:
nu_0 = nu_0 + n
sigma_n = self.lmbda_0 + sumsq
return sigma_n, nu_0
else:
return lmbda_0, nu_0
### Gibbs sampling
def resample(self, data=[]):
self.sigma = sample_invwishart(*self._posterior_hypparams(
*self._get_statistics(data)))
return self
### Max likelihood
def max_likelihood(self,data,weights=None):
D = getdatadimension(data)
if weights is None:
n, sumsq = self._get_statistics(data)
else:
n, sumsq = self._get_weighted_statistics(data,weights)
if n < D or (np.linalg.svd(sumsq,compute_uv=False) > 1e-6).sum() < D:
# broken!
self.sigma = np.eye(D)*1e-9
self.broken = True
else:
self.sigma = sumsq/n
return self
class GaussianFixedCov(_GaussianBase, GibbsSampling, MaxLikelihood):
# See Gelman's Bayesian Data Analysis notation around Eq. 3.18, p. 85
# in 2nd Edition. We replaced \Lambda_0 with sigma_0 since it is a prior
# *covariance* matrix rather than a precision matrix.
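    # Conjugate update implemented in _posterior_hypparams below, written in
    # precision form with Lambda_0 = sigma_0^{-1}:
    #   Lambda_n = Lambda_0 + n * Sigma^{-1}
    #   mu_n     = Lambda_n^{-1} (Lambda_0 mu_0 + n * Sigma^{-1} xbar)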
def __init__(self,mu=None,sigma=None,mu_0=None,sigma_0=None):
self.mu = mu
self.sigma = sigma
self.mu_0 = mu_0
self.sigma_0 = sigma_0
if mu is None and not any(_ is None for _ in (mu_0,sigma_0)):
self.resample()
@property
def hypparams(self):
return dict(mu_0=self.mu_0,sigma_0=self.sigma_0)
@property
def sigma_inv(self):
if not hasattr(self,'_sigma_inv'):
self._sigma_inv = np.linalg.inv(self.sigma)
return self._sigma_inv
@property
def sigma_inv_0(self):
if not hasattr(self,'_sigma_inv_0'):
self._sigma_inv_0 = np.linalg.inv(self.sigma_0)
return self._sigma_inv_0
@property
def num_parameters(self):
return len(self.mu)
def _get_statistics(self,data):
n = getdatasize(data)
if n > 0:
if isinstance(data,np.ndarray):
xbar = data.mean(0)
else:
xbar = sum(d.sum(0) for d in data) / n
else:
xbar = None
return n, xbar
def _get_weighted_statistics(self,data,weights):
if isinstance(data,np.ndarray):
neff = weights.sum()
if neff > weps:
xbar = weights.dot(data) / neff
else:
xbar = None
else:
neff = sum(w.sum() for w in weights)
if neff > weps:
xbar = sum(w.dot(d) for w,d in zip(weights,data)) / neff
else:
xbar = None
return neff, xbar
def _posterior_hypparams(self,n,xbar):
# It seems we should be working with lmbda and sigma inv (unless lmbda
# is a covariance, not a precision)
sigma_inv, mu_0, sigma_inv_0 = self.sigma_inv, self.mu_0, self.sigma_inv_0
if n > 0:
sigma_inv_n = n*sigma_inv + sigma_inv_0
mu_n = np.linalg.solve(
sigma_inv_n, sigma_inv_0.dot(mu_0) + n*sigma_inv.dot(xbar))
return mu_n, sigma_inv_n
else:
return mu_0, sigma_inv_0
### Gibbs sampling
def resample(self,data=[]):
mu_n, sigma_n_inv = self._posterior_hypparams(*self._get_statistics(data))
D = len(mu_n)
L = np.linalg.cholesky(sigma_n_inv)
self.mu = scipy.linalg.solve_triangular(L,np.random.normal(size=D),lower=True) \
+ mu_n
return self
### Max likelihood
def max_likelihood(self,data,weights=None):
if weights is None:
n, xbar = self._get_statistics(data)
else:
n, xbar = self._get_weighted_statistics(data,weights)
self.mu = xbar
return self
class GaussianFixed(_FixedParamsMixin, Gaussian):
def __init__(self,mu,sigma):
self.mu = mu
self.sigma = sigma
class GaussianNonConj(_GaussianBase, GibbsSampling):
def __init__(self,mu=None,sigma=None,
mu_0=None,mu_lmbda_0=None,nu_0=None,sigma_lmbda_0=None):
self._sigma_distn = GaussianFixedMean(mu=mu,
nu_0=nu_0,lmbda_0=sigma_lmbda_0,sigma=sigma)
self._mu_distn = GaussianFixedCov(sigma=self._sigma_distn.sigma,
mu_0=mu_0, sigma_0=mu_lmbda_0,mu=mu)
self._sigma_distn.mu = self._mu_distn.mu
@property
def hypparams(self):
d = self._mu_distn.hypparams
d.update(**self._sigma_distn.hypparams)
return d
def _get_mu(self):
return self._mu_distn.mu
def _set_mu(self,val):
self._mu_distn.mu = val
self._sigma_distn.mu = val
mu = property(_get_mu,_set_mu)
def _get_sigma(self):
return self._sigma_distn.sigma
def _set_sigma(self,val):
self._sigma_distn.sigma = val
self._mu_distn.sigma = val
sigma = property(_get_sigma,_set_sigma)
### Gibbs sampling
def resample(self,data=[],niter=1):
if getdatasize(data) == 0:
niter = 1
# TODO this is kinda dumb because it collects statistics over and over
# instead of updating them...
for itr in range(niter):
# resample mu
self._mu_distn.sigma = self._sigma_distn.sigma
self._mu_distn.resample(data)
# resample sigma
self._sigma_distn.mu = self._mu_distn.mu
self._sigma_distn.resample(data)
return self
# TODO collapsed
class DiagonalGaussian(_GaussianBase,GibbsSampling,MaxLikelihood,MeanField,Tempering):
'''
Product of normal-inverse-gamma priors over mu (mean vector) and sigmas
(vector of scalar variances).
The prior follows
sigmas ~ InvGamma(alphas_0,betas_0) iid
mu | sigma ~ N(mu_0,1/nus_0 * diag(sigmas))
It allows placing different prior hyperparameters on different components.
'''
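    # A minimal usage sketch (illustrative hyperparameter values); scalar
    # nus_0/alphas_0/betas_0 are broadcast to length-D vectors in __init__:
    #   d = DiagonalGaussian(mu_0=np.zeros(3), nus_0=0.1, alphas_0=1., betas_0=1.)
    #   d.resample(data)              # data is an (N, 3) array
    #   d.log_likelihood(data)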
def __init__(self,mu=None,sigmas=None,mu_0=None,nus_0=None,alphas_0=None,betas_0=None):
# all the s's refer to the fact that these are vectors of length
# len(mu_0) OR scalars
if mu_0 is not None:
D = mu_0.shape[0]
if nus_0 is not None and \
(isinstance(nus_0,int) or isinstance(nus_0,float)):
nus_0 = nus_0*np.ones(D)
if alphas_0 is not None and \
(isinstance(alphas_0,int) or isinstance(alphas_0,float)):
alphas_0 = alphas_0*np.ones(D)
if betas_0 is not None and \
(isinstance(betas_0,int) or isinstance(betas_0,float)):
betas_0 = betas_0*np.ones(D)
self.mu_0 = self.mf_mu = mu_0
self.nus_0 = self.mf_nus = nus_0
self.alphas_0 = self.mf_alphas = alphas_0
self.betas_0 = self.mf_betas = betas_0
self.mu = mu
self.sigmas = sigmas
assert self.mu is None or (isinstance(self.mu,np.ndarray) and not isinstance(self.mu,np.ma.MaskedArray))
assert self.sigmas is None or (isinstance(self.sigmas,np.ndarray) and not isinstance(self.sigmas,np.ma.MaskedArray))
if mu is sigmas is None \
and not any(_ is None for _ in (mu_0,nus_0,alphas_0,betas_0)):
            self.resample()  # initialize from prior
### the basics!
@property
def parameters(self):
return self.mu, self.sigmas
@parameters.setter
def parameters(self, mu_sigmas_tuple):
(mu,sigmas) = mu_sigmas_tuple
self.mu, self.sigmas = mu, sigmas
@property
def sigma(self):
return np.diag(self.sigmas)
@sigma.setter
def sigma(self,val):
val = np.array(val)
assert val.ndim in (1,2)
if val.ndim == 1:
self.sigmas = val
else:
self.sigmas = np.diag(val)
@property
def hypparams(self):
return dict(mu_0=self.mu_0,nus_0=self.nus_0,
alphas_0=self.alphas_0,betas_0=self.betas_0)
def rvs(self,size=None):
size = np.array(size,ndmin=1)
return np.sqrt(self.sigmas)*\
np.random.normal(size=np.concatenate((size,self.mu.shape))) + self.mu
def log_likelihood(self,x,temperature=1.):
mu, sigmas, D = self.mu, self.sigmas * temperature, self.mu.shape[0]
x = np.reshape(x,(-1,D))
Js = -1./(2*sigmas)
return (np.einsum('ij,ij,j->i',x,x,Js) - np.einsum('ij,j,j->i',x,2*mu,Js)) \
+ (mu**2*Js - 1./2*np.log(2*np.pi*sigmas)).sum()
### posterior updating stuff
@property
def natural_hypparam(self):
return self._standard_to_natural(self.alphas_0,self.betas_0,self.mu_0,self.nus_0)
@natural_hypparam.setter
def natural_hypparam(self,natparam):
self.alphas_0, self.betas_0, self.mu_0, self.nus_0 = \
self._natural_to_standard(natparam)
def _standard_to_natural(self,alphas,betas,mu,nus):
return np.array([2*betas + nus * mu**2, nus*mu, nus, 2*alphas])
def _natural_to_standard(self,natparam):
nus = natparam[2]
mu = natparam[1] / nus
alphas = natparam[3]/2.
betas = (natparam[0] - nus*mu**2) / 2.
return alphas, betas, mu, nus
def _get_statistics(self,data):
if isinstance(data,np.ndarray) and data.shape[0] > 0:
data = data[gi(data)]
ns = np.repeat(*data.shape)
return np.array([
np.einsum('ni,ni->i',data,data),
np.einsum('ni->i',data),
ns,
ns,
])
else:
return sum((self._get_statistics(d) for d in data), self._empty_stats())
def _get_weighted_statistics(self,data,weights):
if isinstance(data,np.ndarray):
idx = ~np.isnan(data).any(1)
data = data[idx]
weights = weights[idx]
assert data.ndim == 2 and weights.ndim == 1 \
and data.shape[0] == weights.shape[0]
neff = np.repeat(weights.sum(),data.shape[1])
return np.array([weights.dot(data**2), weights.dot(data), neff, neff])
else:
return sum(
(self._get_weighted_statistics(d,w) for d, w in zip(data,weights)),
self._empty_stats())
def _empty_stats(self):
return np.zeros_like(self.natural_hypparam)
### Gibbs sampling
def resample(self,data=[],temperature=1.,stats=None):
stats = self._get_statistics(data) if stats is None else stats
alphas_n, betas_n, mu_n, nus_n = self._natural_to_standard(
self.natural_hypparam + stats / temperature)
D = mu_n.shape[0]
self.sigmas = 1/np.random.gamma(alphas_n,scale=1/betas_n)
self.mu = np.sqrt(self.sigmas/nus_n)*np.random.randn(D) + mu_n
assert not np.isnan(self.mu).any()
assert not np.isnan(self.sigmas).any()
# NOTE: next line is to use Gibbs sampling to initialize mean field
self.mf_mu = self.mu
assert self.sigmas.ndim == 1
return self
def copy_sample(self):
new = copy.copy(self)
new.mu = self.mu.copy()
new.sigmas = self.sigmas.copy()
return new
### max likelihood
def max_likelihood(self,data,weights=None):
        # statistics are packed per dimension as [sum(x**2), sum(x), n, n]
        # (see _get_statistics / _get_weighted_statistics above)
        if weights is None:
            sumsq, sumx, n, _ = self._get_statistics(data)
        else:
            sumsq, sumx, n, _ = self._get_weighted_statistics(data,weights)
        self.mu = sumx/n
        self.sigmas = sumsq/n - self.mu**2
        return self
### Mean Field
@property
def mf_natural_hypparam(self):
return self._standard_to_natural(self.mf_alphas,self.mf_betas,self.mf_mu,self.mf_nus)
@mf_natural_hypparam.setter
def mf_natural_hypparam(self,natparam):
self.mf_alphas, self.mf_betas, self.mf_mu, self.mf_nus = \
self._natural_to_standard(natparam)
# NOTE: this part is for plotting
self.mu = self.mf_mu
self.sigmas = np.where(self.mf_alphas > 1,self.mf_betas / (self.mf_alphas - 1),100000)
def meanfieldupdate(self,data,weights):
self.mf_natural_hypparam = \
self.natural_hypparam + self._get_weighted_statistics(data,weights)
def meanfield_sgdstep(self,data,weights,prob,stepsize):
self.mf_natural_hypparam = \
(1-stepsize) * self.mf_natural_hypparam + stepsize * (
self.natural_hypparam
+ 1./prob * self._get_weighted_statistics(data,weights))
def get_vlb(self):
natparam_diff = self.natural_hypparam - self.mf_natural_hypparam
expected_stats = self._expected_statistics(
self.mf_alphas,self.mf_betas,self.mf_mu,self.mf_nus)
linear_term = sum(v1.dot(v2) for v1, v2 in zip(natparam_diff, expected_stats))
normalizer_term = \
self._log_Z(self.alphas_0,self.betas_0,self.mu_0,self.nus_0) \
- self._log_Z(self.mf_alphas,self.mf_betas,self.mf_mu,self.mf_nus)
return linear_term - normalizer_term - len(self.mf_mu)/2. * np.log(2*np.pi)
def expected_log_likelihood(self,x):
x = np.atleast_2d(x).reshape((-1,len(self.mf_mu)))
a,b,c,d = self._expected_statistics(
self.mf_alphas,self.mf_betas,self.mf_mu,self.mf_nus)
return (x**2).dot(a) + x.dot(b) + c.sum() + d.sum() \
- len(self.mf_mu)/2. * np.log(2*np.pi)
def _expected_statistics(self,alphas,betas,mu,nus):
return np.array([
-1./2 * alphas/betas,
mu * alphas/betas,
-1./2 * (1./nus + mu**2 * alphas/betas),
-1./2 * (np.log(betas) - special.digamma(alphas))])
def _log_Z(self,alphas,betas,mu,nus):
return (special.gammaln(alphas) - alphas*np.log(betas) - 1./2*np.log(nus)).sum()
# TODO meanfield
class DiagonalGaussianNonconjNIG(_GaussianBase,GibbsSampling):
'''
Product of normal priors over mu and product of gamma priors over sigmas.
Note that while the conjugate prior in DiagonalGaussian is of the form
p(mu,sigmas), this prior is of the form p(mu)p(sigmas). Therefore its
resample() update has to perform inner iterations.
The prior follows
mu ~ N(mu_0,diag(sigmas_0))
sigmas ~ InvGamma(alpha_0,beta_0) iid
'''
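    # Because the prior factorizes over mu and sigmas, resample() below runs a
    # short blocked Gibbs sweep of `niter` iterations: mu | sigmas, data is
    # Gaussian and sigmas | mu, data is inverse-gamma, so both blocks are
    # sampled in closed form.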
def __init__(self,mu=None,sigmas=None,mu_0=None,sigmas_0=None,alpha_0=None,beta_0=None,
niter=20):
self.mu_0, self.sigmas_0 = mu_0, sigmas_0
self.alpha_0, self.beta_0 = alpha_0, beta_0
self.niter = niter
if None in (mu,sigmas):
self.resample()
else:
self.mu, self.sigmas = mu, sigmas
@property
def hypparams(self):
return dict(mu_0=self.mu_0,sigmas_0=self.sigmas_0,alpha_0=self.alpha_0,beta_0=self.beta_0)
# TODO next three methods are copied from DiagonalGaussian, factor them out
@property
def sigma(self):
return np.diag(self.sigmas)
def rvs(self,size=None):
size = np.array(size,ndmin=1)
return np.sqrt(self.sigmas)*\
np.random.normal(size=np.concatenate((size,self.mu.shape))) + self.mu
def log_likelihood(self,x):
mu, sigmas, D = self.mu, self.sigmas, self.mu.shape[0]
x = np.reshape(x,(-1,D))
Js = -1./(2*sigmas)
return (np.einsum('ij,ij,j->i',x,x,Js) - np.einsum('ij,j,j->i',x,2*mu,Js)) \
+ (mu**2*Js - 1./2*np.log(2*np.pi*sigmas)).sum()
def resample(self,data=[]):
n, y, ysq = self._get_statistics(data)
if n == 0:
self.mu = np.sqrt(self.sigmas_0) * np.random.randn(self.mu_0.shape[0]) + self.mu_0
self.sigmas = 1./np.random.gamma(self.alpha_0,scale=1./self.beta_0)
else:
for itr in range(self.niter):
sigmas_n = 1./(1./self.sigmas_0 + n / self.sigmas)
mu_n = (self.mu_0 / self.sigmas_0 + y / self.sigmas) * sigmas_n
self.mu = np.sqrt(sigmas_n) * np.random.randn(mu_n.shape[0]) + mu_n
alphas_n = self.alpha_0 + 1./2*n
betas_n = self.beta_0 + 1./2*(ysq + n*self.mu**2 - 2*self.mu*y)
self.sigmas = 1./np.random.gamma(alphas_n,scale=1./betas_n)
return self
def _get_statistics(self,data):
# TODO dont forget to handle nans
assert isinstance(data,(list,np.ndarray)) and not isinstance(data,np.ma.MaskedArray)
if isinstance(data,np.ndarray):
data = data[gi(data)]
n = data.shape[0]
y = np.einsum('ni->i',data)
ysq = np.einsum('ni,ni->i',data,data)
return np.array([n,y,ysq],dtype=np.object)
else:
return sum((self._get_statistics(d) for d in data),self._empty_stats)
@property
def _empty_stats(self):
return np.array([0.,np.zeros_like(self.mu_0),np.zeros_like(self.mu_0)],
dtype=np.object)
# TODO collapsed, meanfield, max_likelihood
class IsotropicGaussian(GibbsSampling):
'''
Normal-Inverse-Gamma prior over mu (mean vector) and sigma (scalar
variance). Essentially, all coordinates of all observations inform the
variance.
The prior follows
sigma ~ InvGamma(alpha_0,beta_0)
mu | sigma ~ N(mu_0,sigma/nu_0 * I)
'''
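    # A minimal usage sketch (illustrative hyperparameter values):
    #   iso = IsotropicGaussian(mu_0=np.zeros(2), nu_0=0.1, alpha_0=1., beta_0=1.)
    #   iso.resample(data)            # data is an (N, 2) array
    #   iso.log_likelihood(data)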
def __init__(self,mu=None,sigma=None,mu_0=None,nu_0=None,alpha_0=None,beta_0=None):
self.mu = mu
self.sigma = sigma
self.mu_0 = mu_0
self.nu_0 = nu_0
self.alpha_0 = alpha_0
self.beta_0 = beta_0
if mu is sigma is None and not any(_ is None for _ in (mu_0,nu_0,alpha_0,beta_0)):
            self.resample()  # initialize from prior
@property
def hypparams(self):
return dict(mu_0=self.mu_0,nu_0=self.nu_0,alpha_0=self.alpha_0,beta_0=self.beta_0)
def rvs(self,size=None):
return np.sqrt(self.sigma)*np.random.normal(size=tuple(size)+self.mu.shape) + self.mu
def log_likelihood(self,x):
mu, sigma, D = self.mu, self.sigma, self.mu.shape[0]
x = np.reshape(x,(-1,D))
return (-0.5*((x-mu)**2).sum(1)/sigma - D*np.log(np.sqrt(2*np.pi*sigma)))
def _posterior_hypparams(self,n,xbar,sumsq):
mu_0, nu_0, alpha_0, beta_0 = self.mu_0, self.nu_0, self.alpha_0, self.beta_0
D = mu_0.shape[0]
if n > 0:
nu_n = D*n + nu_0
alpha_n = alpha_0 + D*n/2
beta_n = beta_0 + 1/2*sumsq + (n*D*nu_0)/(n*D+nu_0) * 1/2 * ((xbar - mu_0)**2).sum()
mu_n = (n*xbar + nu_0*mu_0)/(n+nu_0)
return mu_n, nu_n, alpha_n, beta_n
else:
return mu_0, nu_0, alpha_0, beta_0
### Gibbs sampling
def resample(self,data=[]):
mu_n, nu_n, alpha_n, beta_n = self._posterior_hypparams(
*self._get_statistics(data, D=self.mu_0.shape[0]))
D = mu_n.shape[0]
self.sigma = 1/np.random.gamma(alpha_n,scale=1/beta_n)
self.mu = np.sqrt(self.sigma/nu_n)*np.random.randn(D)+mu_n
return self
def _get_statistics(self,data, D=None):
n = getdatasize(data)
if n > 0:
D = D if D else getdatadimension(data)
if isinstance(data,np.ndarray):
assert (data.ndim == 1 and data.shape == (D,)) \
or (data.ndim == 2 and data.shape[1] == D)
data = np.reshape(data,(-1,D))
xbar = data.mean(0)
sumsq = ((data-xbar)**2).sum()
else:
xbar = sum(np.reshape(d,(-1,D)).sum(0) for d in data) / n
sumsq = sum(((np.reshape(data,(-1,D)) - xbar)**2).sum() for d in data)
else:
xbar, sumsq = None, None
return n, xbar, sumsq
class _ScalarGaussianBase(object):
@property
def params(self):
return dict(mu=self.mu,sigmasq=self.sigmasq)
def rvs(self,size=None):
return np.sqrt(self.sigmasq)*np.random.normal(size=size)+self.mu
def log_likelihood(self,x):
x = np.reshape(x,(-1,1))
return (-0.5*(x-self.mu)**2/self.sigmasq - np.log(np.sqrt(2*np.pi*self.sigmasq))).ravel()
def __repr__(self):
return self.__class__.__name__ + '(mu=%f,sigmasq=%f)' % (self.mu,self.sigmasq)
def plot(self,data=None,indices=None,color='b',plot_params=True,label=None):
import matplotlib.pyplot as plt
data = np.concatenate(data) if data is not None else None
indices = np.concatenate(indices) if indices is not None else None
if data is not None:
assert indices is not None
plt.plot(indices,data,color=color,marker='x',linestyle='')
if plot_params:
assert indices is not None
if len(indices) > 1:
                from pybasicbayes.util.general import rle
vals, lens = rle(np.diff(indices))
starts = np.concatenate(((0,),lens.cumsum()[:-1]))
for start, blocklen in zip(starts[vals == 1], lens[vals == 1]):
plt.plot(indices[start:start+blocklen],
np.repeat(self.mu,blocklen),color=color,linestyle='--')
else:
plt.plot(indices,[self.mu],color=color,marker='+')
### mostly shared statistics gathering
def _get_statistics(self,data):
n = getdatasize(data)
if n > 0:
if isinstance(data,np.ndarray):
ybar = data.mean()
centered = data.ravel() - ybar
sumsqc = centered.dot(centered)
elif isinstance(data,list):
ybar = sum(d.sum() for d in data)/n
sumsqc = sum((d.ravel()-ybar).dot(d.ravel()-ybar) for d in data)
else:
ybar = data
sumsqc = 0
else:
ybar = None
sumsqc = None
return n, ybar, sumsqc
def _get_weighted_statistics(self,data,weights):
if isinstance(data,np.ndarray):
neff = weights.sum()
if neff > weps:
ybar = weights.dot(data.ravel()) / neff
centered = data.ravel() - ybar
sumsqc = centered.dot(weights*centered)
else:
ybar = None
sumsqc = None
elif isinstance(data,list):
neff = sum(w.sum() for w in weights)
if neff > weps:
ybar = sum(w.dot(d.ravel()) for d,w in zip(data,weights)) / neff
sumsqc = sum((d.ravel()-ybar).dot(w*(d.ravel()-ybar))
for d,w in zip(data,weights))
else:
ybar = None
sumsqc = None
else:
ybar = data
sumsqc = 0
return neff, ybar, sumsqc
### max likelihood
def max_likelihood(self,data,weights=None):
if weights is None:
n, ybar, sumsqc = self._get_statistics(data)
else:
n, ybar, sumsqc = self._get_weighted_statistics(data,weights)
if sumsqc > 0:
self.mu = ybar
self.sigmasq = sumsqc/n
else:
self.broken = True
self.mu = 999999999.
            self.sigmasq = 1.
return self
class ScalarGaussianNIX(_ScalarGaussianBase, GibbsSampling, Collapsed):
'''
Conjugate Normal-(Scaled-)Inverse-ChiSquared prior. (Another parameterization is the
Normal-Inverse-Gamma.)
'''
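    # A minimal usage sketch (illustrative hyperparameter values):
    #   sg = ScalarGaussianNIX(mu_0=0., kappa_0=0.1, sigmasq_0=1., nu_0=1.)
    #   sg.resample(data)                  # data is a 1D array of scalars
    #   sg.log_marginal_likelihood(data)   # collapsed evidence under the prior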
def __init__(self,mu=None,sigmasq=None,mu_0=None,kappa_0=None,sigmasq_0=None,nu_0=None):
self.mu = mu
self.sigmasq = sigmasq
self.mu_0 = mu_0
self.kappa_0 = kappa_0
self.sigmasq_0 = sigmasq_0
self.nu_0 = nu_0
if mu is sigmasq is None \
and not any(_ is None for _ in (mu_0,kappa_0,sigmasq_0,nu_0)):
            self.resample()  # initialize from prior
@property
def hypparams(self):
return dict(mu_0=self.mu_0,kappa_0=self.kappa_0,
sigmasq_0=self.sigmasq_0,nu_0=self.nu_0)
def _posterior_hypparams(self,n,ybar,sumsqc):
mu_0, kappa_0, sigmasq_0, nu_0 = self.mu_0, self.kappa_0, self.sigmasq_0, self.nu_0
if n > 0:
kappa_n = kappa_0 + n
mu_n = (kappa_0 * mu_0 + n * ybar) / kappa_n
nu_n = nu_0 + n
sigmasq_n = 1/nu_n * (nu_0 * sigmasq_0 + sumsqc + kappa_0 * n / (kappa_0 + n) * (ybar - mu_0)**2)
return mu_n, kappa_n, sigmasq_n, nu_n
else:
return mu_0, kappa_0, sigmasq_0, nu_0
### Gibbs sampling
def resample(self,data=[]):
mu_n, kappa_n, sigmasq_n, nu_n = self._posterior_hypparams(*self._get_statistics(data))
self.sigmasq = nu_n * sigmasq_n / np.random.chisquare(nu_n)
self.mu = np.sqrt(self.sigmasq / kappa_n) * np.random.randn() + mu_n
return self
### Collapsed
def log_marginal_likelihood(self,data):
n = getdatasize(data)
kappa_0, sigmasq_0, nu_0 = self.kappa_0, self.sigmasq_0, self.nu_0
mu_n, kappa_n, sigmasq_n, nu_n = self._posterior_hypparams(*self._get_statistics(data))
return special.gammaln(nu_n/2) - special.gammaln(nu_0/2) \
+ 0.5*(np.log(kappa_0) - np.log(kappa_n)
+ nu_0 * (np.log(nu_0) + np.log(sigmasq_0))
- nu_n * (np.log(nu_n) + np.log(sigmasq_n))
- n*np.log(np.pi))
def log_predictive_single(self,y,olddata):
# mostly for testing or speed
mu_n, kappa_n, sigmasq_n, nu_n = self._posterior_hypparams(*self._get_statistics(olddata))
return stats.t.logpdf(y,nu_n,loc=mu_n,scale=np.sqrt((1+kappa_n)*sigmasq_n/kappa_n))
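# Illustrative sketch (added, not part of the original library): drawing
# posterior samples of (mu, sigmasq) with ScalarGaussianNIX on synthetic data.
# The hyperparameter values below are arbitrary assumptions chosen for the demo.
def _demo_scalar_gaussian_nix():
    data = 2.0 + 0.5*np.random.randn(100)   # synthetic observations
    d = ScalarGaussianNIX(mu_0=0.,kappa_0=1.,sigmasq_0=1.,nu_0=1.)
    samples = []
    for _ in range(200):
        d.resample(data)                    # one Gibbs draw from the posterior
        samples.append((d.mu,d.sigmasq))
    return samples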
class ScalarGaussianNonconjNIX(_ScalarGaussianBase, GibbsSampling):
'''
Non-conjugate separate priors on mean and variance parameters, via
mu ~ Normal(mu_0,tausq_0)
sigmasq ~ (Scaled-)Inverse-ChiSquared(sigmasq_0,nu_0)
'''
def __init__(self,mu=None,sigmasq=None,mu_0=None,tausq_0=None,sigmasq_0=None,nu_0=None,
niter=1):
self.mu, self.sigmasq = mu, sigmasq
self.mu_0, self.tausq_0 = mu_0, tausq_0
self.sigmasq_0, self.nu_0 = sigmasq_0, nu_0
self.niter = niter
if mu is sigmasq is None \
and not any(_ is None for _ in (mu_0, tausq_0, sigmasq_0, nu_0)):
            self.resample() # initialize from prior
@property
def hypparams(self):
return dict(mu_0=self.mu_0,tausq_0=self.tausq_0,
sigmasq_0=self.sigmasq_0,nu_0=self.nu_0)
def resample(self,data=[],niter=None):
n = getdatasize(data)
niter = self.niter if niter is None else niter
if n > 0:
data = flattendata(data)
datasum = data[gi(data)].sum()
datasqsum = (data[gi(data)]**2).sum()
nu_n = self.nu_0 + n
for itr in range(niter):
# resample mean
tausq_n = 1/(1/self.tausq_0 + n/self.sigmasq)
mu_n = tausq_n*(self.mu_0/self.tausq_0 + datasum/self.sigmasq)
self.mu = np.sqrt(tausq_n)*np.random.normal() + mu_n
# resample variance
sigmasq_n = (self.nu_0*self.sigmasq_0 + (datasqsum + n*self.mu**2-2*datasum*self.mu))/nu_n
self.sigmasq = sigmasq_n*nu_n/np.random.chisquare(nu_n)
else:
self.mu = np.sqrt(self.tausq_0) * np.random.normal() + self.mu_0
self.sigmasq = self.sigmasq_0*self.nu_0/np.random.chisquare(self.nu_0)
return self
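# Illustrative sketch (added): the non-conjugate model alternates Gibbs updates
# of mu and sigmasq, so `niter` sets the number of inner sweeps per resample
# call. Hyperparameters below are arbitrary demo values.
def _demo_scalar_gaussian_nonconj_nix():
    data = 1.0 + 2.0*np.random.randn(50)
    d = ScalarGaussianNonconjNIX(mu_0=0.,tausq_0=10.,sigmasq_0=1.,nu_0=1.,niter=5)
    d.resample(data)                        # 5 alternating (mu,sigmasq) sweeps
    return d.mu, d.sigmasq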
class ScalarGaussianNonconjNIG(_ScalarGaussianBase, MeanField, MeanFieldSVI):
# NOTE: this is like ScalarGaussianNonconjNiIG except prior is in natural
# coordinates
def __init__(self,h_0,J_0,alpha_0,beta_0,
mu=None,sigmasq=None,
h_mf=None,J_mf=None,alpha_mf=None,beta_mf=None,niter=1):
self.h_0, self.J_0 = h_0, J_0
self.alpha_0, self.beta_0 = alpha_0, beta_0
self.h_mf = h_mf if h_mf is not None else J_0 * np.random.normal(h_0/J_0,1./np.sqrt(J_0))
self.J_mf = J_mf if J_mf is not None else J_0
self.alpha_mf = alpha_mf if alpha_mf is not None else alpha_0
self.beta_mf = beta_mf if beta_mf is not None else beta_0
self.niter = niter
self.mu = mu if mu is not None else np.random.normal(h_0/J_0,1./np.sqrt(J_0))
self.sigmasq = sigmasq if sigmasq is not None else 1./np.random.gamma(alpha_0,1./beta_0)
@property
def hypparams(self):
return dict(h_0=self.h_0,J_0=self.J_0,alpha_0=self.alpha_0,beta_0=self.beta_0)
@property
def _E_mu(self):
# E[mu], E[mu**2]
return self.h_mf / self.J_mf, 1./self.J_mf + (self.h_mf / self.J_mf)**2
@property
def _E_sigmasq(self):
# E[1/sigmasq], E[ln sigmasq]
return self.alpha_mf / self.beta_mf, \
np.log(self.beta_mf) - special.digamma(self.alpha_mf)
@property
def natural_hypparam(self):
return np.array([self.alpha_0,self.beta_0,self.h_0,self.J_0])
@natural_hypparam.setter
def natural_hypparam(self,natural_hypparam):
self.alpha_0, self.beta_0, self.h_0, self.J_0 = natural_hypparam
@property
def mf_natural_hypparam(self):
return np.array([self.alpha_mf,self.beta_mf,self.h_mf,self.J_mf])
@mf_natural_hypparam.setter
def mf_natural_hypparam(self,mf_natural_hypparam):
self.alpha_mf, self.beta_mf, self.h_mf, self.J_mf = mf_natural_hypparam
# set point estimates of (mu, sigmasq) for plotting and stuff
self.mu, self.sigmasq = self.h_mf / self.J_mf, self.beta_mf / (self.alpha_mf-1)
def _resample_from_mf(self):
self.mu, self.sigmasq = np.random.normal(self.h_mf/self.J_mf,np.sqrt(1./self.J_mf)), \
np.random.gamma(self.alpha_mf,1./self.beta_mf)
return self
def expected_log_likelihood(self,x):
(Emu, Emu2), (Esigmasqinv, Elnsigmasq) = self._E_mu, self._E_sigmasq
return -1./2 * Esigmasqinv * (x**2 + Emu2 - 2*x*Emu) \
- 1./2*Elnsigmasq - 1./2*np.log(2*np.pi)
def get_vlb(self):
# E[ln p(mu) / q(mu)] part
h_0, J_0, J_mf = self.h_0, self.J_0, self.J_mf
Emu, Emu2 = self._E_mu
p_mu_avgengy = -1./2*J_0*Emu2 + h_0*Emu \
- 1./2*(h_0**2/J_0) + 1./2*np.log(J_0) - 1./2*np.log(2*np.pi)
q_mu_entropy = 1./2*np.log(2*np.pi*np.e/J_mf)
# E[ln p(sigmasq) / q(sigmasq)] part
alpha_0, beta_0, alpha_mf, beta_mf = \
self.alpha_0, self.beta_0, self.alpha_mf, self.beta_mf
(Esigmasqinv, Elnsigmasq) = self._E_sigmasq
p_sigmasq_avgengy = (-alpha_0-1)*Elnsigmasq + (-beta_0)*Esigmasqinv \
- (special.gammaln(alpha_0) - alpha_0*np.log(beta_0))
q_sigmasq_entropy = alpha_mf + np.log(beta_mf) + special.gammaln(alpha_mf) \
- (1+alpha_mf)*special.digamma(alpha_mf)
return p_mu_avgengy + q_mu_entropy + p_sigmasq_avgengy + q_sigmasq_entropy
def meanfield_sgdstep(self,data,weights,prob,stepsize):
# like meanfieldupdate except we step the factors simultaneously
# NOTE: unlike the fully conjugate case, there are interaction terms, so
# we work on the destructured pieces
neff, y, ysq = self._get_weighted_statistics(data,weights)
Emu, _ = self._E_mu
Esigmasqinv, _ = self._E_sigmasq
# form new natural hyperparameters as if doing a batch update
alpha_new = self.alpha_0 + 1./prob * 1./2*neff
beta_new = self.beta_0 + 1./prob * 1./2*(ysq + neff*Emu**2 - 2*Emu*y)
h_new = self.h_0 + 1./prob * Esigmasqinv * y
J_new = self.J_0 + 1./prob * Esigmasqinv * neff
# take a step
self.alpha_mf = (1-stepsize)*self.alpha_mf + stepsize*alpha_new
self.beta_mf = (1-stepsize)*self.beta_mf + stepsize*beta_new
self.h_mf = (1-stepsize)*self.h_mf + stepsize*h_new
self.J_mf = (1-stepsize)*self.J_mf + stepsize*J_new
# calling this setter will set point estimates for (mu,sigmasq) for
# plotting and sampling and stuff
self.mf_natural_hypparam = (self.alpha_mf, self.beta_mf, self.h_mf, self.J_mf)
return self
def meanfieldupdate(self,data,weights,niter=None):
niter = niter if niter is not None else self.niter
neff, y, ysq = self._get_weighted_statistics(data,weights)
        for itr in range(niter):
# update q(sigmasq)
Emu, _ = self._E_mu
self.alpha_mf = self.alpha_0 + 1./2*neff
self.beta_mf = self.beta_0 + 1./2*(ysq + neff*Emu**2 - 2*Emu*y)
# update q(mu)
Esigmasqinv, _ = self._E_sigmasq
self.h_mf = self.h_0 + Esigmasqinv * y
self.J_mf = self.J_0 + Esigmasqinv * neff
# calling this setter will set point estimates for (mu,sigmasq) for
# plotting and sampling and stuff
self.mf_natural_hypparam = \
(self.alpha_mf, self.beta_mf, self.h_mf, self.J_mf)
return self
def _get_weighted_statistics(self,data,weights):
if isinstance(data,np.ndarray):
neff = weights.sum()
y = weights.dot(data)
ysq = weights.dot(data**2)
else:
return sum(
self._get_weighted_statistics(d,w) for d,w in zip(data,weights))
return np.array([neff,y,ysq])
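# Illustrative sketch (added): a single batch mean-field update for the
# natural-parameter Normal-Inverse-Gamma model. The prior settings are
# arbitrary demo values; `weights` would normally be responsibilities
# coming from a mixture or HMM.
def _demo_scalar_gaussian_nonconj_nig():
    data = np.random.randn(100)
    weights = np.ones(100)
    d = ScalarGaussianNonconjNIG(h_0=0.,J_0=1e-2,alpha_0=2.,beta_0=1.)
    d.meanfieldupdate(data,weights)
    return d.mu, d.sigmasq                  # point estimates set from q()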
class ScalarGaussianFixedvar(_ScalarGaussianBase, GibbsSampling):
'''
Conjugate normal prior on mean.
'''
def __init__(self,mu=None,sigmasq=None,mu_0=None,tausq_0=None):
self.mu = mu
self.sigmasq = sigmasq
self.mu_0 = mu_0
self.tausq_0 = tausq_0
if mu is None and not any(_ is None for _ in (mu_0,tausq_0)):
            self.resample() # initialize from prior
@property
def hypparams(self):
return dict(mu_0=self.mu_0,tausq_0=self.tausq_0)
def _posterior_hypparams(self,n,xbar):
mu_0, tausq_0 = self.mu_0, self.tausq_0
sigmasq = self.sigmasq
if n > 0:
tausq_n = 1/(1/tausq_0 + n/sigmasq)
mu_n = (mu_0/tausq_0 + n*xbar/sigmasq)*tausq_n
return mu_n, tausq_n
else:
return mu_0, tausq_0
def resample(self,data=[]):
mu_n, tausq_n = self._posterior_hypparams(*self._get_statistics(data))
self.mu = np.sqrt(tausq_n)*np.random.randn()+mu_n
return self
def _get_statistics(self,data):
n = getdatasize(data)
if n > 0:
if isinstance(data,np.ndarray):
xbar = data.mean()
else:
xbar = sum(d.sum() for d in data)/n
else:
xbar = None
return n, xbar
def _get_weighted_statistics(self,data,weights):
if isinstance(data,np.ndarray):
neff = weights.sum()
else:
neff = sum(w.sum() for w in weights)
if neff > weps:
if isinstance(data,np.ndarray):
xbar = data.dot(weights) / neff
else:
xbar = sum(w.dot(d) for d,w in zip(data,weights)) / neff
else:
xbar = None
return neff, xbar
def max_likelihood(self,data,weights=None):
if weights is None:
_, xbar = self._get_statistics(data)
else:
_, xbar = self._get_weighted_statistics(data,weights)
self.mu = xbar
| mit |
vinutah/projects | ml/pde/fd1d_heat_explicit_test01.py | 1 | 7024 | #!/usr/bin/env python
def fd1d_heat_explicit_test01(path,mode,weightsFile):
"""fd1d_heat_explicit_test01 does a simple test problem"""
from fd1d_heat_explicit import fd1d_heat_explicit
from fd1d_heat_explicit_cfl import fd1d_heat_explicit_cfl
from r8mat_write import r8mat_write
from r8vec_write import r8vec_write
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
import numpy as np
print ''
print 'FD1D_HEAT_EXPLICIT_TEST01:'
print ' Compute an approximate solution to the time-dependent'
print ' one dimensional heat equation:'
print ''
print ' dH/dt - K * d2H/dx2 = f(x,t)'
print ''
print ' Run a simple test case.'
"""Heat coefficient"""
k = k_test01 ( )
#
# X_NUM is the number of equally spaced nodes to use between 0 and 1.
#
x_num = 21
x_min = 0.0
x_max = 1.0
dx = ( x_max - x_min ) / ( x_num - 1 )
x = np.linspace ( x_min, x_max, x_num )
#
  #  T_NUM is the number of equally spaced time points between 0 and 80.0.
#
t_num = 201
t_min = 0.0
t_max = 80.0
dt = ( t_max - t_min ) / ( t_num - 1 )
t = np.linspace ( t_min, t_max, t_num )
#
# Get the CFL coefficient.
#
cfl = fd1d_heat_explicit_cfl ( k, t_num, t_min, t_max, x_num, x_min, x_max )
print ''
print ' Number of X nodes = %d' % ( x_num )
print ' X interval is [%f,%f]' % ( x_min, x_max )
print ' X spacing is %f' % ( dx )
print ' Number of T values = %d' % ( t_num )
print ' T interval is [%f,%f]' % ( t_min, t_max )
print ' T spacing is %f' % ( dt )
print ' Constant K = %g' % ( k )
print ' CFL coefficient = %g' % ( cfl )
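  #
  #  Added note: for this explicit scheme the CFL number is
  #  cfl = k * dt / dx**2 and the iteration is only stable when cfl < 0.5.
  #  With k = 0.002, dx = 0.05 and dt = 0.4 the test case gives
  #  cfl = 0.002 * 0.4 / 0.0025 = 0.32, so it is stable.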
#
# Running the code produces an array H of temperatures H(t,x),
# and vectors x and t.
#
hmat = np.zeros ( ( x_num, t_num ) )
for j in range ( 0, t_num ):
if ( j == 0 ):
h = ic_test01 ( x_num, x, t[j] ,mode)
h = bc_test01 ( x_num, x, t[j], h ,mode)
else:
h = fd1d_heat_explicit ( x_num, x, t[j-1], dt, cfl, rhs_test01, bc_test01, h, mode, weightsFile )
for i in range ( 0, x_num ):
hmat[i,j] = h[i]
#
# Plot the data.
#
tmat, xmat = np.meshgrid ( t, x )
fig = plt.figure ( )
ax = fig.add_subplot ( 111, projection = '3d' )
ax = Axes3D ( fig )
surf = ax.plot_surface ( xmat, tmat, hmat )
plt.xlabel ( '<---X--->' )
plt.ylabel ( '<---T--->' )
plt.title ( 'U(X,T)' )
save_at = path.strip()
print path, save_at
filename = str(save_at) + 'plot_test_' + str(mode) + '.png'
print filename
plt.savefig (filename)
#plt.show ( )
#
# Write the data to files.
#
filename = str(save_at) + 'h_test01.txt'
r8mat_write ( filename, x_num, t_num, hmat )
#filename = str(save_at) + 't_test01.txt'
#r8vec_write ( filename, t_num, t )
#filename = str(save_at) + 'x_test01.txt'
#r8vec_write ( filename, x_num, x )
print ''
print ' H(X,T) written to "h_test01.txt"'
  #print '  T values written to "t_test01.txt"'
  #print '  X values written to "x_test01.txt"'
#
# Terminate.
#
print ''
print 'FD1D_HEAT_EXPLICIT_TEST01:'
print ' Normal end of execution.'
return
def bc_test01 ( x_num, x, t, h, mode ):
# bc_test01 evaluates the boundary conditions for problem 1.
# Input, integer X_NUM, the number of nodes.
# Input, real X(X_NUM,1), the node coordinates.
# Input, real T, the current time.
# Input, real H(X_NUM), the current heat values.
# Output, real H(X_NUM), the current heat values, after boundary
# conditions have been imposed.
#
#for uniform
if mode == 'original_uni_1':
h[0] = 90.0
h[x_num-1] = 70.0
if mode == 'original_uni_2':
h[0] = 50.0
h[x_num-1] = 50.0
if mode == 'original_uni_3':
h[0] = 25.0
h[x_num-1] = 85.0
if mode == 'original_uni_4':
h[0] = 75.0
h[x_num-1] = 80.0
#for tri
if mode == 'original_tri_1':
h[0] = 0.0
h[x_num-1] = 0.0
if mode == 'original_tri_2':
h[0] = 5.0
h[x_num-1] = 5.0
if mode == 'original_tri_3':
h[0] = 20.0
h[x_num-1] = 20.0
if mode == 'original_tri_4':
h[0] = 10.0
h[x_num-1] = 10.0
#for pwl
if mode == 'original_pwl_1':
h[0] = 0.0
h[x_num-1] = 50.0
if mode == 'original_pwl_2':
h[0] = 00.0
h[x_num-1] = 90.0
if mode == 'original_pwl_3':
h[0] = 0.0
h[x_num-1] = 60.0
if mode == 'original_pwl_4':
h[0] = 50.0
h[x_num-1] = 0.0
return h
def ic_test01 ( x_num, x, t , mode):
# ic_test01 evaluates the initial condition for problem 1.
# Input, integer X_NUM, the number of nodes.
# Input, real X(X_NUM), the node coordinates.
# Input, real T, the initial time.
# Output, real H(X_NUM), the heat values at the initial time.
#
import numpy as np
h = np.zeros ( x_num )
for i in range ( 0, x_num ):
#for uniform
if mode == 'original_uni_1':
h[i] = 50.0
if mode == 'original_uni_2':
h[i] = 25.0
if mode == 'original_uni_3':
h[i] = 0.0
if mode == 'original_uni_4':
h[i] = 10.0
if mode == 'original_tri_1':
A = 50
if (i< (float(x_num/2))):
h[i] = float(2*A*i/x_num)
else:
h[i] = -1 * (float(2*A*i/x_num)) + 2*A
if mode == 'original_tri_2':
A = 20
if (i< (float(x_num/2))):
h[i] = float(2*A*i/x_num)
else:
h[i] = -1 * (float(2*A*i/x_num)) + 2*A
if mode == 'original_tri_3':
A = 50
if (i< (float(x_num/2))):
h[i] = float(2*A*i/x_num)
else:
h[i] = -1 * (float(2*A*i/x_num)) + 2*A
if mode == 'original_tri_4':
A = 60
if (i< (float(x_num/2))):
h[i] = float(2*A*i/x_num)
else:
h[i] = -1 * (float(2*A*i/x_num)) + 2*A
if mode == 'original_pwl_1':
if (i < (float(x_num)/2) ):
h[i] = 0.0
else:
h[i] = 50.0
if mode == 'original_pwl_2':
if (i < (float(x_num*3/4)) ):
h[i] = 0.0
else:
h[i] = 70.0
if mode == 'original_pwl_3':
if (i < (float(x_num*3/4)) ):
h[i] = 20.0
else:
h[i] = 0.0
if mode == 'original_pwl_4':
if (i < (float(x_num*3/4)) ):
h[i] = 50.0
else:
h[i] = 0.0
return h
def k_test01 ( ):
"""
k_test01 evaluates the conductivity for problem 1.
Output, real K, the conducitivity.
"""
k = 0.002
return k
def rhs_test01 ( x_num, x, t ):
# RHS_TEST01 evaluates the right hand side for problem 1.
# Input, integer X_NUM, the number of nodes.
# Input, real X(X_NUM,1), the node coordinates.
# Input, real T, the current time.
# Output, real VALUE(X_NUM,1), the source term.
import numpy as np
value = np.zeros ( x_num )
return value
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
fd1d_heat_explicit_test01 (path, mode, weightsFile)
timestamp ( )
| mit |
mauzeh/formation-flight | runs/multihub/multivariate/plot.py | 1 | 3523 | import config
from lib.util import tsv_get_column_index
import os
import math
from lib.util import make_sure_path_exists
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
config.sink_dir = '%s/sink' % os.path.dirname(__file__)
config.count_hubs = 5
config.axis_x = {
'name' : r'$Z$',
'column' : 'config_Z'
}
config.axis_y = {
'name' : r'$L$',
'column' : 'config_lock_time'
}
config.interesting_z_axes = [{
'name' : 'Distance Penalty',
'column' : 'distance_penalty'
},{
'name' : 'Distance Success Rate',
'column' : 'distance_success_rate',
#'levels' : np.arange(.7,.8,.01)
},{
'name' : 'Formation Success Rate',
'column' : 'formation_success_rate'
},{
'name' : 'Average Formation Size',
'column' : 'avg_formation_size'
},{
'name' : 'Formation Count',
'column' : 'formation_count'
},{
'name' : 'Fuel Saved [%]',
'column' : 'fuel_saved'
},{
'name' : 'Fuel Saved [kg]',
'column' : 'fuel_saved_abs'
},{
'name' : 'Fuel Saved (Without Delay Costs)',
'column' : 'fuel_saved_disregard_delay'
},{
'name' : 'Average Hub Delay [min]',
'column' : 'hub_delay_avg'
},{
'name' : 'Delay Fuel [kg]',
'column' : 'fuel_delay'
},{
'name' : r'$Q_{avg}$',
'column' : 'Q_avg'
},{
'name' : 'Delay Fuel [kg]',
'column' : 'fuel_delay'
}]
def run():
data_file = '%s/latest.tsv' % config.sink_dir
data = np.loadtxt(
open(data_file, 'rb'),
delimiter = "\t",
skiprows = 1
)
axis_x = config.axis_x
axis_y = config.axis_y
i = 0
for axis_z in config.interesting_z_axes:
i += 1
print 'Plotting %s (%d of %d)' % (
axis_z['column'], i, len(config.interesting_z_axes)
)
plt.figure()
x = data[:, tsv_get_column_index(data_file, axis_x['column'])]
y = data[:, tsv_get_column_index(data_file, axis_y['column'])]
z = data[:, tsv_get_column_index(data_file, axis_z['column'])]
# Note that we must convert the lock time into the lock distance L
if axis_x['column'] == 'config_lock_time':
x = 300 * x / 60
if axis_y['column'] == 'config_lock_time':
y = 300 * y / 60
try:
nx = config.output_nx
ny = config.output_ny
except AttributeError:
N = len(z)
            nx = int(math.sqrt(N))  # reshape needs integer dimensions
ny = nx
#
#print 'variable: %s, nx = %d, ny = %d, count z = %d. z = %s' % (
# axis_z['column'],
# nx, ny, len(z), z
#)
x = x.reshape(nx, ny)
y = y.reshape(nx, ny)
z = z.reshape(nx, ny)
plt.xlabel(axis_x['name'])
plt.ylabel(axis_y['name'])
plt.grid(True)
try:
cs = plt.contour(x, y, z, axis_z['levels'])
except KeyError:
cs = plt.contour(x, y, z, 10)
plt.clabel(cs)
plt.colorbar()
#plt.title(r'%s ($n=%d$)' % (axis_z['name'], config.count_hubs))
t = plt.title(r'%s' % axis_z['name'])
#t.set_y(1.03)
fig_path = '%s/plot_%s.pdf' % (config.sink_dir, axis_z['column'])
fig_path = fig_path.replace('/runs/', '/plots/')
fig_path = fig_path.replace('/sink/', '/')
make_sure_path_exists(os.path.dirname(fig_path))
print fig_path
#plt.show()
#return
plt.savefig(fig_path) | mit |
cybernet14/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Johns and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
                raise IOError("%s is missing" % archive_path)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
            # average the color channels to compute a gray-level representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
min_faces_per_person : int, optional, default None
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid statistical
        correlation with the background
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
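# Example usage (illustrative sketch, not part of the module): fetch the people
# dataset restricted to frequently photographed individuals.  The download
# happens on the first call and is cached under ``data_home``.
#
#     from sklearn.datasets import fetch_lfw_people
#     lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
#     print(lfw_people.images.shape)    # e.g. (1288, 50, 37) for this setting
#     print(lfw_people.target_names)    # the people with >= 70 pictures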
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
    In the official `README.txt`_ this task is described as the
    "Restricted" task. As I am not sure how to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid statistical
        correlation with the background
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
    target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
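# Example usage (illustrative sketch): load the development training pairs.
#
#     from sklearn.datasets import fetch_lfw_pairs
#     lfw_pairs_train = fetch_lfw_pairs(subset='train')
#     print(lfw_pairs_train.pairs.shape)    # (2200, 2, 62, 47) with defaults
#     print(lfw_pairs_train.target[:10])    # 1 = same person, 0 = different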
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
thorwhalen/ut | ppi/naive_bayes_graph.py | 1 | 8556 | __author__ = 'thor'
import copy
import re
# import pandas as pd
# import numpy as np
# import ut as ut
#
# import ut.daf.get
class BipartiteStats(object):
"""
The class that manages the count data.
"""
# _count
# a
# b
# ab
# ba
def __init__(self, get_a_list_from_item=None, get_b_list_from_item=None):
self._count = CountVal(0.0)
self.a = KeyVal()
self.b = KeyVal()
self.ab = KeyVal()
self.ba = KeyVal()
self.get_a_list_from_item = get_a_list_from_item or get_a_list_from_item_default
self.get_b_list_from_item = get_b_list_from_item or get_b_list_from_item_default
def count_data(self, item_iterator, get_a_list_from_item=None, get_b_list_from_item=None):
self.__init__(get_a_list_from_item=get_a_list_from_item,
get_b_list_from_item=get_b_list_from_item)
for item in item_iterator:
self._count.increment()
a_list = self.get_a_list_from_item(item)
b_list = self.get_b_list_from_item(item)
for a in a_list:
self.a.add(KeyVal({a: Val(1.0)}))
for b in b_list:
self.b.add(KeyVal({b: Val(1.0)}))
for a in a_list:
for b in b_list:
self.ab.add(KeyVal({a: KeyVal({b: Val(1.0)})}))
self.ba.add(KeyVal({b: KeyVal({a: Val(1.0)})}))
# def normalize(self, alpha=1, beta=1):
# prior_num = Val(float(alpha))
# prior_denom = Val(float(alpha + beta))
# self.ab = (self.ab + prior_num) / (self.b + prior_denom)
# self.ba = (self.ab + prior_num) / (self.a + prior_denom)
# self.a = (self.a + prior_num) / (self._count + prior_denom)
# self.b = (self.b + prior_num) / (self._count + prior_denom)
# default functions
def get_a_list_from_item_default(pair_set):
return pair_set[0]
def get_b_list_from_item_default(pair_set):
return pair_set[1]
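# Illustrative sketch (added, not in the original module): counting
# co-occurrences of "a" items and "b" items from an iterator of
# (a_list, b_list) pairs. The items below are made up for the example.
def _demo_bipartite_stats():
    items = [(['tag1', 'tag2'], ['docA']),
             (['tag1'], ['docA', 'docB'])]
    stats = BipartiteStats()
    stats.count_data(items)
    # stats.a.unwrapped()  -> {'tag1': 2.0, 'tag2': 1.0}
    # stats.ab.unwrapped() -> {'tag1': {'docA': 2.0, 'docB': 1.0}, 'tag2': {'docA': 1.0}}
    return stats.a.unwrapped(), stats.ab.unwrapped()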
class BipartiteEdgeCounts(object):
"""
The class that manages the count data.
"""
# _count
# a_count
# b_count
# ab_count
# ba_count
def __init__(self, get_a_list_from_item=None, get_b_list_from_item=None):
self._count = CountVal(0.0)
self.a_count = KeyCount()
self.b_count = KeyCount()
self.ab_count = KeyCount()
self.ba_count = KeyCount()
self.get_a_list_from_item = get_a_list_from_item or get_a_list_from_item_default
self.get_b_list_from_item = get_b_list_from_item or get_b_list_from_item_default
def learn(self, item_iterator):
self.__init__()
for item in item_iterator:
self._count.increment()
a_list = self.get_a_list_from_item(item)
b_list = self.get_b_list_from_item(item)
for a in a_list:
self.a_count.increment(a)
for b in b_list:
self.b_count.increment(b)
for a in a_list:
for b in b_list:
self.ab_count.add(KeyVal({a: KeyVal({b: Val(1.0)})}))
self.ba_count.add(KeyVal({b: KeyVal({a: Val(1.0)})}))
class Val(object):
"""
The mother class of other Val classes.
A Val should hold a value and be able to add and subtract from it.
This mother class implements normal addition of floats, but should be overridden to
implement other types of values such as multiplication, addition of vectors,
merging of likelihoods etc.
Most of the time, you'll only need to override the add() and the sub() methods.
You may also want to override the default value. This value should act as the
'unit' or 'neutral' value of the add operation (therefore the sub operation as well).
For example, the unit value of multiplication (which will still be called "add") is 1.0.
"""
v = 0.0
def __init__(self, v):
if isinstance(v, Val):
self.v = copy.deepcopy(v.v)
else:
self.v = copy.deepcopy(v)
def add(self, y):
self.v = self.v + y.v
def sub(self, y):
self.v = self.v - y.v
def mul(self, y):
self.v = self.v * y.v
def div(self, y):
self.v = self.v / y.v
def unwrapped(self):
if hasattr(self.v, 'v'):
return self.v.unwrapped()
else:
return self.v
def __add__(self, y):
x = copy.deepcopy(self)
x.add(y)
return x
def __sub__(self, y):
x = copy.deepcopy(self)
x.sub(y)
return x
def __mul__(self, y):
x = copy.deepcopy(self)
x.mul(y)
return x
def __div__(self, y):
x = copy.deepcopy(self)
x.div(y)
return x
def __str__(self):
return str(self.v)
def __repr__(self):
return str(self.v)
class CountVal(Val):
v = 0.0
def __init__(self, v=0.0):
super(CountVal, self).__init__(v)
self.v = float(v)
def increment(self):
self.v += 1.0
class LHVal(Val):
"""
An LHVal manages a binary likelihood.
That is, it holds (as a single float) the binary likelihood distribution and allows one to
merge two such distributions.
"""
v = .5; # where the value will be stored
def __init__(self, v=.5):
super(LHVal, self).__init__(v)
self.v = float(v)
def add(self, y):
self.v = (self.v * y.v) / (self.v * y.v + (1 - self.v) * (1 - y.v))
def sub(self, y):
self.v = (self.v / y.v) / (self.v / y.v + (1 - self.v) / (1 - y.v))
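# Illustrative sketch (added): LHVal merges two binary likelihoods under the
# naive Bayes assumption; two independent sources each reporting p = 0.8
# combine to 0.8*0.8 / (0.8*0.8 + 0.2*0.2) ~= 0.941.
def _demo_lhval():
    combined = LHVal(0.8) + LHVal(0.8)
    return combined.v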
class KeyVal(Val):
"""
Here the type of the value is a dict (to implement a map).
The addition of two dicts (therefore the add() method) v and w.
The add(val) method will here be defined to be a sum-update of the (key,value)
pairs of the
Extends a map so that one can add and subtract dict pairs by adding or subtracting
the (key-aligned) values
"""
def __init__(self, v=None):
if v is None:
self.v = dict()
else:
super(KeyVal, self).__init__(v)
def add(self, kv):
if hasattr(kv.v, 'keys'):
for k in list(kv.v.keys()):
if k in list(self.v.keys()):
self.v[k].add(kv.v[k])
else:
self.v[k] = kv.v[k]
else:
for k in list(self.v.keys()):
self.v[k].v = self.v[k].v + kv.v
def sub(self, kv):
if hasattr(kv.v, 'keys'):
for k in list(kv.v.keys()):
if k in list(self.v.keys()):
self.v[k].sub(kv.v[k])
else:
for k in list(self.v.keys()):
self.v[k].v = self.v[k].v - kv.v
def mul(self, kv):
if hasattr(kv.v, 'keys'):
for k in list(kv.v.keys()):
if k in list(self.v.keys()):
self.v[k].mul(kv.v[k])
else:
self.v[k] = kv.v[k]
else:
for k in list(self.v.keys()):
self.v[k].v = self.v[k].v * kv.v
def div(self, kv):
if hasattr(kv.v, 'keys'):
for k in list(kv.v.keys()):
if k in list(self.v.keys()):
self.v[k].div(kv.v[k])
else:
for k in list(self.v.keys()):
self.v[k].v = self.v[k].v / kv.v
def unwrapped(self):
return {k: v.unwrapped() for k, v in self.v.items()}
# d = dict()
# for k in self.v.keys():
# this_v = self.v[k]
# # print hasattr(this_v, 'v')
# # d.update({k: this_v.unwrapped()})
# if hasattr(this_v, 'v'):
# # print 'oh!'
# d.update({k: this_v.unwrapped()})
# else:
# # print 'ah?'
# d.update({k: this_v})
# return d
class KeyCount(KeyVal):
# v = dict()
# init_val_constructor = None;
"""
Extends a map so that one can add and subtract dict pairs by adding or subtracting the (key-aligned) values
"""
def __init__(self, v=None):
if v is None:
self.v = dict()
else:
super(KeyCount, self).__init__(v)
def increment(self, k):
if k in self.v:
self.v[k].add(Val(1.0))
else:
self.v[k] = Val(1.0)
# if __name__ == "__main__":
# d = ut.daf.get.rand(nrows=9)
# s = d['A'].iloc[0:5]
# ss = d['B'].iloc[3:8]
# t = s + ss
# print t
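# Illustrative sketch (added): key-aligned arithmetic with KeyVal and per-key
# counting with KeyCount.
def _demo_keyval():
    x = KeyVal({'a': Val(1.0), 'b': Val(2.0)})
    y = KeyVal({'b': Val(3.0), 'c': Val(4.0)})
    z = x + y                      # -> {'a': 1.0, 'b': 5.0, 'c': 4.0}
    counts = KeyCount()
    for k in ['a', 'a', 'b']:
        counts.increment(k)        # -> {'a': 2.0, 'b': 1.0}
    return z.unwrapped(), counts.unwrapped()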
| mit |
jstoxrocky/statsmodels | statsmodels/sandbox/panel/mixed.py | 31 | 21019 | """
Mixed effects models
Author: Jonathan Taylor
Author: Josef Perktold
License: BSD-3
Notes
------
It's pretty slow if the model is misspecified, in my first example convergence
in loglike is not reached within 2000 iterations. Added stop criteria based
on convergence of parameters instead.
With correctly specified model, convergence is fast, in 6 iterations in
example.
"""
from __future__ import print_function
import numpy as np
import numpy.linalg as L
from statsmodels.base.model import LikelihoodModelResults
from statsmodels.tools.decorators import cache_readonly
class Unit(object):
"""
Individual experimental unit for
EM implementation of (repeated measures)
mixed effects model.
\'Maximum Likelihood Computations with Repeated Measures:
Application of the EM Algorithm\'
Nan Laird; Nicholas Lange; Daniel Stram
Journal of the American Statistical Association,
Vol. 82, No. 397. (Mar., 1987), pp. 97-105.
Parameters
----------
endog : ndarray, (nobs,)
response, endogenous variable
exog_fe : ndarray, (nobs, k_vars_fe)
explanatory variables as regressors or fixed effects,
should include exog_re to correct mean of random
coefficients, see Notes
exog_re : ndarray, (nobs, k_vars_re)
explanatory variables or random effects or coefficients
Notes
-----
If the exog_re variables are not included in exog_fe, then the
mean of the random constants or coefficients are not centered.
    The covariance matrix of the random parameter estimates is not
centered in this case. (That's how it looks to me. JP)
"""
def __init__(self, endog, exog_fe, exog_re):
self.Y = endog
self.X = exog_fe
self.Z = exog_re
self.n = endog.shape[0]
def _compute_S(self, D, sigma):
"""covariance of observations (nobs_i, nobs_i) (JP check)
Display (3.3) from Laird, Lange, Stram (see help(Unit))
"""
self.S = (np.identity(self.n) * sigma**2 +
np.dot(self.Z, np.dot(D, self.Z.T)))
def _compute_W(self):
"""inverse covariance of observations (nobs_i, nobs_i) (JP check)
Display (3.2) from Laird, Lange, Stram (see help(Unit))
"""
self.W = L.inv(self.S)
def compute_P(self, Sinv):
"""projection matrix (nobs_i, nobs_i) (M in regression ?) (JP check, guessing)
Display (3.10) from Laird, Lange, Stram (see help(Unit))
W - W X Sinv X' W'
"""
t = np.dot(self.W, self.X)
self.P = self.W - np.dot(np.dot(t, Sinv), t.T)
def _compute_r(self, alpha):
"""residual after removing fixed effects
Display (3.5) from Laird, Lange, Stram (see help(Unit))
"""
self.r = self.Y - np.dot(self.X, alpha)
def _compute_b(self, D):
"""coefficients for random effects/coefficients
Display (3.4) from Laird, Lange, Stram (see help(Unit))
D Z' W r
"""
self.b = np.dot(D, np.dot(np.dot(self.Z.T, self.W), self.r))
def fit(self, a, D, sigma):
"""
Compute unit specific parameters in
Laird, Lange, Stram (see help(Unit)).
Displays (3.2)-(3.5).
"""
self._compute_S(D, sigma) #random effect plus error covariance
self._compute_W() #inv(S)
self._compute_r(a) #residual after removing fixed effects/exogs
self._compute_b(D) #? coefficients on random exog, Z ?
def compute_xtwy(self):
"""
Utility function to compute X^tWY (transposed ?) for Unit instance.
"""
return np.dot(np.dot(self.W, self.Y), self.X) #is this transposed ?
def compute_xtwx(self):
"""
Utility function to compute X^tWX for Unit instance.
"""
return np.dot(np.dot(self.X.T, self.W), self.X)
def cov_random(self, D, Sinv=None):
"""
Approximate covariance of estimates of random effects. Just after
Display (3.10) in Laird, Lange, Stram (see help(Unit)).
D - D' Z' P Z D
Notes
-----
In example where the mean of the random coefficient is not zero, this
is not a covariance but a non-centered moment. (proof by example)
"""
if Sinv is not None:
self.compute_P(Sinv)
t = np.dot(self.Z, D)
return D - np.dot(np.dot(t.T, self.P), t)
def logL(self, a, ML=False):
"""
Individual contributions to the log-likelihood, tries to return REML
contribution by default though this requires estimated
fixed effect a to be passed as an argument.
no constant with pi included
a is not used if ML=true (should be a=None in signature)
If ML is false, then the residuals are calculated for the given fixed
effects parameters a.
"""
if ML:
return (np.log(L.det(self.W)) - (self.r * np.dot(self.W, self.r)).sum()) / 2.
else:
if a is None:
raise ValueError('need fixed effect a for REML contribution to log-likelihood')
r = self.Y - np.dot(self.X, a)
return (np.log(L.det(self.W)) - (r * np.dot(self.W, r)).sum()) / 2.
def deviance(self, ML=False):
'''deviance defined as 2 times the negative loglikelihood
'''
return - 2 * self.logL(ML=ML)
class OneWayMixed(object):
"""
Model for
EM implementation of (repeated measures)
mixed effects model.
\'Maximum Likelihood Computations with Repeated Measures:
Application of the EM Algorithm\'
Nan Laird; Nicholas Lange; Daniel Stram
Journal of the American Statistical Association,
Vol. 82, No. 397. (Mar., 1987), pp. 97-105.
Parameters
----------
units : list of units
the data for the individual units should be attached to the units
response, fixed and random : formula expression, called as argument to Formula
*available results and alias*
(subject to renaming, and coversion to cached attributes)
params() -> self.a : coefficient for fixed effects or exog
cov_params() -> self.Sinv : covariance estimate of fixed effects/exog
bse() : standard deviation of params
cov_random -> self.D : estimate of random effects covariance
params_random_units -> [self.units[...].b] : random coefficient for each unit
*attributes*
(others)
self.m : number of units
self.p : k_vars_fixed
self.q : k_vars_random
self.N : nobs (total)
Notes
-----
Fit returns a result instance, but not all results that use the inherited
methods have been checked.
Parameters need to change: drop formula and we require a naming convention for
the units (currently Y,X,Z). - endog, exog_fe, endog_re ?
logL does not include constant, e.g. sqrt(pi)
llf is for MLE not for REML
convergence criteria for iteration
Currently convergence in the iterative solver is reached if either the loglikelihood
    *or* the fixed effects parameters change by less than the tolerance.
In some examples, the fixed effects parameters converged to 1e-5 within 150 iterations
while the log likelihood did not converge within 2000 iterations. This might be
the case if the fixed effects parameters are well estimated, but there are still
changes in the random effects. If params_rtol and params_atol are set at a higher
level, then the random effects might not be estimated to a very high precision.
The above was with a misspecified model, without a constant. With a
correctly specified model convergence is fast, within a few iterations
(6 in example).
"""
def __init__(self, units):
self.units = units
self.m = len(self.units)
self.n_units = self.m
self.N = sum(unit.X.shape[0] for unit in self.units)
self.nobs = self.N #alias for now
# Determine size of fixed effects
d = self.units[0].X
self.p = d.shape[1] # d.shape = p
self.k_exog_fe = self.p #alias for now
self.a = np.zeros(self.p, np.float64)
# Determine size of D, and sensible initial estimates
# of sigma and D
d = self.units[0].Z
self.q = d.shape[1] # Z.shape = q
self.k_exog_re = self.q #alias for now
self.D = np.zeros((self.q,)*2, np.float64)
self.sigma = 1.
self.dev = np.inf #initialize for iterations, move it?
def _compute_a(self):
"""fixed effects parameters
Display (3.1) of
Laird, Lange, Stram (see help(Mixed)).
"""
for unit in self.units:
unit.fit(self.a, self.D, self.sigma)
S = sum([unit.compute_xtwx() for unit in self.units])
Y = sum([unit.compute_xtwy() for unit in self.units])
self.Sinv = L.pinv(S)
self.a = np.dot(self.Sinv, Y)
def _compute_sigma(self, ML=False):
"""
Estimate sigma. If ML is True, return the ML estimate of sigma,
else return the REML estimate.
If ML, this is (3.6) in Laird, Lange, Stram (see help(Mixed)),
otherwise it corresponds to (3.8).
sigma is the standard deviation of the noise (residual)
"""
sigmasq = 0.
for unit in self.units:
if ML:
W = unit.W
else:
unit.compute_P(self.Sinv)
W = unit.P
t = unit.r - np.dot(unit.Z, unit.b)
sigmasq += np.power(t, 2).sum()
sigmasq += self.sigma**2 * np.trace(np.identity(unit.n) -
self.sigma**2 * W)
self.sigma = np.sqrt(sigmasq / self.N)
def _compute_D(self, ML=False):
"""
Estimate random effects covariance D.
If ML is True, return the ML estimate of sigma,
else return the REML estimate.
If ML, this is (3.7) in Laird, Lange, Stram (see help(Mixed)),
otherwise it corresponds to (3.9).
"""
D = 0.
for unit in self.units:
if ML:
W = unit.W
else:
unit.compute_P(self.Sinv)
W = unit.P
D += np.multiply.outer(unit.b, unit.b)
t = np.dot(unit.Z, self.D)
D += self.D - np.dot(np.dot(t.T, W), t)
self.D = D / self.m
def cov_fixed(self):
"""
Approximate covariance of estimates of fixed effects.
Just after Display (3.10) in Laird, Lange, Stram (see help(Mixed)).
"""
return self.Sinv
#----------- alias (JP) move to results class ?
def cov_random(self):
"""
Estimate random effects covariance D.
If ML is True, return the ML estimate of sigma, else return the REML estimate.
see _compute_D, alias for self.D
"""
return self.D
@property
def params(self):
'''
estimated coefficients for exogeneous variables or fixed effects
see _compute_a, alias for self.a
'''
return self.a
@property
def params_random_units(self):
'''random coefficients for each unit
'''
return np.array([unit.b for unit in self.units])
def cov_params(self):
'''
estimated covariance for coefficients for exogeneous variables or fixed effects
see cov_fixed, and Sinv in _compute_a
'''
return self.cov_fixed()
@property
def bse(self):
'''
standard errors of estimated coefficients for exogeneous variables (fixed)
'''
return np.sqrt(np.diag(self.cov_params()))
#----------- end alias
def deviance(self, ML=False):
'''deviance defined as 2 times the negative loglikelihood
'''
return -2 * self.logL(ML=ML)
def logL(self, ML=False):
"""
Return log-likelihood, REML by default.
"""
#I don't know what the difference between REML and ML is here.
logL = 0.
for unit in self.units:
logL += unit.logL(a=self.a, ML=ML)
if not ML:
logL += np.log(L.det(self.Sinv)) / 2
return logL
def initialize(self):
S = sum([np.dot(unit.X.T, unit.X) for unit in self.units])
Y = sum([np.dot(unit.X.T, unit.Y) for unit in self.units])
self.a = L.lstsq(S, Y)[0]
D = 0
t = 0
sigmasq = 0
for unit in self.units:
unit.r = unit.Y - np.dot(unit.X, self.a)
if self.q > 1:
unit.b = L.lstsq(unit.Z, unit.r)[0]
else:
Z = unit.Z.reshape((unit.Z.shape[0], 1))
unit.b = L.lstsq(Z, unit.r)[0]
sigmasq += (np.power(unit.Y, 2).sum() -
(self.a * np.dot(unit.X.T, unit.Y)).sum() -
(unit.b * np.dot(unit.Z.T, unit.r)).sum())
D += np.multiply.outer(unit.b, unit.b)
t += L.pinv(np.dot(unit.Z.T, unit.Z))
#TODO: JP added df_resid check
self.df_resid = (self.N - (self.m - 1) * self.q - self.p)
sigmasq /= (self.N - (self.m - 1) * self.q - self.p)
self.sigma = np.sqrt(sigmasq)
self.D = (D - sigmasq * t) / self.m
def cont(self, ML=False, rtol=1.0e-05, params_rtol=1e-5, params_atol=1e-4):
'''convergence check for iterative estimation
'''
self.dev, old = self.deviance(ML=ML), self.dev
#self.history.append(np.hstack((self.dev, self.a)))
self.history['llf'].append(self.dev)
self.history['params'].append(self.a.copy())
self.history['D'].append(self.D.copy())
if np.fabs((self.dev - old) / self.dev) < rtol: #why is there times `*`?
#print np.fabs((self.dev - old)), self.dev, old
self.termination = 'llf'
return False
#break if parameters converged
#TODO: check termination conditions, OR or AND
if np.all(np.abs(self.a - self._a_old) < (params_rtol * self.a + params_atol)):
self.termination = 'params'
return False
self._a_old = self.a.copy()
return True
def fit(self, maxiter=100, ML=False, rtol=1.0e-05, params_rtol=1e-6, params_atol=1e-6):
#initialize for convergence criteria
self._a_old = np.inf * self.a
self.history = {'llf':[], 'params':[], 'D':[]}
for i in range(maxiter):
self._compute_a() #a, Sinv : params, cov_params of fixed exog
self._compute_sigma(ML=ML) #sigma MLE or REML of sigma ?
self._compute_D(ML=ML) #D : covariance of random effects, MLE or REML
if not self.cont(ML=ML, rtol=rtol, params_rtol=params_rtol,
params_atol=params_atol):
break
else: #if end of loop is reached without break
self.termination = 'maxiter'
print('Warning: maximum number of iterations reached')
self.iterations = i
results = OneWayMixedResults(self)
#compatibility functions for fixed effects/exog
results.scale = 1
results.normalized_cov_params = self.cov_params()
return results
class OneWayMixedResults(LikelihoodModelResults):
'''Results class for OneWayMixed models
'''
def __init__(self, model):
#TODO: check, change initialization to more standard pattern
self.model = model
self.params = model.params
#need to overwrite this because we don't have a standard
#model.loglike yet
#TODO: what todo about REML loglike, logL is not normalized
@cache_readonly
def llf(self):
return self.model.logL(ML=True)
@property
def params_random_units(self):
return self.model.params_random_units
def cov_random(self):
return self.model.cov_random()
def mean_random(self, idx='lastexog'):
if idx == 'lastexog':
meanr = self.params[-self.model.k_exog_re:]
elif isinstance(idx, list):
if not len(idx) == self.model.k_exog_re:
raise ValueError('length of idx different from k_exog_re')
else:
meanr = self.params[idx]
else:
meanr = np.zeros(self.model.k_exog_re)
return meanr
def std_random(self):
return np.sqrt(np.diag(self.cov_random()))
def plot_random_univariate(self, bins=None, use_loc=True):
'''create plot of marginal distribution of random effects
Parameters
----------
bins : int or bin edges
option for bins in matplotlibs hist method. Current default is not
very sophisticated. All distributions use the same setting for
bins.
use_loc : bool
If True, then the distribution with mean given by the fixed
effect is used.
Returns
-------
fig : matplotlib figure instance
figure with subplots
Notes
-----
What can make this fancier?
Bin edges will not make sense if loc or scale differ across random
effect distributions.
'''
#outsource this
import matplotlib.pyplot as plt
from scipy.stats import norm as normal
fig = plt.figure()
k = self.model.k_exog_re
if k > 3:
rows, cols = int(np.ceil(k * 0.5)), 2
else:
rows, cols = k, 1
if bins is None:
#bins = self.model.n_units // 20 #TODO: just roughly, check
# bins = np.sqrt(self.model.n_units)
            bins = int(5 + 2 * self.model.n_units**(1./3.))  # hist expects an integer bin count
if use_loc:
loc = self.mean_random()
else:
loc = [0]*k
scale = self.std_random()
for ii in range(k):
            ax = fig.add_subplot(rows, cols, ii + 1)  # subplot index is 1-based
freq, bins_, _ = ax.hist(loc[ii] + self.params_random_units[:,ii],
bins=bins, normed=True)
points = np.linspace(bins_[0], bins_[-1], 200)
#ax.plot(points, normal.pdf(points, loc=loc, scale=scale))
#loc of sample is approx. zero, with Z appended to X
#alternative, add fixed to mean
ax.set_title('Random Effect %d Marginal Distribution' % ii)
ax.plot(points,
normal.pdf(points, loc=loc[ii], scale=scale[ii]),
'r')
return fig
def plot_scatter_pairs(self, idx1, idx2, title=None, ax=None):
'''create scatter plot of two random effects
Parameters
----------
idx1, idx2 : int
indices of the two random effects to display, corresponding to
columns of exog_re
title : None or string
If None, then a default title is added
ax : None or matplotlib axis instance
If None, then a figure with one axis is created and returned.
If ax is not None, then the scatter plot is created on it, and
this axis instance is returned.
Returns
-------
ax_or_fig : axis or figure instance
see ax parameter
Notes
-----
Still needs ellipse from estimated parameters
'''
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            ax_or_fig = fig
        else:
            ax_or_fig = ax
re1 = self.params_random_units[:,idx1]
re2 = self.params_random_units[:,idx2]
ax.plot(re1, re2, 'o', alpha=0.75)
if title is None:
title = 'Random Effects %d and %d' % (idx1, idx2)
        ax.set_title(title)
return ax_or_fig
def plot_scatter_all_pairs(self, title=None):
from statsmodels.graphics.plot_grids import scatter_ellipse
if self.model.k_exog_re < 2:
raise ValueError('less than two variables available')
return scatter_ellipse(self.params_random_units,
ell_kwds={'color':'r'})
#ell_kwds not implemented yet
# #note I have written this already as helper function, get it
# import matplotlib.pyplot as plt
# #from scipy.stats import norm as normal
# fig = plt.figure()
# k = self.model.k_exog_re
# n_plots = k * (k - 1) // 2
# if n_plots > 3:
# rows, cols = int(np.ceil(n_plots * 0.5)), 2
# else:
# rows, cols = n_plots, 1
#
# count = 1
# for ii in range(k):
# for jj in range(ii):
# ax = fig.add_subplot(rows, cols, count)
# self.plot_scatter_pairs(ii, jj, title=None, ax=ax)
# count += 1
#
# return fig
if __name__ == '__main__':
#see examples/ex_mixed_lls_1.py
pass
| bsd-3-clause |
CharlesShang/TFFRCNN | lib/gt_data_layer/minibatch.py | 3 | 4873 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
import cv2
from ..utils.blob import prep_im_for_blob, im_list_to_blob
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
# <<<< obsolete
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
# Get the input image blob, formatted for caffe
im_blob = _get_image_blob(roidb)
# build the box information blob
info_boxes_blob = np.zeros((0, 18), dtype=np.float32)
num_scale = len(cfg.TRAIN.SCALES)
for i in xrange(num_images):
info_boxes = roidb[i]['info_boxes']
# change the batch index
info_boxes[:,2] += i * num_scale
info_boxes[:,7] += i * num_scale
info_boxes_blob = np.vstack((info_boxes_blob, info_boxes))
# build the parameter blob
num_aspect = len(cfg.TRAIN.ASPECTS)
num = 2 + 2 * num_scale + 2 * num_aspect
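    # blob layout: [num_scale, num_aspect, scales, scale mapping,
    #               aspect heights, aspect widths]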
parameters_blob = np.zeros((num), dtype=np.float32)
parameters_blob[0] = num_scale
parameters_blob[1] = num_aspect
parameters_blob[2:2+num_scale] = cfg.TRAIN.SCALES
parameters_blob[2+num_scale:2+2*num_scale] = cfg.TRAIN.SCALE_MAPPING
parameters_blob[2+2*num_scale:2+2*num_scale+num_aspect] = cfg.TRAIN.ASPECT_HEIGHTS
parameters_blob[2+2*num_scale+num_aspect:2+2*num_scale+2*num_aspect] = cfg.TRAIN.ASPECT_WIDTHS
# For debug visualizations
# _vis_minibatch(im_blob, rois_blob, labels_blob, sublabels_blob)
blobs = {'data': im_blob,
'info_boxes': info_boxes_blob,
'parameters': parameters_blob}
return blobs
def _get_image_blob(roidb):
"""Builds an input blob from the images in the roidb at the different scales.
"""
num_images = len(roidb)
processed_ims = []
for i in xrange(num_images):
# read image
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
# build image pyramid
for im_scale in cfg.TRAIN.SCALES_BASE:
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_loss_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_loss_weights[ind, start:end] = [1., 1., 1., 1.]
return bbox_targets, bbox_loss_weights
def _vis_minibatch(im_blob, rois_blob, labels_blob, sublabels_blob):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in xrange(rois_blob.shape[0]):
rois = rois_blob[i, :]
im_ind = rois[0]
roi = rois[2:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
subcls = sublabels_blob[i]
plt.imshow(im)
print 'class: ', cls, ' subclass: ', subcls
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
| mit |
nomadcube/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 40 | 23697 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
# against nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprection_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same input for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
| bsd-3-clause |
hlin117/scikit-learn | sklearn/learning_curve.py | 35 | 15441 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the functions are moved."
" This module will be removed in 0.20",
DeprecationWarning)
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0,
error_score='raise'):
"""Learning curve.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.learning_curve` instead.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True,
error_score=error_score)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
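    # each row of `out` holds (train_score, test_score) for one (fold, size)
    # pair; reshape and transpose so that out[0] and out[1] become the
    # (n_ticks, n_cv_folds) train- and test-score matrices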
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
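    # each partition pairs a cumulative train size with the chunk of samples
    # that is new at this step and gets fed to partial_fit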
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.validation_curve` instead.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<sphx_glr_auto_examples_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
| bsd-3-clause |
warmspringwinds/scikit-image | doc/examples/plot_rank_mean.py | 17 | 1499 | """
============
Mean filters
============
This example compares the following mean filters of the rank filter package:
* **local mean**: all pixels belonging to the structuring element are used to
  compute the average gray level.
* **percentile mean**: only use values between percentiles p0 and p1
(here 10% and 90%).
* **bilateral mean**: only use pixels of the structuring element having a gray
level situated inside g-s0 and g+s1 (here g-500 and g+500)
Percentile and usual mean give here similar results; these filters smooth the
complete image (background and details). Bilateral mean exhibits a high
filtering rate for continuous area (i.e. background) while higher image
frequencies remain untouched.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.morphology import disk
from skimage.filters import rank
image = (data.coins()).astype(np.uint16) * 16
selem = disk(20)
percentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)
fig, axes = plt.subplots(nrows=3, figsize=(8, 10))
ax0, ax1, ax2 = axes
ax0.imshow(np.hstack((image, percentile_result)))
ax0.set_title('Percentile mean')
ax0.axis('off')
ax1.imshow(np.hstack((image, bilateral_result)))
ax1.set_title('Bilateral mean')
ax1.axis('off')
ax2.imshow(np.hstack((image, normal_result)))
ax2.set_title('Local mean')
ax2.axis('off')
plt.show()
| bsd-3-clause |
kHarshit/DAT210x_Microsoft | Module2/assignment2_.py | 1 | 1370 | import pandas as pd
# TODO: Load up the dataset Ensuring you set the appropriate header column names
df = pd.read_csv('Datasets/direct_marketing.csv')
print(df)
# print(df.recency) # produces a series object
# print(df['recency']) # produces a dataframe
"""By using the column name in the code, it's very easy to discern what is being pulled, and you don't have to worry
about the order of the columns. Doing this lookup of first matching the column name before slicing the column index
is marginally slower than directly accessing the column by index."""
# print(df[['recency']])
# print(df.loc[:, 'recency']) # selects by column label
# print(df.loc[:, ['recency']])
"""By passing in a list of parameters, you can select more than one column to slice. If you use this syntax, even if
you only specify a single column, the data type that you'll get back is a dataframe as opposed to a series"""
print(df.iloc[:, 0]) # selects by column index
# print(df.iloc[-5:, :])
# print(df.iloc[:, [0]])
# print(df.ix[:, 0])  # .ix is a hybrid accessor that accepts either labels or integer positions
"""Note that .loc[] and .ix[] are inclusive of the range of values selected, where .iloc[] is non-inclusive. In that
sense, df.loc[0:1, :] would select the first two rows, but only the first row would be returned using df.iloc[0:1, :]."""
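# A quick (commented-out) check of that inclusivity difference on this df:
# print(df.loc[0:1, :])   # label slice: rows 0 and 1
# print(df.iloc[0:1, :])  # position slice: row 0 only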
# print(df.recency < 7)
# print(df[(df.recency < 7) & (df.newbie == 0)])
| mit |
iamkingmaker/zipline | zipline/examples/dual_ema_talib.py | 16 | 3247 | #!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dual Moving Average Crossover algorithm.
This algorithm buys apple once its short moving average crosses
its long moving average (indicating upwards momentum) and sells
its shares once the averages cross again (indicating downwards
momentum).
"""
# Import exponential moving average from talib wrapper
from zipline.transforms.ta import EMA
def initialize(context):
context.asset = symbol('AAPL')
# Add 2 mavg transforms, one with a long window, one with a short window.
context.short_ema_trans = EMA(timeperiod=20)
context.long_ema_trans = EMA(timeperiod=40)
# To keep track of whether we invested in the stock or not
context.invested = False
def handle_data(context, data):
short_ema = context.short_ema_trans.handle_data(data)
long_ema = context.long_ema_trans.handle_data(data)
if short_ema is None or long_ema is None:
return
buy = False
sell = False
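    # crossover rule: enter a long position when the short EMA rises above the
    # long EMA while flat, and exit once it falls back below while invested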
if (short_ema > long_ema).all() and not context.invested:
order(context.asset, 100)
context.invested = True
buy = True
elif (short_ema < long_ema).all() and context.invested:
order(context.asset, -100)
context.invested = False
sell = True
record(AAPL=data[context.asset].price,
short_ema=short_ema[context.asset],
long_ema=long_ema[context.asset],
buy=buy,
sell=sell)
if __name__ == '__main__':
from datetime import datetime
import logbook
import matplotlib.pyplot as plt
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.api import order, record, symbol
from zipline.utils.factory import load_from_yahoo
logbook.StderrHandler().push_application()
start = datetime(2014, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2014, 11, 1, 0, 0, 0, 0, pytz.utc)
data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start,
end=end)
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data,
identifiers=['AAPL'])
results = algo.run(data).dropna()
fig = plt.figure()
ax1 = fig.add_subplot(211, ylabel='portfolio value')
results.portfolio_value.plot(ax=ax1)
ax2 = fig.add_subplot(212)
results[['AAPL', 'short_ema', 'long_ema']].plot(ax=ax2)
ax2.plot(results.ix[results.buy].index, results.short_ema[results.buy],
'^', markersize=10, color='m')
ax2.plot(results.ix[results.sell].index, results.short_ema[results.sell],
'v', markersize=10, color='k')
plt.legend(loc=0)
plt.gcf().set_size_inches(18, 8)
plt.show()
| apache-2.0 |
Mctigger/KagglePlanetPytorch | nn_finetune_densenet_169.py | 1 | 5085 | import os
import sys
from itertools import chain
import torchvision.models
import torch.nn.functional as F
import torch.optim as optim
from torch import nn
import torch.nn.init
from torch.utils.data import DataLoader
from torchsample.modules import ModuleTrainer
from torchsample.callbacks import CSVLogger, LearningRateScheduler
import sklearn.model_selection
import paths
import labels
import transforms
import callbacks
from datasets import KaggleAmazonJPGDataset
name = os.path.basename(sys.argv[0])[:-3]
def generate_model():
class DenseModel(nn.Module):
def __init__(self, pretrained_model):
super(DenseModel, self).__init__()
self.classifier = nn.Linear(pretrained_model.classifier.in_features, 17)
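            # only the new classifier is registered on the module at this
            # point, so the init loop below leaves the pretrained features
            # (attached afterwards) untouched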
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
self.features = pretrained_model.features
self.dense1 = pretrained_model.features._modules['denseblock1']
self.dense2 = pretrained_model.features._modules['denseblock2']
self.dense3 = pretrained_model.features._modules['denseblock3']
self.dense4 = pretrained_model.features._modules['denseblock4']
def forward(self, x):
features = self.features(x)
out = F.relu(features, inplace=True)
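            # global average pool; kernel_size=8 assumes an 8x8 feature map
            # (e.g. 256x256 inputs, which DenseNet downsamples by 32)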
out = F.avg_pool2d(out, kernel_size=8).view(features.size(0), -1)
out = F.sigmoid(self.classifier(out))
return out
return DenseModel(torchvision.models.densenet169(pretrained=True))
random_state = 1
labels_df = labels.get_labels_df()
kf = sklearn.model_selection.KFold(n_splits=5, shuffle=True, random_state=random_state)
split = kf.split(labels_df)
def train_net(train, val, model, name):
transformations_train = transforms.apply_chain([
transforms.to_float,
transforms.random_fliplr(),
transforms.random_flipud(),
transforms.augment_color(0.1),
transforms.augment(),
torchvision.transforms.ToTensor()
])
transformations_val = transforms.apply_chain([
transforms.to_float,
torchvision.transforms.ToTensor()
])
dset_train = KaggleAmazonJPGDataset(train, paths.train_jpg, transformations_train, divide=False)
train_loader = DataLoader(dset_train,
batch_size=32,
shuffle=True,
num_workers=10,
pin_memory=True)
dset_val = KaggleAmazonJPGDataset(val, paths.train_jpg, transformations_val, divide=False)
val_loader = DataLoader(dset_val,
batch_size=32,
num_workers=10,
pin_memory=True)
ignored_params = list(map(id, chain(
model.classifier.parameters(),
model.dense1.parameters(),
model.dense2.parameters(),
model.dense3.parameters(),
model.dense4.parameters()
)))
base_params = filter(lambda p: id(p) not in ignored_params,
model.parameters())
optimizer = optim.Adam([
{'params': base_params},
{'params': model.dense1.parameters()},
{'params': model.dense2.parameters()},
{'params': model.dense3.parameters()},
{'params': model.dense4.parameters()},
{'params': model.classifier.parameters()}
], lr=0, weight_decay=0.0005)
trainer = ModuleTrainer(model)
# New settings
def schedule(current_epoch, current_lrs, **logs):
lrs = [1e-3, 1e-4, 0.5e-4, 1e-5, 0.5e-5]
epochs = [0, 4, 15, 20, 25]
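        # indices into current_lrs follow the optimizer param groups defined
        # above: 0 = base features, 1-4 = dense blocks, 5 = classifier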
for lr, epoch in zip(lrs, epochs):
if current_epoch >= epoch:
current_lrs[5] = lr
if current_epoch >= 2:
current_lrs[4] = lr * 1
current_lrs[3] = lr * 1
current_lrs[2] = lr * 0.5
current_lrs[1] = lr * 0.2
current_lrs[0] = lr * 0.1
return current_lrs
trainer.set_callbacks([
callbacks.ModelCheckpoint(
paths.models,
name,
save_best_only=False,
saving_strategy=lambda epoch: True
),
CSVLogger('./logs/' + name),
LearningRateScheduler(schedule)
])
trainer.compile(loss=nn.BCELoss(),
optimizer=optimizer)
trainer.fit_loader(train_loader,
val_loader,
nb_epoch=30,
verbose=1,
cuda_device=0)
if __name__ == "__main__":
for i, (train_idx, val_idx) in enumerate(split):
name = os.path.basename(sys.argv[0])[:-3] + '-split_' + str(i)
train_net(labels_df.ix[train_idx], labels_df.ix[val_idx], generate_model(), name) | mit |
RMKD/networkx | networkx/convert_matrix.py | 13 | 33243 | """Functions to convert NetworkX graphs to and from numpy/scipy matrices.
The preferred way of converting data to a NetworkX graph is through the
graph constuctor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a 10 node random graph from a numpy matrix
>>> import numpy
>>> a = numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))
>>> D = nx.DiGraph(a)
or equivalently
>>> D = nx.to_networkx_graph(a,create_using=nx.DiGraph())
See Also
--------
nx_pygraphviz, nx_pydot
"""
# Copyright (C) 2006-2014 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import warnings
import itertools
import networkx as nx
from networkx.convert import _prep_create_using
from networkx.utils import not_implemented_for
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
__all__ = ['from_numpy_matrix', 'to_numpy_matrix',
'from_pandas_dataframe', 'to_pandas_dataframe',
'to_numpy_recarray',
'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix']
def to_pandas_dataframe(G, nodelist=None, multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a Pandas DataFrame.
Parameters
----------
G : graph
The NetworkX graph used to construct the Pandas DataFrame.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None, optional
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float, optional
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
df : Pandas DataFrame
Graph adjacency matrix
Notes
-----
The DataFrame entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the 'multigraph_weight' parameter. The default is to
sum the weight attributes for each of the parallel edges.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Pandas DataFrame can be modified as follows:
>>> import pandas as pd
>>> import numpy as np
>>> G = nx.Graph([(1,1)])
>>> df = nx.to_pandas_dataframe(G)
>>> df
1
1 1
>>> df.values[np.diag_indices_from(df)] *= 2
>>> df
1
1 2
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_pandas_dataframe(G, nodelist=[0,1,2])
0 1 2
0 0 2 0
1 1 0 0
2 0 0 4
"""
import pandas as pd
M = to_numpy_matrix(G, nodelist, None, None, multigraph_weight, weight, nonedge)
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
df = pd.DataFrame(data=M, index = nodelist ,columns = nodelist)
return df
def from_pandas_dataframe(df, source, target, edge_attr=None,
create_using=None):
"""Return a graph from Pandas DataFrame.
The Pandas DataFrame should contain at least two columns of node names and
zero or more columns of node attributes. Each row will be processed as one
edge instance.
Note: This function iterates over DataFrame.values, which is not
guaranteed to retain the data type across columns in the row. This is only
a problem if your row is entirely numeric and a mix of ints and floats. In
that case, all values will be returned as floats. See the
DataFrame.iterrows documentation for an example.
Parameters
----------
df : Pandas DataFrame
An edge list representation of a graph
source : str or int
        A valid column name (string or integer) for the source nodes (for the
directed case).
target : str or int
        A valid column name (string or integer) for the target nodes (for the
directed case).
edge_attr : str or int, iterable, True
A valid column name (str or integer) or list of column names that will
be used to retrieve items from the row and add them to the graph as edge
attributes. If `True`, all of the remaining columns will be added.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
See Also
--------
to_pandas_dataframe
Examples
--------
Simple integer weights on edges:
>>> import pandas as pd
>>> import numpy as np
>>> r = np.random.RandomState(seed=5)
>>> ints = r.random_integers(1, 10, size=(3,2))
>>> a = ['A', 'B', 'C']
>>> b = ['D', 'A', 'E']
>>> df = pd.DataFrame(ints, columns=['weight', 'cost'])
>>> df[0] = a
>>> df['b'] = b
>>> df
weight cost 0 b
0 4 7 A D
1 7 1 B A
2 10 9 C E
>>> G=nx.from_pandas_dataframe(df, 0, 'b', ['weight', 'cost'])
>>> G['E']['C']['weight']
10
>>> G['E']['C']['cost']
9
"""
g = _prep_create_using(create_using)
# Index of source and target
src_i = df.columns.get_loc(source)
tar_i = df.columns.get_loc(target)
if edge_attr:
# If all additional columns requested, build up a list of tuples
# [(name, index),...]
if edge_attr is True:
# Create a list of all columns indices, ignore nodes
edge_i = []
for i, col in enumerate(df.columns):
if col is not source and col is not target:
edge_i.append((col, i))
# If a list or tuple of name is requested
elif isinstance(edge_attr, (list, tuple)):
edge_i = [(i, df.columns.get_loc(i)) for i in edge_attr]
# If a string or int is passed
else:
edge_i = [(edge_attr, df.columns.get_loc(edge_attr)),]
# Iteration on values returns the rows as Numpy arrays
for row in df.values:
g.add_edge(row[src_i], row[tar_i], {i:row[j] for i, j in edge_i})
# If no column names are given, then just return the edges.
else:
for row in df.values:
g.add_edge(row[src_i], row[tar_i])
return g
def to_numpy_matrix(G, nodelist=None, dtype=None, order=None,
multigraph_weight=sum, weight='weight', nonedge=0.0):
"""Return the graph adjacency matrix as a NumPy matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in ``nodelist``.
If ``nodelist`` is None, then the ordering is produced by G.nodes().
dtype : NumPy data type, optional
A valid single NumPy data type used to initialize the array.
This must be a simple type such as int or numpy.float64 and
not a compound data type (see to_numpy_recarray)
If None, then the NumPy default is used.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
multigraph_weight : {sum, min, max}, optional
An operator that determines how weights in multigraphs are handled.
The default is to sum the weights of the multiple edges.
weight : string or None optional (default = 'weight')
The edge attribute that holds the numerical value used for
the edge weight. If an edge does not have that attribute, then the
value 1 is used instead.
nonedge : float (default = 0.0)
The matrix values corresponding to nonedges are typically set to zero.
However, this could be undesirable if there are matrix values
corresponding to actual edges that also have the value zero. If so,
one might prefer nonedges to have some other value, such as nan.
Returns
-------
M : NumPy matrix
Graph adjacency matrix
See Also
--------
to_numpy_recarray, from_numpy_matrix
Notes
-----
The matrix entries are assigned to the weight edge attribute. When
an edge does not have a weight attribute, the value of the entry is set to
the number 1. For multiple (parallel) edges, the values of the entries
are determined by the ``multigraph_weight`` parameter. The default is to
sum the weight attributes for each of the parallel edges.
When ``nodelist`` does not contain every node in ``G``, the matrix is built
from the subgraph of ``G`` that is induced by the nodes in ``nodelist``.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Numpy matrix can be modified as follows:
>>> import numpy as np
>>> G = nx.Graph([(1, 1)])
>>> A = nx.to_numpy_matrix(G)
>>> A
matrix([[ 1.]])
>>> A.A[np.diag_indices_from(A)] *= 2
>>> A
matrix([[ 2.]])
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> nx.to_numpy_matrix(G, nodelist=[0,1,2])
matrix([[ 0., 2., 0.],
[ 1., 0., 0.],
[ 0., 0., 4.]])
"""
import numpy as np
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
# Initially, we start with an array of nans. Then we populate the matrix
# using data from the graph. Afterwards, any leftover nans will be
# converted to the value of `nonedge`. Note, we use nans initially,
# instead of zero, for two reasons:
#
# 1) It can be important to distinguish a real edge with the value 0
# from a nonedge with the value 0.
#
# 2) When working with multi(di)graphs, we must combine the values of all
# edges between any two nodes in some manner. This often takes the
# form of a sum, min, or max. Using the value 0 for a nonedge would
# have undesirable effects with min and max, but using nanmin and
# nanmax with initially nan values is not problematic at all.
#
# That said, there are still some drawbacks to this approach. Namely, if
# a real edge is nan, then that value is a) not distinguishable from
# nonedges and b) is ignored by the default combinator (nansum, nanmin,
# nanmax) functions used for multi(di)graphs. If this becomes an issue,
# an alternative approach is to use masked arrays. Initially, every
# element is masked and set to some `initial` value. As we populate the
# graph, elements are unmasked (automatically) when we combine the initial
# value with the values given by real edges. At the end, we convert all
# masked values to `nonedge`. Using masked arrays fully addresses reason 1,
# but for reason 2, we would still have the issue with min and max if the
# initial values were 0.0. Note: an initial value of +inf is appropriate
# for min, while an initial value of -inf is appropriate for max. When
# working with sum, an initial value of zero is appropriate. Ideally then,
# we'd want to allow users to specify both a value for nonedges and also
# an initial value. For multi(di)graphs, the choice of the initial value
# will, in general, depend on the combinator function---sensible defaults
# can be provided.
if G.is_multigraph():
# Handle MultiGraphs and MultiDiGraphs
M = np.zeros((nlen, nlen), dtype=dtype, order=order) + np.nan
# use numpy nan-aware operations
operator={sum:np.nansum, min:np.nanmin, max:np.nanmax}
try:
op=operator[multigraph_weight]
except:
raise ValueError('multigraph_weight must be sum, min, or max')
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i, j = index[u], index[v]
e_weight = attrs.get(weight, 1)
M[i,j] = op([e_weight, M[i,j]])
if undirected:
M[j,i] = M[i,j]
else:
# Graph or DiGraph, this is much faster than above
M = np.zeros((nlen,nlen), dtype=dtype, order=order) + np.nan
for u,nbrdict in G.adjacency_iter():
for v,d in nbrdict.items():
try:
M[index[u],index[v]] = d.get(weight,1)
except KeyError:
# This occurs when there are fewer desired nodes than
# there are nodes in the graph: len(nodelist) < len(G)
pass
M[np.isnan(M)] = nonedge
M = np.asmatrix(M)
return M
def from_numpy_matrix(A, parallel_edges=False, create_using=None):
"""Return a graph from numpy matrix.
The numpy matrix is interpreted as an adjacency matrix for the graph.
Parameters
----------
A : numpy matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is ``True``, ``create_using`` is a multigraph, and ``A`` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is ``False``, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using : NetworkX graph
Use specified graph for result. The default is Graph()
Notes
-----
If ``create_using`` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, ``parallel_edges`` is ``True``, and the
entries of ``A`` are of type ``int``, then this function returns a multigraph
(of the same type as ``create_using``) with parallel edges.
If ``create_using`` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
If the numpy matrix has a single data type for each matrix entry it
will be converted to an appropriate Python data type.
If the numpy matrix has a user-specified compound data type the names
of the data fields will be used as attribute keys in the resulting
NetworkX graph.
See Also
--------
to_numpy_matrix, to_numpy_recarray
Examples
--------
Simple integer weights on edges:
>>> import numpy
>>> A=numpy.matrix([[1, 1], [2, 1]])
>>> G=nx.from_numpy_matrix(A)
If ``create_using`` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> G = nx.from_numpy_matrix(A, create_using = nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If ``create_using`` is a multigraph and the matrix has only integer entries
but ``parallel_edges`` is ``True``, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import numpy
>>> A = numpy.matrix([[1, 1], [1, 2]])
>>> temp = nx.MultiGraph()
>>> G = nx.from_numpy_matrix(A, parallel_edges = True, create_using = temp)
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
User defined compound data type on edges:
>>> import numpy
>>> dt = [('weight', float), ('cost', int)]
>>> A = numpy.matrix([[(1.0, 2)]], dtype = dt)
>>> G = nx.from_numpy_matrix(A)
>>> G.edges()
[(0, 0)]
>>> G[0][0]['cost']
2
>>> G[0][0]['weight']
1.0
"""
# This should never fail if you have created a numpy matrix with numpy...
import numpy as np
kind_to_python_type={'f':float,
'i':int,
'u':int,
'b':bool,
'c':complex,
'S':str,
'V':'void'}
try: # Python 3.x
blurb = chr(1245) # just to trigger the exception
kind_to_python_type['U']=str
except ValueError: # Python 2.6+
kind_to_python_type['U']=unicode
G=_prep_create_using(create_using)
n,m=A.shape
if n!=m:
raise nx.NetworkXError("Adjacency matrix is not square.",
"nx,ny=%s"%(A.shape,))
dt=A.dtype
try:
python_type=kind_to_python_type[dt.kind]
except:
raise TypeError("Unknown numpy data type: %s"%dt)
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
# Get a list of all the entries in the matrix with nonzero entries. These
# coordinates will become the edges in the graph.
edges = zip(*(np.asarray(A).nonzero()))
# handle numpy constructed data type
    if python_type == 'void':
# Sort the fields by their offset, then by dtype, then by name.
fields = sorted((offset, dtype, name) for name, (dtype, offset) in
A.dtype.fields.items())
triples = ((u, v, {name: kind_to_python_type[dtype.kind](val)
for (_, dtype, name), val in zip(fields, A[u, v])})
for u, v in edges)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
elif python_type is int and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, dict(weight=1)) for d in range(A[u, v]))
for (u, v) in edges)
else: # basic data type
triples = ((u, v, dict(weight=python_type(A[u, v])))
for u, v in edges)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
# on the fact that the vertices created in the
# ``_generated_weighted_edges()`` function are actually the row/column
# indices for the matrix ``A``.
#
# Without this check, we run into a problem where each edge is added twice
# when ``G.add_edges_from()`` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_edges_from(triples)
return G
@not_implemented_for('multigraph')
def to_numpy_recarray(G,nodelist=None,
dtype=[('weight',float)],
order=None):
"""Return the graph adjacency matrix as a NumPy recarray.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy named dtype used to initialize the NumPy recarray.
The data type names are assumed to be keys in the graph edge attribute
dictionary.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. If None, then the NumPy default
is used.
Returns
-------
M : NumPy recarray
The graph with specified edge data as a Numpy recarray
Notes
-----
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Examples
--------
>>> G = nx.Graph()
>>> G.add_edge(1,2,weight=7.0,cost=5)
>>> A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])
>>> print(A.weight)
[[ 0. 7.]
[ 7. 0.]]
>>> print(A.cost)
[[0 5]
[5 0]]
"""
import numpy as np
if nodelist is None:
nodelist = G.nodes()
nodeset = set(nodelist)
if len(nodelist) != len(nodeset):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
nlen=len(nodelist)
undirected = not G.is_directed()
index=dict(zip(nodelist,range(nlen)))
M = np.zeros((nlen,nlen), dtype=dtype, order=order)
names=M.dtype.names
for u,v,attrs in G.edges_iter(data=True):
if (u in nodeset) and (v in nodeset):
i,j = index[u],index[v]
values=tuple([attrs[n] for n in names])
M[i,j] = values
if undirected:
M[j,i] = M[i,j]
return M.view(np.recarray)
def to_scipy_sparse_matrix(G, nodelist=None, dtype=None,
weight='weight', format='csr'):
"""Return the graph adjacency matrix as a SciPy sparse matrix.
Parameters
----------
G : graph
The NetworkX graph used to construct the NumPy matrix.
nodelist : list, optional
The rows and columns are ordered according to the nodes in `nodelist`.
If `nodelist` is None, then the ordering is produced by G.nodes().
dtype : NumPy data-type, optional
A valid NumPy dtype used to initialize the array. If None, then the
NumPy default is used.
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None then all edge weights are 1.
format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
The type of the matrix to be returned (default 'csr'). For
some algorithms different implementations of sparse matrices
can perform better. See [1]_ for details.
Returns
-------
M : SciPy sparse matrix
Graph adjacency matrix.
Notes
-----
The matrix entries are populated using the edge attribute held in
parameter weight. When an edge does not have that attribute, the
value of the entry is 1.
For multiple edges the matrix values are the sums of the edge weights.
When `nodelist` does not contain every node in `G`, the matrix is built
from the subgraph of `G` that is induced by the nodes in `nodelist`.
Uses coo_matrix format. To convert to other formats specify the
format= keyword.
The convention used for self-loop edges in graphs is to assign the
diagonal matrix entry value to the weight attribute of the edge
(or the number 1 if the edge has no weight attribute). If the
alternate convention of doubling the edge weight is desired the
resulting Scipy sparse matrix can be modified as follows:
>>> import scipy as sp
>>> G = nx.Graph([(1,1)])
>>> A = nx.to_scipy_sparse_matrix(G)
>>> print(A.todense())
[[1]]
>>> A.setdiag(A.diagonal()*2)
>>> print(A.todense())
[[2]]
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(0,1,weight=2)
>>> G.add_edge(1,0)
>>> G.add_edge(2,2,weight=3)
>>> G.add_edge(2,2)
>>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2])
>>> print(S.todense())
[[0 2 0]
[1 0 0]
[0 0 4]]
References
----------
.. [1] Scipy Dev. References, "Sparse Matrices",
http://docs.scipy.org/doc/scipy/reference/sparse.html
"""
from scipy import sparse
if nodelist is None:
nodelist = G
nlen = len(nodelist)
if nlen == 0:
raise nx.NetworkXError("Graph has no nodes or edges")
if len(nodelist) != len(set(nodelist)):
msg = "Ambiguous ordering: `nodelist` contained duplicates."
raise nx.NetworkXError(msg)
index = dict(zip(nodelist,range(nlen)))
if G.number_of_edges() == 0:
row,col,data=[],[],[]
else:
row,col,data = zip(*((index[u],index[v],d.get(weight,1))
for u,v,d in G.edges_iter(nodelist, data=True)
if u in index and v in index))
if G.is_directed():
M = sparse.coo_matrix((data,(row,col)),
shape=(nlen,nlen), dtype=dtype)
else:
# symmetrize matrix
d = data + data
r = row + col
c = col + row
# selfloop entries get double counted when symmetrizing
# so we subtract the data on the diagonal
selfloops = G.selfloop_edges(data=True)
if selfloops:
diag_index,diag_data = zip(*((index[u],-d.get(weight,1))
for u,v,d in selfloops
if u in index and v in index))
d += diag_data
r += diag_index
c += diag_index
M = sparse.coo_matrix((d, (r, c)), shape=(nlen,nlen), dtype=dtype)
try:
return M.asformat(format)
except AttributeError:
raise nx.NetworkXError("Unknown sparse matrix format: %s"%format)
def _csr_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Row** format to
an iterable of weighted edge triples.
"""
nrows = A.shape[0]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(nrows):
for j in range(indptr[i], indptr[i+1]):
yield i, indices[j], data[j]
def _csc_gen_triples(A):
"""Converts a SciPy sparse matrix in **Compressed Sparse Column** format to
an iterable of weighted edge triples.
"""
ncols = A.shape[1]
data, indices, indptr = A.data, A.indices, A.indptr
for i in range(ncols):
for j in range(indptr[i], indptr[i+1]):
yield indices[j], i, data[j]
def _coo_gen_triples(A):
"""Converts a SciPy sparse matrix in **Coordinate** format to an iterable
of weighted edge triples.
"""
row, col, data = A.row, A.col, A.data
return zip(row, col, data)
def _dok_gen_triples(A):
"""Converts a SciPy sparse matrix in **Dictionary of Keys** format to an
iterable of weighted edge triples.
"""
for (r, c), v in A.items():
yield r, c, v
def _generate_weighted_edges(A):
"""Returns an iterable over (u, v, w) triples, where u and v are adjacent
vertices and w is the weight of the edge joining u and v.
`A` is a SciPy sparse matrix (in any format).
"""
if A.format == 'csr':
return _csr_gen_triples(A)
if A.format == 'csc':
return _csc_gen_triples(A)
if A.format == 'dok':
return _dok_gen_triples(A)
# If A is in any other format (including COO), convert it to COO format.
return _coo_gen_triples(A.tocoo())
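# Illustrative usage sketch, not part of the original module: the helper name
# `_example_generate_weighted_edges` is hypothetical and assumes scipy is
# installed. It builds a tiny sparse matrix and shows that the CSR and COO
# code paths of `_generate_weighted_edges` yield the same (u, v, w) triples.
def _example_generate_weighted_edges():
    from scipy import sparse
    # Two weighted entries: (0, 1) -> 3 and (1, 1) -> 5.
    A = sparse.coo_matrix(([3, 5], ([0, 1], [1, 1])), shape=(2, 2))
    csr_triples = sorted(_generate_weighted_edges(A.tocsr()))
    coo_triples = sorted(_generate_weighted_edges(A))
    # Both should be [(0, 1, 3), (1, 1, 5)].
    return csr_triples, coo_triples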
def from_scipy_sparse_matrix(A, parallel_edges=False, create_using=None,
edge_attribute='weight'):
"""Creates a new graph from an adjacency matrix given as a SciPy sparse
matrix.
Parameters
----------
A: scipy sparse matrix
An adjacency matrix representation of a graph
parallel_edges : Boolean
If this is ``True``, `create_using` is a multigraph, and `A` is an
integer matrix, then entry *(i, j)* in the matrix is interpreted as the
number of parallel edges joining vertices *i* and *j* in the graph. If it
is ``False``, then the entries in the adjacency matrix are interpreted as
the weight of a single edge joining the vertices.
create_using: NetworkX graph
Use specified graph for result. The default is Graph()
edge_attribute: string
Name of edge attribute to store matrix numeric value. The data will
have the same type as the matrix entry (int, float, (real,imag)).
Notes
-----
If `create_using` is an instance of :class:`networkx.MultiGraph` or
:class:`networkx.MultiDiGraph`, `parallel_edges` is ``True``, and the
entries of `A` are of type ``int``, then this function returns a multigraph
(of the same type as `create_using`) with parallel edges. In this case,
`edge_attribute` will be ignored.
If `create_using` is an undirected multigraph, then only the edges
indicated by the upper triangle of the matrix `A` will be added to the
graph.
Examples
--------
>>> import scipy.sparse
>>> A = scipy.sparse.eye(2,2,1)
>>> G = nx.from_scipy_sparse_matrix(A)
If `create_using` is a multigraph and the matrix has only integer entries,
the entries will be interpreted as weighted edges joining the vertices
(without creating parallel edges):
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 2}}
If `create_using` is a multigraph and the matrix has only integer entries
but `parallel_edges` is ``True``, then the entries will be interpreted as
the number of parallel edges joining those two vertices:
>>> import scipy
>>> A = scipy.sparse.csr_matrix([[1, 1], [1, 2]])
>>> G = nx.from_scipy_sparse_matrix(A, parallel_edges=True,
... create_using=nx.MultiGraph())
>>> G[1][1]
{0: {'weight': 1}, 1: {'weight': 1}}
"""
G = _prep_create_using(create_using)
n,m = A.shape
if n != m:
raise nx.NetworkXError(\
"Adjacency matrix is not square. nx,ny=%s"%(A.shape,))
# Make sure we get even the isolated nodes of the graph.
G.add_nodes_from(range(n))
# Create an iterable over (u, v, w) triples and for each triple, add an
# edge from u to v with weight w.
triples = _generate_weighted_edges(A)
# If the entries in the adjacency matrix are integers, the graph is a
# multigraph, and parallel_edges is True, then create parallel edges, each
# with weight 1, for each entry in the adjacency matrix. Otherwise, create
# one edge for each positive entry in the adjacency matrix and set the
# weight of that edge to be the entry in the matrix.
if A.dtype.kind in ('i', 'u') and G.is_multigraph() and parallel_edges:
chain = itertools.chain.from_iterable
# The following line is equivalent to:
#
# for (u, v) in edges:
# for d in range(A[u, v]):
# G.add_edge(u, v, weight=1)
#
triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
# If we are creating an undirected multigraph, only add the edges from the
# upper triangle of the matrix. Otherwise, add all the edges. This relies
# on the fact that the vertices created in the
# ``_generated_weighted_edges()`` function are actually the row/column
# indices for the matrix ``A``.
#
# Without this check, we run into a problem where each edge is added twice
# when `G.add_weighted_edges_from()` is invoked below.
if G.is_multigraph() and not G.is_directed():
triples = ((u, v, d) for u, v, d in triples if u <= v)
G.add_weighted_edges_from(triples, weight=edge_attribute)
return G
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available")
try:
import scipy
except:
raise SkipTest("SciPy not available")
| bsd-3-clause |
lreis2415/SEIMS | seims/pyseims_check.py | 1 | 2306 | """Check the requirements of pySEIMS.
"""
from __future__ import absolute_import, unicode_literals
# 1. pygeoc
try:
import pygeoc
from pygeoc.raster import *
from pygeoc.vector import *
from pygeoc.hydro import *
from pygeoc.utils import *
except ImportError:
print('ERROR: PyGeoC is not successfully installed, please check and retry!')
else:
print('PyGeoC-%s has been installed!' % pygeoc.__version__)
# 2. gdal
try:
import osgeo
from osgeo import ogr
from osgeo import osr
from osgeo import gdalconst
from osgeo import gdal_array
from osgeo import gdal
except ImportError:
print('ERROR: GDAL is not successfully installed, please check and retry!')
else:
print('GDAL-%s has been installed!' % osgeo.__version__)
# 3. numpy
try:
import numpy
except ImportError:
print('ERROR: NumPy is not successfully installed, please check and retry!')
else:
print('NumPy-%s has been installed!' % numpy.__version__)
# 4. pymongo
try:
import pymongo
from pymongo import MongoClient
except ImportError:
print('ERROR: pymongo is not successfully installed, please check and retry!')
else:
print('pymongo-%s has been installed!' % pymongo.__version__)
# 5. networkx
try:
import networkx
except ImportError:
print('ERROR: networkx is not successfully installed, please check and retry!')
else:
print('networkx-%s has been installed!' % networkx.__version__)
# 6. shapely
try:
import shapely
except ImportError:
print('ERROR: shapely is not successfully installed, please check and retry!')
else:
print('shapely-%s has been installed!' % shapely.__version__)
# 7. matplotlib
try:
import matplotlib
except ImportError:
print('ERROR: matplotlib is not successfully installed, please check and retry!')
else:
print('matplotlib-%s has been installed!' % matplotlib.__version__)
# 8. deap
try:
import deap
except ImportError:
print('ERROR: deap is not successfully installed, please check and retry!')
else:
print('deap-%s has been installed!' % deap.__version__)
# 9. scoop
try:
import scoop
except ImportError:
print('ERROR: scoop is not successfully installed, please check and retry!')
else:
print('scoop-%s.%s has been installed!' % (scoop.__version__, scoop.__revision__))
| gpl-3.0 |
PatrickChrist/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
florentchandelier/zipline | zipline/pipeline/loaders/utils.py | 5 | 15156 | import datetime
import numpy as np
import pandas as pd
from zipline.pipeline.common import TS_FIELD_NAME, SID_FIELD_NAME
from zipline.utils.numpy_utils import categorical_dtype
from zipline.utils.pandas_utils import mask_between_time
def is_sorted_ascending(a):
"""Check if a numpy array is sorted."""
return (np.fmax.accumulate(a) <= a).all()
def validate_event_metadata(event_dates,
event_timestamps,
event_sids):
assert is_sorted_ascending(event_dates), "event dates must be sorted"
assert len(event_sids) == len(event_dates) == len(event_timestamps), \
"mismatched arrays: %d != %d != %d" % (
len(event_sids),
len(event_dates),
len(event_timestamps),
)
def next_event_indexer(all_dates,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the next event for each
sid at each moment in time.
Locations where no next event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input events occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
        Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
sid_ixs = all_sids.searchsorted(event_sids)
# side='right' here ensures that we include the event date itself
# if it's in all_dates.
dt_ixs = all_dates.searchsorted(event_dates, side='right')
ts_ixs = all_dates.searchsorted(event_timestamps)
# Walk backward through the events, writing the index of the event into
# slots ranging from the event's timestamp to its asof. This depends for
# correctness on the fact that event_dates is sorted in ascending order,
# because we need to overwrite later events with earlier ones if their
# eligible windows overlap.
for i in range(len(event_sids) - 1, -1, -1):
start_ix = ts_ixs[i]
end_ix = dt_ixs[i]
out[start_ix:end_ix, sid_ixs[i]] = i
return out
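# Illustrative usage sketch, not part of the original module: the helper name
# `_example_next_event_indexer` is hypothetical. One event for sid 1 occurs on
# 2014-01-08 and is learned about on 2014-01-07, so the rows for 2014-01-07
# and 2014-01-08 in sid 1's column should point at event 0; every other slot
# should stay -1.
def _example_next_event_indexer():
    all_dates = pd.to_datetime(
        ['2014-01-06', '2014-01-07', '2014-01-08', '2014-01-09']
    ).values
    all_sids = np.array([0, 1])
    event_dates = pd.to_datetime(['2014-01-08']).values
    event_timestamps = pd.to_datetime(['2014-01-07']).values
    event_sids = np.array([1])
    return next_event_indexer(
        all_dates, all_sids, event_dates, event_timestamps, event_sids
    )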
def previous_event_indexer(all_dates,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the previous event for
each sid at each moment in time.
Locations where no previous event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input events occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
        Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
eff_dts = np.maximum(event_dates, event_timestamps)
sid_ixs = all_sids.searchsorted(event_sids)
dt_ixs = all_dates.searchsorted(eff_dts)
# Walk backwards through the events, writing the index of the event into
# slots ranging from max(event_date, event_timestamp) to the start of the
# previously-written event. This depends for correctness on the fact that
# event_dates is sorted in ascending order, because we need to have written
# later events so we know where to stop forward-filling earlier events.
last_written = {}
for i in range(len(event_dates) - 1, -1, -1):
sid_ix = sid_ixs[i]
dt_ix = dt_ixs[i]
out[dt_ix:last_written.get(sid_ix, None), sid_ix] = i
last_written[sid_ix] = dt_ix
return out
def normalize_data_query_time(dt, time, tz):
"""Apply the correct time and timezone to a date.
Parameters
----------
dt : pd.Timestamp
The original datetime that represents the date.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
Returns
-------
query_dt : pd.Timestamp
The timestamp with the correct time and date in utc.
"""
# merge the correct date with the time in the given timezone then convert
# back to utc
return pd.Timestamp(
datetime.datetime.combine(dt.date(), time),
tz=tz,
).tz_convert('utc')
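# Illustrative usage sketch, not part of the original module: the helper name
# `_example_normalize_data_query_time` is hypothetical and assumes pytz is
# available (zipline already depends on it). With a query time of 8:45
# US/Eastern (UTC-5 in January), the result should be 2014-01-06 13:45 UTC.
def _example_normalize_data_query_time():
    import pytz
    dt = pd.Timestamp('2014-01-06', tz='utc')
    return normalize_data_query_time(
        dt, datetime.time(8, 45), pytz.timezone('US/Eastern')
    )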
def normalize_data_query_bounds(lower, upper, time, tz):
"""Adjust the first and last dates in the requested datetime index based on
    the provided query time and tz.
    Parameters
    ----------
lower : pd.Timestamp
The lower date requested.
upper : pd.Timestamp
The upper date requested.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
"""
# Subtract one day to grab things that happened on the first day we are
# requesting. This doesn't need to be a trading day, we are only adding
# a lower bound to limit the amount of in memory filtering that needs
# to happen.
lower -= datetime.timedelta(days=1)
if time is not None:
return normalize_data_query_time(
lower,
time,
tz,
), normalize_data_query_time(
upper,
time,
tz,
)
return lower, upper
_midnight = datetime.time(0, 0)
def normalize_timestamp_to_query_time(df,
time,
tz,
inplace=False,
ts_field='timestamp'):
"""Update the timestamp field of a dataframe to normalize dates around
some data query time/timezone.
Parameters
----------
df : pd.DataFrame
The dataframe to update. This needs a column named ``ts_field``.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
inplace : bool, optional
Update the dataframe in place.
ts_field : str, optional
The name of the timestamp field in ``df``.
Returns
-------
df : pd.DataFrame
The dataframe with the timestamp field normalized. If ``inplace`` is
true, then this will be the same object as ``df`` otherwise this will
be a copy.
"""
if not inplace:
# don't mutate the dataframe in place
df = df.copy()
# There is a pandas bug (0.18.1) where if the timestamps in a
# normalized DatetimeIndex are not sorted and one calls `tz_localize(None)`
    # on that DatetimeIndex, some of the dates will be shifted by an hour
# (similarly to the previously mentioned bug). Therefore, we must sort
    # the df here to ensure that we normalize correctly.
df.sort_values(ts_field, inplace=True)
dtidx = pd.DatetimeIndex(df.loc[:, ts_field], tz='utc')
dtidx_local_time = dtidx.tz_convert(tz)
to_roll_forward = mask_between_time(
dtidx_local_time,
time,
_midnight,
include_end=False,
)
# For all of the times that are greater than our query time add 1
# day and truncate to the date.
# We normalize twice here because of a bug in pandas 0.16.1 that causes
# tz_localize() to shift some timestamps by an hour if they are not grouped
# together by DST/EST.
df.loc[to_roll_forward, ts_field] = (
dtidx_local_time[to_roll_forward] + datetime.timedelta(days=1)
).normalize().tz_localize(None).tz_localize('utc').normalize()
df.loc[~to_roll_forward, ts_field] = dtidx[~to_roll_forward].normalize()
return df
def check_data_query_args(data_query_time, data_query_tz):
"""Checks the data_query_time and data_query_tz arguments for loaders
and raises a standard exception if one is None and the other is not.
Parameters
----------
data_query_time : datetime.time or None
data_query_tz : tzinfo or None
Raises
------
ValueError
Raised when only one of the arguments is None.
"""
if (data_query_time is None) ^ (data_query_tz is None):
raise ValueError(
"either 'data_query_time' and 'data_query_tz' must both be"
" None or neither may be None (got %r, %r)" % (
data_query_time,
data_query_tz,
),
)
def last_in_date_group(df,
dates,
assets,
reindex=True,
have_sids=True,
extra_groupers=None):
"""
Determine the last piece of information known on each date in the date
index for each group. Input df MUST be sorted such that the correct last
item is chosen from each group.
Parameters
----------
df : pd.DataFrame
The DataFrame containing the data to be grouped. Must be sorted so that
the correct last item is chosen from each group.
dates : pd.DatetimeIndex
The dates to use for grouping and reindexing.
assets : pd.Int64Index
The assets that should be included in the column multiindex.
reindex : bool
Whether or not the DataFrame should be reindexed against the date
index. This will add back any dates to the index that were grouped
away.
have_sids : bool
Whether or not the DataFrame has sids. If it does, they will be used
in the groupby.
extra_groupers : list of str
Any extra field names that should be included in the groupby.
Returns
-------
last_in_group : pd.DataFrame
A DataFrame with dates as the index and fields used in the groupby as
levels of a multiindex of columns.
"""
idx = [dates[dates.searchsorted(
df[TS_FIELD_NAME].values.astype('datetime64[D]')
)]]
if have_sids:
idx += [SID_FIELD_NAME]
if extra_groupers is None:
extra_groupers = []
idx += extra_groupers
last_in_group = df.drop(TS_FIELD_NAME, axis=1).groupby(
idx,
sort=False,
).last()
# For the number of things that we're grouping by (except TS), unstack
# the df. Done this way because of an unresolved pandas bug whereby
# passing a list of levels with mixed dtypes to unstack causes the
# resulting DataFrame to have all object-type columns.
for _ in range(len(idx) - 1):
last_in_group = last_in_group.unstack(-1)
if reindex:
if have_sids:
cols = last_in_group.columns
last_in_group = last_in_group.reindex(
index=dates,
columns=pd.MultiIndex.from_product(
tuple(cols.levels[0:len(extra_groupers) + 1]) + (assets,),
names=cols.names,
),
)
else:
last_in_group = last_in_group.reindex(dates)
return last_in_group
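# Illustrative usage sketch, not part of the original module: the helper name
# `_example_last_in_date_group` is hypothetical, uses naive timestamps for
# simplicity, and assumes a pandas version contemporary with this module.
# Two records arrive for sid 1 on 2014-01-06; the later one (2.0) should win,
# and sid 2 only has a value (3.0) on 2014-01-07.
def _example_last_in_date_group():
    dates = pd.date_range('2014-01-06', periods=2)
    assets = pd.Int64Index([1, 2])
    df = pd.DataFrame({
        TS_FIELD_NAME: pd.to_datetime(
            ['2014-01-06 10:00', '2014-01-06 12:00', '2014-01-07 09:00']
        ),
        SID_FIELD_NAME: [1, 1, 2],
        'value': [1.0, 2.0, 3.0],
    })
    return last_in_date_group(df, dates, assets)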
def ffill_across_cols(df, columns, name_map):
"""
Forward fill values in a DataFrame with special logic to handle cases
that pd.DataFrame.ffill cannot and cast columns to appropriate types.
Parameters
----------
df : pd.DataFrame
The DataFrame to do forward-filling on.
columns : list of BoundColumn
The BoundColumns that correspond to columns in the DataFrame to which
special filling and/or casting logic should be applied.
name_map: map of string -> string
Mapping from the name of each BoundColumn to the associated column
name in `df`.
"""
df.ffill(inplace=True)
# Fill in missing values specified by each column. This is made
# significantly more complex by the fact that we need to work around
# two pandas issues:
# 1) When we have sids, if there are no records for a given sid for any
# dates, pandas will generate a column full of NaNs for that sid.
# This means that some of the columns in `dense_output` are now
# float instead of the intended dtype, so we have to coerce back to
# our expected type and convert NaNs into the desired missing value.
# 2) DataFrame.ffill assumes that receiving None as a fill-value means
# that no value was passed. Consequently, there's no way to tell
# pandas to replace NaNs in an object column with None using fillna,
# so we have to roll our own instead using df.where.
for column in columns:
column_name = name_map[column.name]
# Special logic for strings since `fillna` doesn't work if the
# missing value is `None`.
if column.dtype == categorical_dtype:
df[column_name] = df[
column.name
].where(pd.notnull(df[column_name]),
column.missing_value)
else:
# We need to execute `fillna` before `astype` in case the
# column contains NaNs and needs to be cast to bool or int.
# This is so that the NaNs are replaced first, since pandas
# can't convert NaNs for those types.
df[column_name] = df[
column_name
].fillna(column.missing_value).astype(column.dtype)
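# Illustrative usage sketch, not part of the original module: `ffill_across_cols`
# expects Pipeline BoundColumn objects; the namedtuple stand-in below only
# provides the three attributes the function actually reads (`name`, `dtype`,
# `missing_value`) and is an assumption made purely for demonstration.
def _example_ffill_across_cols():
    from collections import namedtuple
    FakeColumn = namedtuple('FakeColumn', ['name', 'dtype', 'missing_value'])
    col = FakeColumn(name='value', dtype=np.dtype('float64'), missing_value=0.0)
    df = pd.DataFrame({'value': [np.nan, 1.0, np.nan]})
    ffill_across_cols(df, [col], {'value': 'value'})
    # The leading NaN cannot be forward-filled, so it should become the
    # column's missing_value (0.0); the trailing NaN is forward-filled to 1.0.
    return df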
| apache-2.0 |
metaml/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/cbook.py | 69 | 42525 | """
A collection of utility functions and classes. Many (but not all)
from the Python Cookbook -- hence the name cbook
"""
from __future__ import generators
import re, os, errno, sys, StringIO, traceback, locale, threading, types
import time, datetime
import warnings
import numpy as np
import numpy.ma as ma
from weakref import ref
major, minor1, minor2, s, tmp = sys.version_info
# on some systems, locale.getpreferredencoding returns None, which can break unicode
preferredencoding = locale.getpreferredencoding()
def unicode_safe(s):
if preferredencoding is None: return unicode(s)
else: return unicode(s, preferredencoding)
class converter:
"""
Base class for handling string -> python type with support for
missing values
"""
def __init__(self, missing='Null', missingval=None):
self.missing = missing
self.missingval = missingval
def __call__(self, s):
if s==self.missing: return self.missingval
return s
def is_missing(self, s):
return not s.strip() or s==self.missing
class tostr(converter):
'convert to string or None'
def __init__(self, missing='Null', missingval=''):
converter.__init__(self, missing=missing, missingval=missingval)
class todatetime(converter):
'convert to a datetime or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s): return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.datetime(*tup[:6])
class todate(converter):
'convert to a date or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s): return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.date(*tup[:3])
class tofloat(converter):
'convert to a float or None'
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
self.missingval = missingval
def __call__(self, s):
if self.is_missing(s): return self.missingval
return float(s)
class toint(converter):
'convert to an int or None'
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
def __call__(self, s):
if self.is_missing(s): return self.missingval
return int(s)
class CallbackRegistry:
"""
Handle registering and disconnecting for a set of signals and
callbacks::
signals = 'eat', 'drink', 'be merry'
def oneat(x):
print 'eat', x
def ondrink(x):
print 'drink', x
callbacks = CallbackRegistry(signals)
ideat = callbacks.connect('eat', oneat)
iddrink = callbacks.connect('drink', ondrink)
#tmp = callbacks.connect('drunk', ondrink) # this will raise a ValueError
       callbacks.process('drink', 123) # will call ondrink
       callbacks.process('eat', 456) # will call oneat
callbacks.process('be merry', 456) # nothing will be called
callbacks.disconnect(ideat) # disconnect oneat
callbacks.process('eat', 456) # nothing will be called
"""
def __init__(self, signals):
'*signals* is a sequence of valid signals'
self.signals = set(signals)
# callbacks is a dict mapping the signal to a dictionary
# mapping callback id to the callback function
self.callbacks = dict([(s, dict()) for s in signals])
self._cid = 0
def _check_signal(self, s):
'make sure *s* is a valid signal or raise a ValueError'
if s not in self.signals:
signals = list(self.signals)
signals.sort()
raise ValueError('Unknown signal "%s"; valid signals are %s'%(s, signals))
def connect(self, s, func):
"""
        register *func* to be called when a signal *s* is generated;
        *func* will be called with the arguments passed to :meth:`process`
"""
self._check_signal(s)
self._cid +=1
self.callbacks[s][self._cid] = func
return self._cid
def disconnect(self, cid):
"""
disconnect the callback registered with callback id *cid*
"""
for eventname, callbackd in self.callbacks.items():
try: del callbackd[cid]
except KeyError: continue
else: return
def process(self, s, *args, **kwargs):
"""
process signal *s*. All of the functions registered to receive
callbacks on *s* will be called with *\*args* and *\*\*kwargs*
"""
self._check_signal(s)
for func in self.callbacks[s].values():
func(*args, **kwargs)
class Scheduler(threading.Thread):
"""
Base class for timeout and idle scheduling
"""
idlelock = threading.Lock()
id = 0
def __init__(self):
threading.Thread.__init__(self)
self.id = Scheduler.id
self._stopped = False
Scheduler.id += 1
self._stopevent = threading.Event()
def stop(self):
if self._stopped: return
self._stopevent.set()
self.join()
self._stopped = True
class Timeout(Scheduler):
"""
Schedule recurring events with a wait time in seconds
"""
def __init__(self, wait, func):
Scheduler.__init__(self)
self.wait = wait
self.func = func
def run(self):
while not self._stopevent.isSet():
self._stopevent.wait(self.wait)
Scheduler.idlelock.acquire()
b = self.func(self)
Scheduler.idlelock.release()
if not b: break
class Idle(Scheduler):
"""
Schedule callbacks when scheduler is idle
"""
# the prototype impl is a bit of a poor man's idle handler. It
# just implements a short wait time. But it will provide a
    # placeholder for a proper implementation later
waittime = 0.05
def __init__(self, func):
Scheduler.__init__(self)
self.func = func
def run(self):
while not self._stopevent.isSet():
self._stopevent.wait(Idle.waittime)
Scheduler.idlelock.acquire()
b = self.func(self)
Scheduler.idlelock.release()
if not b: break
class silent_list(list):
"""
override repr when returning a list of matplotlib artists to
prevent long, meaningless output. This is meant to be used for a
    homogeneous list of a given type
"""
def __init__(self, type, seq=None):
self.type = type
if seq is not None: self.extend(seq)
def __repr__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def __str__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def strip_math(s):
'remove latex formatting from mathtext'
remove = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it', '\\', '{', '}')
s = s[1:-1]
for r in remove: s = s.replace(r,'')
return s
class Bunch:
"""
Often we want to just collect a bunch of stuff together, naming each
    item of the bunch; a dictionary's OK for that, but a small do-nothing
class is even handier, and prettier to use. Whenever you want to
group a few variables:
>>> point = Bunch(datum=2, squared=4, coord=12)
>>> point.datum
By: Alex Martelli
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def unique(x):
'Return a list of unique elements of *x*'
return dict([ (val, 1) for val in x]).keys()
def iterable(obj):
'return true if *obj* is iterable'
try: len(obj)
except: return False
return True
def is_string_like(obj):
'Return True if *obj* looks like a string'
if isinstance(obj, (str, unicode)): return True
# numpy strings are subclass of str, ma strings are not
if ma.isMaskedArray(obj):
if obj.ndim == 0 and obj.dtype.kind in 'SU':
return True
else:
return False
try: obj + ''
except (TypeError, ValueError): return False
return True
def is_sequence_of_strings(obj):
"""
Returns true if *obj* is iterable and contains strings
"""
if not iterable(obj): return False
if is_string_like(obj): return False
for o in obj:
if not is_string_like(o): return False
return True
def is_writable_file_like(obj):
'return true if *obj* looks like a file object with a *write* method'
return hasattr(obj, 'write') and callable(obj.write)
def is_scalar(obj):
'return true if *obj* is not string like and is not iterable'
return not is_string_like(obj) and not iterable(obj)
def is_numlike(obj):
'return true if *obj* looks like a number'
try: obj+1
except TypeError: return False
else: return True
def to_filehandle(fname, flag='r', return_opened=False):
"""
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in .gz. *flag* is a
read/write flag for :func:`file`
"""
if is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, flag)
else:
fh = file(fname, flag)
opened = True
elif hasattr(fname, 'seek'):
fh = fname
opened = False
else:
raise ValueError('fname must be a string or file handle')
if return_opened:
return fh, opened
return fh
def is_scalar_or_string(val):
return is_string_like(val) or not iterable(val)
def flatten(seq, scalarp=is_scalar_or_string):
"""
this generator flattens nested containers such as
>>> l=( ('John', 'Hunter'), (1,23), [[[[42,(5,23)]]]])
so that
>>> for i in flatten(l): print i,
John Hunter 1 23 42 5 23
By: Composite of Holger Krekel and Luther Blissett
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/121294
and Recipe 1.12 in cookbook
"""
for item in seq:
if scalarp(item): yield item
else:
for subitem in flatten(item, scalarp):
yield subitem
class Sorter:
"""
Sort by attribute or item
Example usage::
sort = Sorter()
list = [(1, 2), (4, 8), (0, 3)]
dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
{'a': 9, 'b': 9}]
sort(list) # default sort
sort(list, 1) # sort by index 1
sort(dict, 'a') # sort a list of dicts by key 'a'
"""
def _helper(self, data, aux, inplace):
aux.sort()
result = [data[i] for junk, i in aux]
if inplace: data[:] = result
return result
def byItem(self, data, itemindex=None, inplace=1):
if itemindex is None:
if inplace:
data.sort()
result = data
else:
result = data[:]
result.sort()
return result
else:
aux = [(data[i][itemindex], i) for i in range(len(data))]
return self._helper(data, aux, inplace)
def byAttribute(self, data, attributename, inplace=1):
aux = [(getattr(data[i],attributename),i) for i in range(len(data))]
return self._helper(data, aux, inplace)
# a couple of handy synonyms
sort = byItem
__call__ = byItem
class Xlator(dict):
"""
All-in-one multiple-string-substitution class
Example usage::
text = "Larry Wall is the creator of Perl"
adict = {
"Larry Wall" : "Guido van Rossum",
"creator" : "Benevolent Dictator for Life",
"Perl" : "Python",
}
print multiple_replace(adict, text)
xlat = Xlator(adict)
print xlat.xlat(text)
"""
def _make_regex(self):
""" Build re object based on the keys of the current dictionary """
return re.compile("|".join(map(re.escape, self.keys())))
def __call__(self, match):
""" Handler invoked for each regex *match* """
return self[match.group(0)]
def xlat(self, text):
""" Translate *text*, returns the modified text. """
return self._make_regex().sub(self, text)
def soundex(name, len=4):
""" soundex module conforming to Odell-Russell algorithm """
# digits holds the soundex values for the alphabet
soundex_digits = '01230120022455012623010202'
sndx = ''
fc = ''
# Translate letters in name to soundex digits
for c in name.upper():
if c.isalpha():
if not fc: fc = c # Remember first letter
d = soundex_digits[ord(c)-ord('A')]
# Duplicate consecutive soundex digits are skipped
if not sndx or (d != sndx[-1]):
sndx += d
# Replace first digit with first letter
sndx = fc + sndx[1:]
# Remove all 0s from the soundex code
sndx = sndx.replace('0', '')
# Return soundex code truncated or 0-padded to len characters
return (sndx + (len * '0'))[:len]
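# Illustrative usage sketch, not part of the original module: the helper name
# `_example_soundex` is hypothetical. With the Odell-Russell digit table above,
# 'Robert' and 'Rupert' should both encode to 'R163'.
def _example_soundex():
    return soundex('Robert'), soundex('Rupert')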
class Null:
""" Null objects always and reliably "do nothing." """
def __init__(self, *args, **kwargs): pass
def __call__(self, *args, **kwargs): return self
def __str__(self): return "Null()"
def __repr__(self): return "Null()"
def __nonzero__(self): return 0
def __getattr__(self, name): return self
def __setattr__(self, name, value): return self
def __delattr__(self, name): return self
def mkdirs(newdir, mode=0777):
"""
make directory *newdir* recursively, and set *mode*. Equivalent to ::
> mkdir -p NEWDIR
> chmod MODE NEWDIR
"""
try:
if not os.path.exists(newdir):
parts = os.path.split(newdir)
for i in range(1, len(parts)+1):
thispart = os.path.join(*parts[:i])
if not os.path.exists(thispart):
os.makedirs(thispart, mode)
except OSError, err:
# Reraise the error unless it's about an already existing directory
if err.errno != errno.EEXIST or not os.path.isdir(newdir):
raise
class GetRealpathAndStat:
def __init__(self):
self._cache = {}
def __call__(self, path):
result = self._cache.get(path)
if result is None:
realpath = os.path.realpath(path)
if sys.platform == 'win32':
stat_key = realpath
else:
stat = os.stat(realpath)
stat_key = (stat.st_ino, stat.st_dev)
result = realpath, stat_key
self._cache[path] = result
return result
get_realpath_and_stat = GetRealpathAndStat()
def dict_delall(d, keys):
'delete all of the *keys* from the :class:`dict` *d*'
for key in keys:
try: del d[key]
except KeyError: pass
class RingBuffer:
""" class that implements a not-yet-full buffer """
def __init__(self,size_max):
self.max = size_max
self.data = []
class __Full:
""" class that implements a full buffer """
def append(self, x):
""" Append an element overwriting the oldest one. """
self.data[self.cur] = x
self.cur = (self.cur+1) % self.max
def get(self):
""" return list of elements in correct order """
return self.data[self.cur:]+self.data[:self.cur]
def append(self,x):
"""append an element at the end of the buffer"""
self.data.append(x)
if len(self.data) == self.max:
self.cur = 0
# Permanently change self's class from non-full to full
self.__class__ = __Full
def get(self):
""" Return a list of elements from the oldest to the newest. """
return self.data
def __get_item__(self, i):
return self.data[i % len(self.data)]
def get_split_ind(seq, N):
"""
*seq* is a list of words. Return the index into seq such that::
        len(' '.join(seq[:ind])) <= N
"""
sLen = 0
# todo: use Alex's xrange pattern from the cbook for efficiency
for (word, ind) in zip(seq, range(len(seq))):
sLen += len(word) + 1 # +1 to account for the len(' ')
if sLen>=N: return ind
return len(seq)
def wrap(prefix, text, cols):
'wrap *text* with *prefix* at length *cols*'
pad = ' '*len(prefix.expandtabs())
available = cols - len(pad)
seq = text.split(' ')
Nseq = len(seq)
ind = 0
lines = []
while ind<Nseq:
lastInd = ind
ind += get_split_ind(seq[ind:], available)
lines.append(seq[lastInd:ind])
# add the prefix to the first line, pad with spaces otherwise
ret = prefix + ' '.join(lines[0]) + '\n'
for line in lines[1:]:
ret += pad + ' '.join(line) + '\n'
return ret
# A regular expression used to determine the amount of space to
# remove. It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile("(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
def dedent(s):
"""
Remove excess indentation from docstring *s*.
Discards any leading blank lines, then removes up to n whitespace
characters from each line, where n is the number of leading
whitespace characters in the first line. It differs from
textwrap.dedent in its deletion of leading blank lines and its use
of the first non-blank line to determine the indentation.
It is also faster in most cases.
"""
# This implementation has a somewhat obtuse use of regular
# expressions. However, this function accounted for almost 30% of
# matplotlib startup time, so it is worthy of optimization at all
# costs.
if not s: # includes case of s is None
return ''
match = _find_dedent_regex.match(s)
if match is None:
return s
# This is the number of spaces to remove from the left-hand side.
nshift = match.end(1) - match.start(1)
if nshift == 0:
return s
# Get a regex that will remove *up to* nshift spaces from the
# beginning of each line. If it isn't in the cache, generate it.
unindent = _dedent_regex.get(nshift, None)
if unindent is None:
unindent = re.compile("\n\r? {0,%d}" % nshift)
_dedent_regex[nshift] = unindent
result = unindent.sub("\n", s).strip()
return result
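# Illustrative usage sketch, not part of the original module: the helper name
# `_example_dedent` is hypothetical. The first non-blank line fixes the
# indentation to strip (4 spaces here), so the nested line should keep its
# extra two spaces: the expected result is 'first line\n  indented line'.
def _example_dedent():
    return dedent('\n    first line\n      indented line\n')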
def listFiles(root, patterns='*', recurse=1, return_folders=0):
"""
Recursively list files
from Parmar and Martelli in the Python Cookbook
"""
import os.path, fnmatch
# Expand patterns from semicolon-separated string to list
pattern_list = patterns.split(';')
# Collect input and output arguments into one bunch
class Bunch:
def __init__(self, **kwds): self.__dict__.update(kwds)
arg = Bunch(recurse=recurse, pattern_list=pattern_list,
return_folders=return_folders, results=[])
def visit(arg, dirname, files):
# Append to arg.results all relevant files (and perhaps folders)
for name in files:
fullname = os.path.normpath(os.path.join(dirname, name))
if arg.return_folders or os.path.isfile(fullname):
for pattern in arg.pattern_list:
if fnmatch.fnmatch(name, pattern):
arg.results.append(fullname)
break
# Block recursion if recursion was disallowed
if not arg.recurse: files[:]=[]
os.path.walk(root, visit, arg)
return arg.results
def get_recursive_filelist(args):
"""
    Recurse through all the files and dirs in *args*, ignoring symbolic
    links, and return the files as a list of strings
"""
files = []
for arg in args:
if os.path.isfile(arg):
files.append(arg)
continue
if os.path.isdir(arg):
newfiles = listFiles(arg, recurse=1, return_folders=1)
files.extend(newfiles)
return [f for f in files if not os.path.islink(f)]
def pieces(seq, num=2):
"Break up the *seq* into *num* tuples"
start = 0
while 1:
item = seq[start:start+num]
if not len(item): break
yield item
start += num
def exception_to_str(s = None):
sh = StringIO.StringIO()
if s is not None: print >>sh, s
traceback.print_exc(file=sh)
return sh.getvalue()
def allequal(seq):
"""
Return *True* if all elements of *seq* compare equal. If *seq* is
0 or 1 length, return *True*
"""
if len(seq)<2: return True
val = seq[0]
for i in xrange(1, len(seq)):
thisval = seq[i]
if thisval != val: return False
return True
def alltrue(seq):
"""
Return *True* if all elements of *seq* evaluate to *True*. If
*seq* is empty, return *False*.
"""
if not len(seq): return False
for val in seq:
if not val: return False
return True
def onetrue(seq):
"""
    Return *True* if one element of *seq* is *True*. If *seq* is
empty, return *False*.
"""
if not len(seq): return False
for val in seq:
if val: return True
return False
def allpairs(x):
"""
return all possible pairs in sequence *x*
Condensed by Alex Martelli from this thread_ on c.l.python
.. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
"""
return [ (s, f) for i, f in enumerate(x) for s in x[i+1:] ]
# python 2.2 dicts don't have pop--but we don't support 2.2 any more
def popd(d, *args):
"""
Should behave like python2.3 :meth:`dict.pop` method; *d* is a
:class:`dict`::
# returns value for key and deletes item; raises a KeyError if key
# is not in dict
val = popd(d, key)
# returns value for key if key exists, else default. Delete key,
# val item if it exists. Will not raise a KeyError
val = popd(d, key, default)
"""
warnings.warn("Use native python dict.pop method", DeprecationWarning)
# warning added 2008/07/22
if len(args)==1:
key = args[0]
val = d[key]
del d[key]
elif len(args)==2:
key, default = args
val = d.get(key, default)
try: del d[key]
except KeyError: pass
return val
class maxdict(dict):
"""
A dictionary with a maximum size; this doesn't override all the
    relevant methods to constrain size, just setitem, so use with
caution
"""
def __init__(self, maxsize):
dict.__init__(self)
self.maxsize = maxsize
self._killkeys = []
def __setitem__(self, k, v):
if len(self)>=self.maxsize:
del self[self._killkeys[0]]
del self._killkeys[0]
dict.__setitem__(self, k, v)
self._killkeys.append(k)
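# Illustrative usage sketch, not part of the original module: the helper name
# `_example_maxdict` is hypothetical. With a maximum size of 2, inserting a
# third key should evict the oldest key ('a'), leaving ['b', 'c'].
def _example_maxdict():
    d = maxdict(2)
    d['a'] = 1
    d['b'] = 2
    d['c'] = 3
    return sorted(d.keys())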
class Stack:
"""
Implement a stack where elements can be pushed on and you can move
back and forth. But no pop. Should mimic home / back / forward
in a browser
"""
def __init__(self, default=None):
self.clear()
self._default = default
def __call__(self):
'return the current element, or None'
if not len(self._elements): return self._default
else: return self._elements[self._pos]
def forward(self):
'move the position forward and return the current element'
N = len(self._elements)
if self._pos<N-1: self._pos += 1
return self()
def back(self):
'move the position back and return the current element'
if self._pos>0: self._pos -= 1
return self()
def push(self, o):
"""
push object onto stack at current position - all elements
occurring later than the current position are discarded
"""
self._elements = self._elements[:self._pos+1]
self._elements.append(o)
self._pos = len(self._elements)-1
return self()
def home(self):
'push the first element onto the top of the stack'
if not len(self._elements): return
self.push(self._elements[0])
return self()
def empty(self):
return len(self._elements)==0
def clear(self):
'empty the stack'
self._pos = -1
self._elements = []
def bubble(self, o):
"""
raise *o* to the top of the stack and return *o*. *o* must be
in the stack
"""
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
bubbles = []
for thiso in old:
if thiso==o: bubbles.append(thiso)
else: self.push(thiso)
for thiso in bubbles:
self.push(o)
return o
def remove(self, o):
'remove element *o* from the stack'
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
for thiso in old:
if thiso==o: continue
else: self.push(thiso)
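# Minimal usage sketch (illustrative only), mimicking browser-style history:
#   s = Stack()
#   s.push('home'); s.push('page1'); s.push('page2')
#   s.back()      # -> 'page1'
#   s.forward()   # -> 'page2'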
def popall(seq):
'empty a list'
for i in xrange(len(seq)): seq.pop()
def finddir(o, match, case=False):
"""
    Return all attributes of *o* whose names contain the string *match*.
    If *case* is True, require an exact case match.
"""
if case:
names = [(name,name) for name in dir(o) if is_string_like(name)]
else:
names = [(name.lower(), name) for name in dir(o) if is_string_like(name)]
match = match.lower()
return [orig for name, orig in names if name.find(match)>=0]
def reverse_dict(d):
'reverse the dictionary -- may lose data if values are not unique!'
return dict([(v,k) for k,v in d.items()])
def report_memory(i=0): # argument may go away
'return the memory consumed by process'
pid = os.getpid()
if sys.platform=='sunos5':
a2 = os.popen('ps -p %d -o osz' % pid).readlines()
mem = int(a2[-1].strip())
elif sys.platform.startswith('linux'):
a2 = os.popen('ps -p %d -o rss,sz' % pid).readlines()
mem = int(a2[1].split()[1])
elif sys.platform.startswith('darwin'):
a2 = os.popen('ps -p %d -o rss,vsz' % pid).readlines()
mem = int(a2[1].split()[0])
return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
def safezip(*args):
'make sure *args* are equal len before zipping'
Nx = len(args[0])
for i, arg in enumerate(args[1:]):
if len(arg) != Nx:
raise ValueError(_safezip_msg % (Nx, i+1, len(arg)))
return zip(*args)
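# Illustrative example: safezip([1, 2], [3, 4]) == [(1, 3), (2, 4)], while
# safezip([1, 2], [3]) raises a ValueError.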
def issubclass_safe(x, klass):
'return issubclass(x, klass) and return False on a TypeError'
try:
return issubclass(x, klass)
except TypeError:
return False
class MemoryMonitor:
def __init__(self, nmax=20000):
self._nmax = nmax
self._mem = np.zeros((self._nmax,), np.int32)
self.clear()
def clear(self):
self._n = 0
self._overflow = False
def __call__(self):
mem = report_memory()
if self._n < self._nmax:
self._mem[self._n] = mem
self._n += 1
else:
self._overflow = True
return mem
def report(self, segments=4):
n = self._n
segments = min(n, segments)
dn = int(n/segments)
ii = range(0, n, dn)
ii[-1] = n-1
print
print 'memory report: i, mem, dmem, dmem/nloops'
print 0, self._mem[0]
for i in range(1, len(ii)):
di = ii[i] - ii[i-1]
if di == 0:
continue
dm = self._mem[ii[i]] - self._mem[ii[i-1]]
print '%5d %5d %3d %8.3f' % (ii[i], self._mem[ii[i]],
dm, dm / float(di))
if self._overflow:
print "Warning: array size was too small for the number of calls."
def xy(self, i0=0, isub=1):
x = np.arange(i0, self._n, isub)
return x, self._mem[i0:self._n:isub]
def plot(self, i0=0, isub=1, fig=None):
if fig is None:
from pylab import figure, show
fig = figure()
ax = fig.add_subplot(111)
ax.plot(*self.xy(i0, isub))
fig.canvas.draw()
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
"""
*objects*
A list of objects to find cycles in. It is often useful to
pass in gc.garbage to find the cycles that are preventing some
objects from being garbage collected.
*outstream*
The stream for output.
*show_progress*
If True, print the number of objects reached as they are found.
"""
import gc
from types import FrameType
def print_path(path):
for i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write(" %s -- " % str(type(step)))
if isinstance(step, dict):
for key, val in step.items():
if val is next:
outstream.write("[%s]" % repr(key))
break
if key is next:
outstream.write("[key] = %s" % repr(val))
break
elif isinstance(step, list):
outstream.write("[%d]" % step.index(next))
elif isinstance(step, tuple):
outstream.write("( tuple )")
else:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + [obj])
for obj in objects:
outstream.write("Examining: %r\n" % (obj,))
recurse(obj, obj, { }, [])
class Grouper(object):
"""
This class provides a lightweight way to group arbitrary objects
together into disjoint sets when a full-blown graph data structure
would be overkill.
Objects can be joined using :meth:`join`, tested for connectedness
    using :meth:`joined`, and all disjoint sets can be retrieved by
using the object as an iterator.
The objects being joined must be hashable.
For example:
>>> g = grouper.Grouper()
>>> g.join('a', 'b')
>>> g.join('b', 'c')
>>> g.join('d', 'e')
>>> list(g)
[['a', 'b', 'c'], ['d', 'e']]
>>> g.joined('a', 'b')
True
>>> g.joined('a', 'c')
True
>>> g.joined('a', 'd')
False
"""
def __init__(self, init=[]):
mapping = self._mapping = {}
for x in init:
mapping[ref(x)] = [ref(x)]
def __contains__(self, item):
return ref(item) in self._mapping
def clean(self):
"""
Clean dead weak references from the dictionary
"""
mapping = self._mapping
for key, val in mapping.items():
if key() is None:
del mapping[key]
val.remove(key)
def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more
arguments.
"""
mapping = self._mapping
set_a = mapping.setdefault(ref(a), [ref(a)])
for arg in args:
set_b = mapping.get(ref(arg))
if set_b is None:
set_a.append(ref(arg))
mapping[ref(arg)] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a
self.clean()
def joined(self, a, b):
"""
Returns True if *a* and *b* are members of the same set.
"""
self.clean()
mapping = self._mapping
try:
return mapping[ref(a)] is mapping[ref(b)]
except KeyError:
return False
def __iter__(self):
"""
Iterate over each of the disjoint sets as a list.
The iterator is invalid if interleaved with calls to join().
"""
self.clean()
class Token: pass
token = Token()
# Mark each group as we come across if by appending a token,
# and don't yield it twice
for group in self._mapping.itervalues():
if not group[-1] is token:
yield [x() for x in group]
group.append(token)
# Cleanup the tokens
for group in self._mapping.itervalues():
if group[-1] is token:
del group[-1]
def get_siblings(self, a):
"""
Returns all of the items joined with *a*, including itself.
"""
self.clean()
siblings = self._mapping.get(ref(a), [ref(a)])
return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
steps = np.floor(steps)
new_length = ((len(a) - 1) * steps) + 1
new_shape = list(a.shape)
new_shape[0] = new_length
result = np.zeros(new_shape, a.dtype)
result[0] = a[0]
a0 = a[0:-1]
a1 = a[1: ]
delta = ((a1 - a0) / steps)
for i in range(1, int(steps)):
result[i::steps] = delta * i + a0
result[steps::steps] = a1
return result
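# Illustrative example: simple_linear_interpolation(np.array([0., 2.]), 2)
# returns array([0., 1., 2.]); each consecutive pair of input points gains
# (steps - 1) interpolated points.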
def recursive_remove(path):
if os.path.isdir(path):
for fname in glob.glob(os.path.join(path, '*')) + glob.glob(os.path.join(path, '.*')):
if os.path.isdir(fname):
recursive_remove(fname)
os.removedirs(fname)
else:
os.remove(fname)
#os.removedirs(path)
else:
os.remove(path)
def delete_masked_points(*args):
"""
Find all masked and/or non-finite points in a set of arguments,
and return the arguments with only the unmasked points remaining.
Arguments can be in any of 5 categories:
1) 1-D masked arrays
2) 1-D ndarrays
3) ndarrays with more than one dimension
4) other non-string iterables
5) anything else
The first argument must be in one of the first four categories;
any argument with a length differing from that of the first
argument (and hence anything in category 5) then will be
passed through unchanged.
Masks are obtained from all arguments of the correct length
in categories 1, 2, and 4; a point is bad if masked in a masked
array or if it is a nan or inf. No attempt is made to
extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
does not yield a Boolean array.
All input arguments that are not passed unchanged are returned
as ndarrays after removing the points or rows corresponding to
masks in any of the arguments.
A vastly simpler version of this function was originally
written as a helper for Axes.scatter().
"""
if not len(args):
return ()
if (is_string_like(args[0]) or not iterable(args[0])):
raise ValueError("First argument must be a sequence")
nrecs = len(args[0])
margs = []
seqlist = [False] * len(args)
for i, x in enumerate(args):
if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:
seqlist[i] = True
if ma.isMA(x):
if x.ndim > 1:
raise ValueError("Masked arrays must be 1-D")
else:
x = np.asarray(x)
margs.append(x)
masks = [] # list of masks that are True where good
for i, x in enumerate(margs):
if seqlist[i]:
if x.ndim > 1:
continue # Don't try to get nan locations unless 1-D.
if ma.isMA(x):
masks.append(~ma.getmaskarray(x)) # invert the mask
xd = x.data
else:
xd = x
try:
mask = np.isfinite(xd)
if isinstance(mask, np.ndarray):
masks.append(mask)
except: #Fixme: put in tuple of possible exceptions?
pass
if len(masks):
mask = reduce(np.logical_and, masks)
igood = mask.nonzero()[0]
if len(igood) < nrecs:
for i, x in enumerate(margs):
if seqlist[i]:
margs[i] = x.take(igood, axis=0)
for i, x in enumerate(margs):
if seqlist[i] and ma.isMA(x):
margs[i] = x.filled()
return margs
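# Illustrative example:
#   delete_masked_points(ma.array([1., 2., 3.], mask=[0, 1, 0]),
#                        np.array([9., 8., np.nan]))
# returns [array([1.]), array([9.])]: only rows that are unmasked and finite
# in every same-length argument are kept.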
def unmasked_index_ranges(mask, compressed = True):
'''
Find index ranges where *mask* is *False*.
*mask* will be flattened if it is not already 1-D.
Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
indices for slices of the compressed :class:`numpy.ndarray`
corresponding to each of *N* uninterrupted runs of unmasked
values. If optional argument *compressed* is *False*, it returns
the start and stop indices into the original :class:`numpy.ndarray`,
not the compressed :class:`numpy.ndarray`. Returns *None* if there
are no unmasked values.
Example::
y = ma.array(np.arange(5), mask = [0,0,1,0,0])
ii = unmasked_index_ranges(ma.getmaskarray(y))
# returns array [[0,2,] [2,4,]]
y.compressed()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
# returns array [[0, 2], [3, 5]]
y.filled()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
Prior to the transforms refactoring, this was used to support
masked arrays in Line2D.
'''
mask = mask.reshape(mask.size)
m = np.concatenate(((1,), mask, (1,)))
indices = np.arange(len(mask) + 1)
mdif = m[1:] - m[:-1]
i0 = np.compress(mdif == -1, indices)
i1 = np.compress(mdif == 1, indices)
assert len(i0) == len(i1)
if len(i1) == 0:
return None # Maybe this should be np.zeros((0,2), dtype=int)
if not compressed:
return np.concatenate((i0[:, np.newaxis], i1[:, np.newaxis]), axis=1)
seglengths = i1 - i0
breakpoints = np.cumsum(seglengths)
ic0 = np.concatenate(((0,), breakpoints[:-1]))
ic1 = breakpoints
return np.concatenate((ic0[:, np.newaxis], ic1[:, np.newaxis]), axis=1)
# a dict to cross-map linestyle arguments
_linestyles = [('-', 'solid'),
('--', 'dashed'),
('-.', 'dashdot'),
(':', 'dotted')]
ls_mapper = dict(_linestyles)
ls_mapper.update([(ls[1], ls[0]) for ls in _linestyles])
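# e.g. ls_mapper['-'] == 'solid' and ls_mapper['solid'] == '-'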
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('less_simple_linear_interpolation has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.less_simple_linear_interpolation( x, y, xi, extrap=extrap )
def isvector(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('isvector has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
    return mlab.isvector( X )
def vector_lengths( X, P=2., axis=None ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('vector_lengths has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
    return mlab.vector_lengths( X, P=P, axis=axis )
def distances_along_curve( X ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('distances_along_curve has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.distances_along_curve( X )
def path_length(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('path_length has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.path_length(X)
def is_closed_polygon(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('is_closed_polygon has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.is_closed_polygon(X)
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('quad2cubic has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y)
if __name__=='__main__':
assert( allequal([1,1,1]) )
assert(not allequal([1,1,0]) )
assert( allequal([]) )
assert( allequal(('a', 'a')))
assert( not allequal(('a', 'b')))
| agpl-3.0 |
anisyonk/pilot | HPC/pandayoda/yodaexe/Droid.py | 3 | 24419 | import commands
import datetime
import json
import logging
import os
import shutil
import socket
import sys
import time
import pickle
import signal
import threading
import traceback
from os.path import abspath as _abspath, join as _join
from Queue import Queue
# logging.basicConfig(filename='Droid.log', level=logging.DEBUG)
from pandayoda.yodacore import Interaction,Database,Logger
from EventServer.EventServerJobManager import EventServerJobManager
from signal_block.signal_block import block_sig, unblock_sig
class Droid(threading.Thread):
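    """
    Per-rank Yoda worker thread: fetches a job and event ranges from Yoda via
    the Interaction requester, runs them through an EventServerJobManager
    (token extractor + AthenaMP), stages out the results and reports
    heartbeats/metrics back.
    """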
def __init__(self, globalWorkingDir, localWorkingDir, rank=None, nonMPIMode=False, reserveCores=0, outputDir=None):
threading.Thread.__init__(self)
self.__globalWorkingDir = globalWorkingDir
self.__localWorkingDir = localWorkingDir
self.__currentDir = None
self.__tmpLog = Logger.Logger(filename='Droid.log')
self.__comm = Interaction.Requester(rank=rank, nonMPIMode=nonMPIMode, logger=self.__tmpLog)
self.__esJobManager = None
self.__isFinished = False
if nonMPIMode:
self.__rank = rank
else:
self.__rank = self.__comm.getRank()
self.__tmpLog.info("Rank %s: Global working dir: %s" % (self.__rank, self.__globalWorkingDir))
if not os.environ.has_key('PilotHomeDir'):
os.environ['PilotHomeDir'] = self.__globalWorkingDir
self.initWorkingDir()
self.__tmpLog.info("Rank %s: Current working dir: %s" % (self.__rank, self.__currentDir))
self.__jobId = None
self.__startTimeOneJobDroid = None
self.__cpuTimeOneJobDroid = None
self.__poolFileCatalog = None
self.__inputFiles = None
self.__copyInputFiles = None
self.__preSetup = None
self.__postRun = None
self.__ATHENA_PROC_NUMBER = 1
self.__firstGetEventRanges = True
self.__outputDir = outputDir
self.__yodaToOS = False
self.reserveCores = reserveCores
self.__hostname = socket.getfqdn()
self.__outputs = Queue()
self.__jobMetrics = {}
self.__stagerThread = None
self.__stop = False
if not nonMPIMode:
signal.signal(signal.SIGTERM, self.stop)
signal.signal(signal.SIGQUIT, self.stop)
signal.signal(signal.SIGSEGV, self.stop)
signal.signal(signal.SIGXCPU, self.stop)
signal.signal(signal.SIGUSR1, self.stop)
signal.signal(signal.SIGBUS, self.stop)
def initWorkingDir(self):
# Create separate working directory for each rank
curdir = _abspath (self.__localWorkingDir)
wkdirname = "rank_%s" % str(self.__rank)
wkdir = _abspath (_join(curdir,wkdirname))
if not os.path.exists(wkdir):
os.makedirs (wkdir)
os.chdir (wkdir)
self.__currentDir = wkdir
def postExecJob(self):
if self.__copyInputFiles and self.__inputFiles is not None and self.__poolFileCatalog is not None:
for inputFile in self.__inputFiles:
localInputFile = os.path.join(os.getcwd(), os.path.basename(inputFile))
self.__tmpLog.debug("Rank %s: Remove input file: %s" % (self.__rank, localInputFile))
os.remove(localInputFile)
if self.__globalWorkingDir != self.__localWorkingDir:
command = "cp -fr " + self.__currentDir + " " + self.__globalWorkingDir
self.__tmpLog.debug("Rank %s: copy files from local working directory to global working dir(cmd: %s)" % (self.__rank, command))
status, output = commands.getstatusoutput(command)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if self.__postRun and self.__esJobManager:
self.__esJobManager.postRun(self.__postRun)
def setup(self, job):
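        # Set up one job: copy input files if requested, rewrite the
        # PoolFileCatalog for the local working directory, start the stager
        # thread, then initialise the EventServerJobManager (token extractor
        # and AthenaMP).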
try:
self.__jobId = job.get("JobId", None)
self.__startTimeOneJobDroid = time.time()
self.__cpuTimeOneJobDroid = os.times()
self.__poolFileCatalog = job.get('PoolFileCatalog', None)
self.__inputFiles = job.get('InputFiles', None)
self.__copyInputFiles = job.get('CopyInputFiles', False)
self.__preSetup = job.get('PreSetup', None)
self.__postRun = job.get('PostRun', None)
self.__yodaToOS = job.get('yodaToOS', False)
self.__ATHENA_PROC_NUMBER = int(job.get('ATHENA_PROC_NUMBER', 1))
self.__ATHENA_PROC_NUMBER -= self.reserveCores
if self.__ATHENA_PROC_NUMBER < 0:
self.__ATHENA_PROC_NUMBER = 1
job["AthenaMPCmd"] = "export ATHENA_PROC_NUMBER=" + str(self.__ATHENA_PROC_NUMBER) + "; " + job["AthenaMPCmd"]
self.__jobWorkingDir = job.get('GlobalWorkingDir', None)
if self.__jobWorkingDir:
self.__jobWorkingDir = os.path.join(self.__jobWorkingDir, 'rank_%s' % self.__rank)
if not os.path.exists(self.__jobWorkingDir):
os.makedirs(self.__jobWorkingDir)
os.chdir(self.__jobWorkingDir)
logFile = os.path.join(self.__jobWorkingDir, 'Droid.log')
logging.basicConfig(filename=logFile, level=logging.DEBUG)
self.__tmpLog = Logger.Logger()
if self.__copyInputFiles and self.__inputFiles is not None and self.__poolFileCatalog is not None:
for inputFile in self.__inputFiles:
shutil.copy(inputFile, './')
pfc_name = os.path.basename(self.__poolFileCatalog)
pfc_name = os.path.join(os.getcwd(), pfc_name)
pfc_name_back = pfc_name + ".back"
shutil.copy2(self.__poolFileCatalog, pfc_name_back)
with open(pfc_name, 'wt') as pfc_out:
with open(pfc_name_back, 'rt') as pfc_in:
for line in pfc_in:
pfc_out.write(line.replace('HPCWORKINGDIR', os.getcwd()))
job["AthenaMPCmd"] = job["AthenaMPCmd"].replace('HPCWORKINGDIR', os.getcwd())
self.__esJobManager = EventServerJobManager(self.__rank, self.__ATHENA_PROC_NUMBER, workingDir=self.__jobWorkingDir)
status, output = self.__esJobManager.preSetup(self.__preSetup)
if status != 0:
return False, output
status, output = self.startStagerThread(job)
if status != 0:
self.__tmpLog.warning("Rank %s: failed to start stager thread(status: %s, output: %s)" % (self.__rank, status, output))
return False, output
# self.__esJobManager.initMessageThread(socketname='EventService_EventRanges', context='local')
# self.__esJobManager.initTokenExtractorProcess(job["TokenExtractCmd"])
# self.__esJobManager.initAthenaMPProcess(job["AthenaMPCmd"])
ret = self.__esJobManager.init(socketname='EventService_EventRanges', context='local', athenaMPCmd=job["AthenaMPCmd"], tokenExtractorCmd=job["TokenExtractCmd"])
return True, None
except:
errMsg = "Failed to init EventServerJobManager: %s" % str(traceback.format_exc())
            if self.__esJobManager:
                self.__esJobManager.terminate()
return False, errMsg
def getJob(self):
request = {'Test':'TEST', 'rank': self.__rank}
self.__tmpLog.debug("Rank %s: getJob(request: %s)" % (self.__rank, request))
status, output = self.__comm.sendRequest('getJob',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
job = output["job"]
if statusCode == 0 and job:
return True, job
return False, None
def startStagerThread(self, job):
self.__tmpLog.debug("Rank %s: initStagerThread: workdir: %s" %(self.__rank, os.getcwd()))
try:
from pandayoda.yodaexe.DroidStager import DroidStager
self.__stagerThread = DroidStager(self.__globalWorkingDir, self.__localWorkingDir, outputs=self.__outputs, job=job, esJobManager=self.__esJobManager, outputDir=self.__outputDir, rank=self.__rank, logger=self.__tmpLog)
self.__stagerThread.start()
return 0, None
except:
self.__tmpLog.warning("Rank %s: Failed to initStagerThread: %s" % (self.__rank, str(traceback.format_exc())))
return -1, str(traceback.format_exc())
def stopStagerThread(self):
self.__tmpLog.debug("Rank %s: stopStagerThread: workdir: %s" %(self.__rank, os.getcwd()))
self.__stagerThread.stop()
self.__tmpLog.debug("Rank %s: waiting stager thread to finish" %(self.__rank))
while not self.__stagerThread.isFinished():
self.updateOutputs()
time.sleep(1)
self.__tmpLog.debug("Rank %s: stager thread finished" %(self.__rank))
def getEventRanges(self, nRanges=1):
#if self.__firstGetEventRanges:
# request = {'nRanges': self.__ATHENA_PROC_NUMBER}
# self.__firstGetEventRanges = False
#else:
# request = {'nRanges': nRanges}
request = {'jobId': self.__jobId, 'nRanges': nRanges}
self.__tmpLog.debug("Rank %s: getEventRanges(request: %s)" % (self.__rank, request))
status, output = self.__comm.sendRequest('getEventRanges',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
eventRanges = output['eventRanges']
if statusCode == 0:
return True, eventRanges
return False, None
def updateEventRange(self, output):
try:
eventRangeID = output.split(",")[1]
except Exception, e:
self.__tmpLog.warning("Rank %s: failed to get eventRangeID from output: %s" % (self.__rank, output))
self.__tmpLog.warning("Rank %s: error message: %s" % (self.__rank, str(e)))
status, output = self.copyOutput(output)
if status != 0:
self.__tmpLog.debug("Rank %s: failed to copy output from local working dir to global working dir: %s" % (self.__rank, output))
return False
request = {"eventRangeID": eventRangeID,
'eventStatus': "finished",
"output": output}
self.__tmpLog.debug("Rank %s: updateEventRange(request: %s)" % (self.__rank, request))
retStatus, retOutput = self.__comm.sendRequest('updateEventRange',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, retStatus, retOutput))
if retStatus:
statusCode = retOutput["StatusCode"]
if statusCode == 0:
return True
return False
def dumpUpdates(self, outputs):
timeNow = datetime.datetime.utcnow()
outFileName = 'rank_' + str(self.__rank) + '_' + timeNow.strftime("%Y-%m-%d-%H-%M-%S") + '.dump'
        outFileName = os.path.join(self.__globalWorkingDir, outFileName)
outFile = open(outFileName,'w')
for eventRangeID,status,output in outputs:
outFile.write('{0} {1} {2}\n'.format(eventRangeID,status,output))
outFile.close()
def updatePandaEventRanges(self, event_ranges):
""" Update an event range on the Event Server """
self.__tmpLog.debug("Updating event ranges..")
try:
test = sys.modules['pUtil']
except:
self.__tmpLog.debug("loading pUtil")
import pUtil
message = ""
#url = "https://aipanda007.cern.ch:25443/server/panda"
url = "https://pandaserver.cern.ch:25443/server/panda"
# eventRanges = [{'eventRangeID': '4001396-1800223966-4426028-1-2', 'eventStatus':'running'}, {'eventRangeID': '4001396-1800223966-4426028-2-2','eventStatus':'running'}]
node={}
node['eventRanges']=json.dumps(event_ranges)
# open connection
ret = pUtil.httpConnect(node, url, path='.', mode="UPDATEEVENTRANGES")
# response = json.loads(ret[1])
status = ret[0]
if ret[0]: # non-zero return code
message = "Failed to update event range - error code = %d, error: " % (ret[0], ret[1])
else:
response = json.loads(json.dumps(ret[1]))
status = int(response['StatusCode'])
message = json.dumps(response['Returns'])
return status, message
def updateOutputs(self, signal=False, final=False):
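        # Drain the local output queue: report stage-out results to the Panda
        # server when yodaToOS is enabled, then forward all collected
        # event-range updates to Yoda via 'updateEventRanges'.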
outputs = []
stagedOutpus = []
while not self.__outputs.empty():
output = self.__outputs.get()
outputs.append(output)
if output['eventStatus'] == 'stagedOut':
stagedOutpus.append({'eventRangeID': output['eventRangeID'], 'eventStatus': 'finished', 'objstoreID': output['objstoreID']})
elif output['eventStatus'].startswith("ERR") and self.__yodaToOS:
stagedOutpus.append({'eventRangeID': output['eventRangeID'], 'eventStatus': 'failed'})
if len(stagedOutpus):
self.__tmpLog.debug("Rank %s: updatePandaEventRanges(request: %s)" % (self.__rank, stagedOutpus))
retStatus, retOutput = self.updatePandaEventRanges(stagedOutpus)
if retStatus == 0:
self.__tmpLog.debug("Rank %s: updatePandaEventRanges(status: %s, output: %s)" % (self.__rank, retStatus, retOutput))
if outputs:
self.__tmpLog.debug("Rank %s: updateEventRanges(request: %s)" % (self.__rank, outputs))
retStatus, retOutput = self.__comm.sendRequest('updateEventRanges',outputs)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, retStatus, retOutput))
return True
def finishJob(self):
if not self.__isFinished:
request = {'jobId': self.__jobId, 'rank': self.__rank, 'state': 'finished'}
self.__tmpLog.debug("Rank %s: finishJob(request: %s)" % (self.__rank, request))
status, output = self.__comm.sendRequest('finishJob',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
#self.__comm.disconnect()
return True
return False
def failedJob(self):
request = {'jobId': self.__jobId, 'rank': self.__rank, 'state': 'failed'}
self.__tmpLog.debug("Rank %s: finishJob(request: %s)" % (self.__rank, request))
status, output = self.__comm.sendRequest('finishJob',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
if statusCode == 0:
return True
return False
def finishDroid(self):
request = {'state': 'finished'}
self.__tmpLog.debug("Rank %s: finishDroid(request: %s)" % (self.__rank, request))
status, output = self.__comm.sendRequest('finishDroid',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
if statusCode == 0:
return True
self.__comm.disconnect()
return False
def heartbeat(self):
request = self.getAccountingMetrics()
self.__jobMetrics[self.__jobId] = request
self.__tmpLog.debug("Rank %s: heartbeat(request: %s)" % (self.__rank, request))
status, output = self.__comm.sendRequest('heartbeat',request)
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
self.dumpJobMetrics()
if status:
statusCode = output["StatusCode"]
if statusCode == 0:
return True
return False
def getAccountingMetrics(self):
metrics = {}
if self.__esJobManager:
metrics = self.__esJobManager.getAccountingMetrics()
metrics['jobId'] = self.__jobId
metrics['rank'] = self.__rank
if self.__startTimeOneJobDroid:
metrics['totalTime'] = time.time() - self.__startTimeOneJobDroid
else:
metrics['totalTime'] = 0
processedEvents = metrics['processedEvents']
if processedEvents < 1:
processedEvents = 1
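        # avgTimePerEvent approximates CPU-seconds per event as
        # wall time * cores / processed events.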
metrics['avgTimePerEvent'] = metrics['totalTime'] * metrics['cores'] / processedEvents
return metrics
def dumpJobMetrics(self):
jobMetricsFileName = "jobMetrics-rank_%s.json" % self.__rank
outputDir = self.__currentDir
jobMetrics = os.path.join(outputDir, jobMetricsFileName)
self.__tmpLog.debug("JobMetrics file: %s" % jobMetrics)
tmpFile = open(jobMetrics, "w")
json.dump(self.__jobMetrics, tmpFile)
tmpFile.close()
def pollYodaMessage(self):
self.__tmpLog.debug("Rank %s: pollYodaMessage" % (self.__rank))
if True:
status, output = self.__comm.waitMessage()
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
state = output["State"]
if statusCode == 0 and state == 'finished':
return True
return True
def waitYoda(self):
self.__tmpLog.debug("Rank %s: WaitYoda" % (self.__rank))
while True:
status, output = self.__comm.waitMessage()
self.__tmpLog.debug("Rank %s: (status: %s, output: %s)" % (self.__rank, status, output))
if status:
statusCode = output["StatusCode"]
state = output["State"]
if statusCode == 0 and state == 'finished':
return True
return True
def runOneJob(self):
self.__tmpLog.info("Droid Starts to get job")
status, job = self.getJob()
self.__tmpLog.info("Rank %s: getJob(%s)" % (self.__rank, job))
if not status or not job:
self.__tmpLog.debug("Rank %s: Failed to get job" % self.__rank)
# self.failedJob()
return -1
status, output = self.setup(job)
self.__tmpLog.info("Rank %s: setup job(status:%s, output:%s)" % (self.__rank, status, output))
if not status:
self.__tmpLog.debug("Rank %s: Failed to setup job(%s)" % (self.__rank, output))
self.failedJob()
return -1
# main loop
failedNum = 0
#self.__tmpLog.info("Rank %s: isDead: %s" % (self.__rank, self.__esJobManager.isDead()))
heartbeatTime = None
self.__tmpLog.info("Rank %s: os.times: %s" % (self.__rank, os.times()))
while not self.__esJobManager.isDead():
#self.__tmpLog.info("Rank %s: isDead: %s" % (self.__rank, self.__esJobManager.isDead()))
#self.__tmpLog.info("Rank %s: isNeedMoreEvents: %s" % (self.__rank, self.__esJobManager.isNeedMoreEvents()))
while self.__esJobManager.isNeedMoreEvents() > 0:
neededEvents = self.__esJobManager.isNeedMoreEvents()
self.__tmpLog.info("Rank %s: need %s events" % (self.__rank, neededEvents))
status, eventRanges = self.getEventRanges(neededEvents)
# failed to get message again and again
if not status:
failedNum += 1
if failedNum > 30:
self.__tmpLog.warning("Rank %s: failed to get events more than 30 times. finish job" % self.__rank)
self.__esJobManager.insertEventRange("No more events")
else:
continue
else:
failedNum = 0
self.__tmpLog.info("Rank %s: get event ranges(%s)" % (self.__rank, eventRanges))
if len(eventRanges) == 0:
self.__tmpLog.info("Rank %s: no more events" % self.__rank)
self.__esJobManager.insertEventRange("No more events")
else:
self.__esJobManager.insertEventRanges(eventRanges)
self.__esJobManager.poll()
self.updateOutputs()
time.sleep(0.001)
if heartbeatTime is None:
self.heartbeat()
heartbeatTime = time.time()
elif time.time() - heartbeatTime > 60:
self.heartbeat()
self.__tmpLog.info("Rank %s: os.times: %s" % (self.__rank, os.times()))
heartbeatTime = time.time()
self.heartbeat()
self.__esJobManager.flushMessages()
self.stopStagerThread()
self.updateOutputs()
self.__tmpLog.info("Rank %s: post exec job" % self.__rank)
self.postExecJob()
self.heartbeat()
self.__tmpLog.info("Rank %s: finish job" % self.__rank)
self.finishJob()
#self.waitYoda()
return self.__esJobManager.getChildRetStatus()
def preCheck(self):
if not os.access('/tmp', os.W_OK):
self.__tmpLog.info("Rank %s: PreCheck /tmp is readonly" % self.__rank)
            status, output = commands.getstatusoutput("ls -l / | grep tmp")
self.__tmpLog.info("Rank %s: tmp dir: %s" % (self.__rank, output))
return 1
return 0
def run(self):
self.__tmpLog.info("Rank %s: Droid starts on %s" % (self.__rank, self.__hostname))
if self.preCheck():
self.__tmpLog.info("Rank %s: Droid failed preCheck, exit" % self.__rank)
return 1
while not self.__stop:
self.__tmpLog.info("Rank %s: Droid starts to run one job" % self.__rank)
os.chdir(self.__globalWorkingDir)
try:
ret = self.runOneJob()
if ret != 0:
self.__tmpLog.warning("Rank %s: Droid fails to run one job: ret - %s" % (self.__rank, ret))
break
except:
self.__tmpLog.warning("Rank %s: Droid throws exception when running one job: %s" % (self.__rank, traceback.format_exc()))
break
os.chdir(self.__globalWorkingDir)
self.__tmpLog.info("Rank %s: Droid finishes to run one job" % self.__rank)
self.finishDroid()
return 0
def stop(self, signum=None, frame=None):
self.__tmpLog.info('Rank %s: stop signal %s received' % (self.__rank, signum))
self.__stop = True
block_sig(signum)
signal.siginterrupt(signum, False)
if self.__esJobManager:
self.__esJobManager.terminate()
self.getAccountingMetrics()
self.dumpJobMetrics()
self.heartbeat()
#self.__esJobManager.terminate()
        if self.__esJobManager:
            self.__esJobManager.flushMessages()
self.updateOutputs(signal=True, final=True)
self.__tmpLog.info("Rank %s: post exec job" % self.__rank)
self.postExecJob()
#self.__tmpLog.info("Rank %s: finish job" % self.__rank)
#self.finishJob()
self.__tmpLog.info('Rank %s: stop' % self.__rank)
#signal.siginterrupt(signum, True)
unblock_sig(signum)
#sys.exit(0)
def __del_not_use__(self):
self.__tmpLog.info('Rank %s: __del__ function' % self.__rank)
#self.__esJobManager.terminate()
#self.__esJobManager.flushMessages()
#output = self.__esJobManager.getOutput()
#while output:
# self.__tmpLog.info("Rank %s: get output(%s)" % (self.__rank, output))
# self.updateEventRange(output)
# output = self.__esJobManager.getOutput()
#self.__tmpLog.info("Rank %s: post exec job" % self.__rank)
#self.postExecJob()
self.__esJobManager.flushMessages()
self.updateOutputs(signal=True, final=True)
self.__tmpLog.info("Rank %s: post exec job" % self.__rank)
self.postExecJob()
self.__tmpLog.info("Rank %s: finish job" % self.__rank)
self.finishJob()
self.__tmpLog.info('Rank %s: __del__ function' % self.__rank)
| apache-2.0 |
cbertinato/pandas | pandas/tests/test_common.py | 1 | 3220 | import collections
from functools import partial
import string
import numpy as np
import pytest
import pandas as pd
from pandas import Series, Timestamp
from pandas.core import common as com, ops
def test_get_callable_name():
getname = com.get_callable_name
def fn(x):
return x
lambda_ = lambda x: x # noqa: E731
part1 = partial(fn)
part2 = partial(part1)
class somecall:
def __call__(self):
return x # noqa
assert getname(fn) == 'fn'
assert getname(lambda_)
assert getname(part1) == 'fn'
assert getname(part2) == 'fn'
assert getname(somecall()) == 'somecall'
assert getname(1) is None
def test_any_none():
assert (com._any_none(1, 2, 3, None))
assert (not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert (com._all_not_none(1, 2, 3, 4))
assert (not com._all_not_none(1, 2, 3, None))
assert (not com._all_not_none(None, None, None, None))
def test_random_state():
import numpy.random as npr
# Check with seed
state = com.random_state(5)
assert state.uniform() == npr.RandomState(5).uniform()
# Check with random state object
state2 = npr.RandomState(10)
assert com.random_state(state2).uniform() == npr.RandomState(10).uniform()
# check with no arg random state
assert com.random_state() is np.random
# Error for floats or strings
with pytest.raises(ValueError):
com.random_state('test')
with pytest.raises(ValueError):
com.random_state(5.5)
@pytest.mark.parametrize('left, right, expected', [
(Series([1], name='x'), Series([2], name='x'), 'x'),
(Series([1], name='x'), Series([2], name='y'), None),
(Series([1]), Series([2], name='x'), None),
(Series([1], name='x'), Series([2]), None),
(Series([1], name='x'), [2], 'x'),
([1], Series([2], name='y'), 'y')])
def test_maybe_match_name(left, right, expected):
assert ops._maybe_match_name(left, right) == expected
def test_dict_compat():
data_datetime64 = {np.datetime64('1990-03-15'): 1,
np.datetime64('2015-03-15'): 2}
data_unchanged = {1: 2, 3: 4, 5: 6}
expected = {Timestamp('1990-3-15'): 1, Timestamp('2015-03-15'): 2}
assert (com.dict_compat(data_datetime64) == expected)
assert (com.dict_compat(expected) == expected)
assert (com.dict_compat(data_unchanged) == data_unchanged)
def test_standardize_mapping():
# No uninitialized defaultdicts
with pytest.raises(TypeError):
com.standardize_mapping(collections.defaultdict)
# No non-mapping subtypes, instance
with pytest.raises(TypeError):
com.standardize_mapping([])
# No non-mapping subtypes, class
with pytest.raises(TypeError):
com.standardize_mapping(list)
fill = {'bad': 'data'}
assert (com.standardize_mapping(fill) == dict)
# Convert instance to type
assert (com.standardize_mapping({}) == dict)
dd = collections.defaultdict(list)
assert isinstance(com.standardize_mapping(dd), partial)
def test_git_version():
# GH 21295
git_version = pd.__git_version__
assert len(git_version) == 40
assert all(c in string.hexdigits for c in git_version)
| bsd-3-clause |
theusual/kaggle-seeclickfix-ensemble | Miroslaw/utils.py | 3 | 2791 | import cPickle as pickle
import datasets, config
import numpy as np
import pandas as pd
from sklearn.externals import joblib
def load_from_cache(filename):
filename = '%s/%s.pkl' % (config.CACHEDIR, filename)
if config.CACHETYPE == 'joblib':
obj = joblib.load(filename)
elif config.CACHETYPE == 'pickle':
f = open(filename)
obj = pickle.load(f)
f.close()
else:
raise ValueError('Unkown CACHETYPE %s, use only pickle or joblib' % config.CACHETYPE)
return obj
def save_to_cache(obj, filename):
filename = '%s/%s.pkl' % (config.CACHEDIR, filename)
if config.CACHETYPE == 'joblib':
joblib.dump(obj, filename, compress=9)
elif config.CACHETYPE == 'pickle':
        f = open(filename, 'w')
pickle.dump(obj, f, 2)
f.close()
else:
raise ValueError('Unkown CACHETYPE %s, use only pickle or joblib' % config.CACHETYPE)
def create_submission(filename, pred, ids=None):
data = ['id,num_views,num_votes,num_comments']
pred = pred.astype('S100')
if ids is None:
ids = datasets.load_dataset('TestIDS')
for id, p in zip(ids, pred):
data.append('%i,' % (id) + ','.join(p))
data = '\n'.join(data)
f = open('%s/%s' %(config.SUBMITDIR, filename), 'w')
f.write(data)
f.close()
def make_vw(data, targets, filename):
"""
Helper method to create a vowpal wabbit dataset from data and targets and
save it to filename
"""
s = []
for yi, xi in zip(targets, data):
xis = ' '.join(['f%i:%f' % (f, xi[0,f]) for f in xi.nonzero()[1]])
s.append('%f | %s' %(yi, xis))
f = open(filename, 'w')
f.write('\n'.join(s))
f.close()
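# Each emitted line has the form "<target> | f<i>:<value> ...", i.e. a plain
# Vowpal Wabbit example with a single unnamed feature namespace.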
def greedy_feature_selection(model, features, j):
selected_features = set()
score_hist = []
    ycv = np.exp(y_cv) - 1
while len(selected_features) < len(features):
scores = []
for i in range(len(features)):
if i not in selected_features:
feats = list(selected_features) + [i]
if len(feats) == 1:
ttfs = features[i]
else:
ttfs = data_transforms.drop_disjoint(sparse.hstack((
features[feats])).tocsr(), targets)
X_train_pre = ttfs[:n_train]
X_train = X_train_pre[:int(n_train*0.8)]
X_cv = X_train_pre[int(n_train*0.8):]
model.fit(X_train[-keep:], y_train[-keep:])
                cv = np.exp(model.predict(X_cv)) - 1
scores.append((rmsle(postprocess_pred(cv)[:,j], ycv[:,j]), feats, i))
print scores[-1]
selected_features.add(min(scores)[2])
score_hist.append(min(scores))
return score_hist
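# NOTE: greedy_feature_selection relies on names prepared by the calling
# script (y_cv, y_train, n_train, keep, targets, rmsle, postprocess_pred,
# data_transforms, sparse); it is not self-contained as written.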
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/tests/test_multiclass.py | 1 | 23953 | import numpy as np
import scipy.sparse as sp
from sklearn import datasets
from sklearn import svm
from sklearn.externals.six.moves import zip
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_check_classification_targets():
# Test that check_classification_target return correct type. #5782
y = np.array([0.0, 1.1, 2.0, 3.0])
msg = type_of_target(y)
assert_raise_message(ValueError, msg, check_classification_targets, y)
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_partial_fit():
    # Test if partial_fit is working as intended
X, y = shuffle(iris.data, iris.target, random_state=0)
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(X[:100], y[:100], np.unique(y))
ovr.partial_fit(X[100:], y[100:])
pred = ovr.predict(X)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(y)))
assert_greater(np.mean(y == pred), 0.65)
    # Test when mini-batches don't have all classes
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovr.partial_fit(iris.data[60:], iris.target[60:])
pred = ovr.predict(iris.data)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_partial_fit_predict():
X, y = shuffle(iris.data, iris.target)
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:100], y[:100], np.unique(y))
ovo1.partial_fit(X[100:], y[100:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
ovo2.fit(X, y)
pred2 = ovo2.predict(X)
assert_equal(len(ovo1.estimators_), n_classes * (n_classes - 1) / 2)
assert_greater(np.mean(y == pred1), 0.65)
assert_almost_equal(pred1, pred2)
# Test when mini-batches don't have all target classes
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovo1.partial_fit(iris.data[60:], iris.target[60:])
pred1 = ovo1.predict(iris.data)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred1, pred2)
assert_equal(len(ovo1.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred1), 0.65)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote levels
        # because there are only 3 distinct class pairs, thus 3 distinct
# binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
| mit |
yianni/rtd-dbg | qsr_lib/src/qsrlib_qsrs/qsr_qtc_c_simplified.py | 1 | 1457 | # -*- coding: utf-8 -*-
"""Example that shows how to implement QSR makers.
:Author: Christan Dondrup <cdondrup@lincoln.ac.uk>
:Organization: University of Lincoln
:Date: 10 September 2014
:Version: 0.1
:Status: Development
:Copyright: STRANDS default
:Notes: future extension to handle polygons, to do that use matplotlib.path.Path.contains_points
although might want to have a read on the following also...
http://matplotlib.1069221.n5.nabble.com/How-to-properly-use-path-Path-contains-point-td40718.html
"""
from __future__ import print_function, division
from qsrlib_qsrs.qsr_qtc_simplified_abstractclass import QSR_QTC_Simplified_Abstractclass
class QSR_QTC_C_Simplified(QSR_QTC_Simplified_Abstractclass):
"""Make default QSRs and provide an example for others"""
def __init__(self):
super(QSR_QTC_C_Simplified, self).__init__()
self._unique_id = "qtccs"
self.qtc_type = "c"
self._all_possible_relations = tuple(self.return_all_possible_state_combinations()[0])
def qtc_to_output_format(self, qtc):
"""Overwrite this for the different QTC veriants to select only the parts
from the QTCC tuple that you would like to return.
Example for QTCB: return qtc[0:2]
:param qtc: The full QTCC tuple [q1,q2,q4,q5]
:return: "q1,q2,q4,q5" or {"qtccs": "q1,q2,q4,q5"} if future is True
"""
return self._format_qsr(self.create_qtc_string(qtc))
| mit |
frucci/kaggle_quora_competition | get_phrase_correction.py | 1 | 5953 | import ourfunctions as f
import gc
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
import re
from string import punctuation
train_df = pd.read_csv("./train.csv")
test_df = pd.read_csv("./test.csv")
print(train_df.shape)
print(test_df.shape)
## replace NaN values with empty strings
train_df.fillna("", inplace=True)
test_df.fillna("", inplace=True)
train_df.info()
import enchant
from enchant.checker import SpellChecker
d = enchant.DictWithPWL("en_US")
chkr = SpellChecker(d)
def create_unique_questions(train_df, test_df):
words = list(
set(
list(set(train_df['question1'])) +
list(set(train_df['question2'])) + list(set(test_df['question1']))
+ list(set(test_df['question2']))))
unique_questions = pd.DataFrame(
words, columns=['questions']).reset_index(drop=True)
return unique_questions
unique_questions = create_unique_questions(train_df, test_df)
alreary_corrected = [
"iphones",
"ireland",
"mustn",
"linux",
"wouldn",
"videoes",
"tv",
"google",
"memorise",
"faught",
"cena",
"lesner",
"hollywood",
"anmount",
"hoywood",
"cantonese",
"otherthan",
"mumbai",
"wikipedia",
"textfields",
"ajax",
"pls",
"couldn",
"calcutta",
"doesnt",
"fIght",
"txt",
"whther",
"feelns",
"sudd",
"stl",
"india",
"Plz",
"engg",
"eng",
"olympians",
"offence",
"bulgarians",
"siemens",
"wasn",
"clinton",
"portland",
"recognise",
"adams",
"didnt",
"taylor",
"youtube",
"goverment",
"korean",
"paypal",
"isn",
"facebook",
"mhz",
"samsung",
"womans",
"german",
"america",
"mosquitos",
"melbourne",
"dj",
"behaviour",
"hasn",
"phd",
"aren",
"ethernet",
"uk",
"realise",
"brisbane",
"organisation",
"aftr",
"russian",
"nonpolar",
"pc",
"othet",
"nokia",
"boolean",
"analyse",
"centres",
"ramadan",
"latin",
"weren",
"immedietly",
"bollywood",
"conentration",
"benifit",
"oppurtunities",
"filipino",
"netflix",
"indians",
"opensource",
"atlanta",
"microsoft",
"colour",
"cse",
"jane",
"exsts",
"persob",
"centre",
"radeon",
"postgraduation",
"suez",
"illuminati",
"analytics",
"italian",
"excercises",
"favour",
"smartphones",
"shouldn",
"didnot",
"friday",
"monday",
"americans",
"hasn",
"michael",
"verizon",
"hitler",
"fermi",
"whatsapp",
"messagess",
"africa",
"weakneses",
"nikon",
"capricorn",
"romania",
"favourite",
"startups",
"spanish",
"preparegravitation",
"compulsary",
"workin",
"syria",
"immigants",
"benedict",
"legssss",
"france",
"watsup",
"arya",
"handjob",
"europe",
"shoud",
"paypal",
"upto",
"paris",
"sql",
"hitman",
"lagrangian",
"dvd",
"donald",
"enigneering",
"mightn",
"defence",
"iranian",
"increse",
"india",
"hairloss",
"volumetry",
"americans",
"quora",
"eligiblty",
"english",
"indian",
"bangalore",
"emoji",
"ielts",
"ahmedabad",
"frac",
"sociall",
"philippines",
"java",
"intraday",
"mightn",
"delhi",
"saturn",
"youtube",
"noida",
"lynda",
"demonetisation",
"html",
"dissprove",
"nlp",
"nlp",
"rollerblade",
"vlc",
"rolex",
"november",
"indians",
"nflammatories",
"absorps",
"kat",
"ibm",
"centra",
"centra",
"uk",
"pdf",
"ebook",
"sydney",
"samsung",
"usa",
"traveller",
"jaipur",
"pablo",
"ebay",
"Ebay",
"EBAY",
"whatsapp",
"imessage",
"slary",
"isis",
"blow",
"eu",
"favourite",
"reactjs",
"pakistan",
"stanford",
"harvard",
"wharton",
"saturn",
"existance",
"gb",
"poeple",
"forex",
"katrina",
"decison",
"snapchat",
"rollerblade",
"mba",
"anime",
"disney",
"schengen",
"singapore",
"ramayan",
"gmail",
"madheshi",
"germany",
"instagram",
"connecticut",
"php",
"reaso",
"japanese",
"gf",
"mumbai",
"robert",
"linkedin",
"maharashtrian",
"bollywood",
"enginnering",
"rattata",
"magikarp",
"islam",
"atleast",
"atleast",
"schengen",
"engeneering",
"casanova",
"feelngs",
"photoshop",
"canada",
"holland",
"hollywood",
"chelsea",
"modernizaton",
"instagrammer",
"thailand",
"chinese",
"corrrect",
"hillary",
"china",
"switzerland",
"imrovement",
"kms",
"undergraduation",
"qoura",
"actived",
"calender",
"bestfriend",
"dna",
"latop",
"permantley",
"connectionn",
"sylabus",
"insititute",
"sequrity",
"addmision",
"begineer",
"wtiter",
"litrate",
"programing",
"programmning",
"demonitization",
"intially",
"unseccessful",
"quikly",
]
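# Scan every unique question with the spell checker: for each fully lower-case
# misspelling that is not already whitelisted in alreary_corrected, print the
# question, the flagged word and the checker's suggestions for manual review.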
i = 0
for q2 in unique_questions.questions.values:
i += 1
chkr.set_text(q2)
for err in chkr:
        if not any(c.isupper() for c in err.word):
error = err.word
sugs = chkr.suggest(error)
cond = True
if len(sugs) > 2:
                cond = (len(sugs[0].split()) == 1) and ('-' not in sugs[1])
if cond and (error not in alreary_corrected):
print(q2)
print(err.word)
print(sugs, '\n\n')
| gpl-3.0 |
Johanu/MDAnalysis_scripts | SPRX_position.py | 1 | 1652 | from __future__ import division
import matplotlib.pyplot as plt
# import numpy as np
import MDAnalysis as md
# from MDAnalysis import *
# from MDAnalysis.analysis.distances import dist
def calculate_dists(gro_file, xtc_file, ligand_name):
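    # per trajectory frame, record the z-axis separation between the ligand's
    # centre of mass and the centre of mass of the residue 31 backbone atoms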
u = md.Universe(gro_file, xtc_file)
select_ligand = u.selectAtoms("resname " + ligand_name)
select_res31 = u.selectAtoms("backbone and resnum 31")
COM_distance = []
print gro_file[:-4], select_ligand
for i in u.trajectory:
ligand_COM = select_ligand.centerOfMass()
res31_COM = select_res31.centerOfMass()
#COM_distance.append(res31_COM[2] - ligand_COM[2])
COM_distance.append(ligand_COM[2] - res31_COM[2])
print max(COM_distance), COM_distance.index(max(COM_distance))
print min(COM_distance), COM_distance.index(min(COM_distance))
return COM_distance
SPRP = calculate_dists('./WT_2HSP/ligands/WT_2HSP_SPRP/minim_new.gro', './WT_2HSP/ligands/WT_2HSP_SPRP/equil_new.xtc', 'SPR')
SPRC = calculate_dists('./WT_2HSP/ligands/WT_2HSP_SPRC/minim.gro', './WT_2HSP/ligands/WT_2HSP_SPRC/equil.xtc', 'SPRC')
SPRN = calculate_dists('./WT_2HSP/ligands/WT_2HSP_SPRN/minim.gro', './WT_2HSP/ligands/WT_2HSP_SPRN/equil.xtc', 'SPRN')
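# frame index -> time in ns: the /100 assumes 100 frames per ns (10 ps/frame),
# matching the 'Time / ns' axis label below; adjust if the timestep differs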
x_vals = [x / 100 for x in range(0, len(SPRP))]
x_SPRN = [x / 100 for x in range(0, len(SPRN))]
plt.plot(x_vals, SPRP, label='SPRP')
plt.plot(x_vals, SPRC, label='SPRC')
plt.plot(x_SPRN, SPRN, label='SPRN')
leg = plt.legend(ncol=3, loc=9, fancybox=True)
leg.get_frame().set_alpha(0.5)
plt.xlabel('Time / ns')
plt.ylabel(ur'Z difference to resnum 31 COM / $\AA$')
# plt.show()
plt.savefig('SPRX_COM.png', dpi=300)
plt.close()
| mit |
thangbui/sparseGP_powerEP | python/sgp/tests/test_PEP_GPy.py | 1 | 3288 | from ..pep.PEP_reg import PEP
import GPy
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
np.random.seed(20)
N = 20 # Number of data points
M = 2 # Number of inducing points
X = np.c_[np.linspace(0, 10, N)] # Data X values
X_B = np.c_[(np.max(X)-np.min(X))*np.random.uniform(0, 1, M)+np.min(X)] + 2
lik_noise_var = 0.1
X_T = np.c_[np.linspace(0,10, 100)] # use the same covariance matrix!
X_X_T = np.vstack((X, X_T))
k = GPy.kern.RBF(input_dim=1, lengthscale=1, variance=1)
Y_full = np.c_[np.random.multivariate_normal(
np.zeros(X_X_T.shape[0]),
k.K(X_X_T)+np.eye(X_X_T.shape[0])*lik_noise_var)]
Y = np.c_[Y_full[:N]]
Y_T = np.c_[Y_full[N:]]
plt.figure(figsize=(20,10))
plt.scatter(X, Y, color='k')
X_plot = np.c_[np.linspace(-2, 12, 500)]
k = GPy.kern.RBF(input_dim=1, lengthscale=1, variance=1)
model = GPy.models.SparseGPRegression(X,Y,kernel=k,Z=X_B)
model.name = 'VFE'
model.Gaussian_noise.variance = lik_noise_var
model.unfix()
model.Z.unconstrain()
model.optimize('bfgs', messages=True, max_iters=2e3)
(m, V) = model.predict(X_plot, full_cov=False)
plt.plot(X_plot, m,'g', label='VFE')
plt.plot(X_plot, m+2*np.sqrt(V),'g--')
plt.plot(X_plot, m-2*np.sqrt(V),'g--')
(m_B, V_B) = model.predict(X_B, full_cov=False)
plt.scatter(X_B, m_B, color='g')
vfe_lml = model.log_likelihood()
print 'VFE: ', vfe_lml
k = GPy.kern.RBF(input_dim=1, lengthscale=1, variance=1)
model = GPy.models.SparseGPRegression(X,Y,kernel=k,Z=X_B)
model.name = 'FITC'
model.inference_method = GPy.inference.latent_function_inference.FITC()
model.Gaussian_noise.variance = lik_noise_var
model.unfix()
model.Z.unconstrain()
model.optimize('bfgs', messages=True, max_iters=2e3)
(m, V) = model.predict(X_plot, full_cov=False)
plt.plot(X_plot, m,'b', label='FITC')
plt.plot(X_plot, m+2*np.sqrt(V),'b--')
plt.plot(X_plot, m-2*np.sqrt(V),'b--')
(m_B, V_B) = model.predict(X_B, full_cov=False)
plt.scatter(X_B, m_B, color='b')
fitc_lml = model.log_likelihood()
print 'FITC: ', fitc_lml
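# Power-EP interpolates between the two sparse approximations above: alpha -> 0
# recovers the VFE solution and alpha = 1 recovers FITC/EP (cf. Bui et al.'s
# power-EP framework for sparse GPs); alpha = 0.5 sits halfway between them.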
alpha = 0.5
k = GPy.kern.RBF(input_dim=1, lengthscale=1, variance=1)
model = GPy.models.SparseGPRegression(X,Y,kernel=k,Z=X_B)
model.name = 'POWER-EP'
model.inference_method = PEP(alpha=alpha)
model.Gaussian_noise.variance = lik_noise_var
model.unfix()
# print model.checkgrad()
model.optimize('bfgs', messages=True, max_iters=2e3)
# model.optimize_restarts(num_restarts = 10)
(m, V) = model.predict(X_plot, full_cov=False)
plt.plot(X_plot, m,'r', label='Power-EP, alpha %.2f' % alpha)
plt.plot(X_plot, m+2*np.sqrt(V),'r--')
plt.plot(X_plot, m-2*np.sqrt(V),'r--')
(m_B, V_B) = model.predict(X_B, full_cov=False)
plt.scatter(X_B, m_B, color='r')
pep_lml = model.log_likelihood()
print 'Power EP: ', pep_lml
k = GPy.kern.RBF(input_dim=1, lengthscale=1, variance=1)
model = GPy.models.GPRegression(X,Y,k, noise_var=lik_noise_var)
model.name = 'FULL'
model.Gaussian_noise.variance = lik_noise_var
model.unfix()
# print model.checkgrad()
model.optimize('bfgs', messages=True, max_iters=2e3)
# model.optimize_restarts(num_restarts = 10)
(m, V) = model.predict(X_plot, full_cov=False)
plt.plot(X_plot, m,'k', label='FULL GP')
plt.plot(X_plot, m+2*np.sqrt(V),'k--')
plt.plot(X_plot, m-2*np.sqrt(V),'k--')
full_lml = model.log_likelihood()
print 'FULL: ', full_lml
plt.legend()
plt.show() | gpl-3.0 |
rsivapr/scikit-learn | sklearn/tests/test_multiclass.py | 5 | 13452 | import numpy as np
import warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron)
from sklearn.tree import DecisionTreeClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent
X = np.ones((10, 2))
X[:5, :] = 0
y = [[int(i >= 5), 2, 3] for i in range(10)]
with warnings.catch_warnings(record=True):
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
["ham", "eggs"], ["ham"]]
#y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set(["spam", "eggs"]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.65, 0.74), (0.72, 0.84)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = [tuple(l.nonzero()[0]) for l in (Y_proba > 0.5)]
assert_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).nonzero()[1],
np.hstack(clf.predict(X_test)))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
ovr.fit(iris.data, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# test that ties are broken using the decision function, not defaulting to
# the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron())
ovo_prediction = multi_clf.fit(X, y).predict(X)
# recalculate votes to make sure we have a tie
predictions = np.vstack([clf.predict(X) for clf in multi_clf.estimators_])
scores = np.vstack([clf.decision_function(X)
for clf in multi_clf.estimators_])
# classifiers are in order 0-1, 0-2, 1-2
# aggregate votes:
votes = np.zeros((4, 3))
votes[np.arange(4), predictions[0]] += 1
votes[np.arange(4), 2 * predictions[1]] += 1
votes[np.arange(4), 1 + predictions[2]] += 1
# for the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# for the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# for the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], 1)
# score for one is greater than score for zero
assert_greater(scores[2, 0] - scores[0, 0], scores[0, 0] + scores[1, 0])
# score for one is greater than score for two
assert_greater(scores[2, 0] - scores[0, 0], -scores[1, 0] - scores[2, 0])
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron())
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], (1 + i) % 3)
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
| bsd-3-clause |
mindriot101/bokeh | examples/app/export_csv/main.py | 9 | 1472 | from os.path import dirname, join
import pandas as pd
from bokeh.layouts import row, widgetbox
from bokeh.models import ColumnDataSource, CustomJS
from bokeh.models.widgets import RangeSlider, Button, DataTable, TableColumn, NumberFormatter
from bokeh.io import curdoc
df = pd.read_csv(join(dirname(__file__), 'salary_data.csv'))
source = ColumnDataSource(data=dict())
def update():
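    # keep the rows whose salary falls inside the slider's (min, max) range
    # and push them into the table's ColumnDataSource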
current = df[(df['salary'] >= slider.value[0]) & (df['salary'] <= slider.value[1])].dropna()
source.data = {
'name' : current.name,
'salary' : current.salary,
'years_experience' : current.years_experience,
}
slider = RangeSlider(title="Max Salary", start=10000, end=110000, value=(10000, 50000), step=1000, format="0,0")
slider.on_change('value', lambda attr, old, new: update())
button = Button(label="Download", button_type="success")
button.callback = CustomJS(args=dict(source=source),
code=open(join(dirname(__file__), "download.js")).read())
columns = [
TableColumn(field="name", title="Employee Name"),
TableColumn(field="salary", title="Income", formatter=NumberFormatter(format="$0,0.00")),
TableColumn(field="years_experience", title="Experience (years)")
]
data_table = DataTable(source=source, columns=columns, width=800)
controls = widgetbox(slider, button)
table = widgetbox(data_table)
curdoc().add_root(row(controls, table))
curdoc().title = "Export CSV"
update()
| bsd-3-clause |
adamgreenhall/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
josenavas/qiime | tests/test_make_distance_boxplots.py | 15 | 13962 | #!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2012, The QIIME project"
__credits__ = ["Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "jai.rideout@gmail.com"
"""Test suite for the make_distance_boxplots.py module."""
from unittest import TestCase, main
from matplotlib.figure import Figure
from numpy.testing import assert_almost_equal
from qiime.make_distance_boxplots import (_cast_y_axis_extrema,
_color_field_states,
make_distance_boxplots,
_sort_distributions)
class MakeDistanceBoxplotsTests(TestCase):
"""Tests for the make_distance_boxplots.py module."""
def setUp(self):
"""Define some sample data that will be used by the tests."""
self.map_f = map_lines.split('\n')
self.dm_f = dm_lines.split('\n')
self.too_many_colors_map_f = too_many_colors_map_lines.split('\n')
def test_cast_y_axis_extrema(self):
"""Test correctly assigns colors to a field based on another field."""
obs = _cast_y_axis_extrema(1.0)
assert_almost_equal(obs, 1.0)
obs = _cast_y_axis_extrema(1)
assert_almost_equal(obs, 1.0)
obs = _cast_y_axis_extrema('1.0')
assert_almost_equal(obs, 1.0)
obs = _cast_y_axis_extrema('1')
assert_almost_equal(obs, 1.0)
obs = _cast_y_axis_extrema('auto')
self.assertIsNone(obs)
def test_cast_y_axis_extrema_invalid_input(self):
"""Test correctly raises an error on bad input."""
self.assertRaises(ValueError, _cast_y_axis_extrema, 'foo')
def test_color_field_states(self):
"""Test correctly assigns colors to a field based on another field."""
# All sample IDs and field states.
exp = ([(1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (1.0, 0.0, 0.0)],
{'y': (0.0, 0.0, 1.0), 'x': (1.0, 0.0, 0.0)})
obs = _color_field_states(self.map_f, ['1', '2', '3', '4', '5', '6'],
'Foo', ['a', 'b', 'c'], 'Bar')
self.assertEqual(exp[0], obs[0])
assert_almost_equal(obs[1]['x'], exp[1]['x'])
assert_almost_equal(obs[1]['y'], exp[1]['y'])
# Subset of sample IDs and field states.
exp = ([(1.0, 0.0, 0.0)], {'x': (1.0, 0.0, 0.0)})
obs = _color_field_states(self.map_f, ['1', '2'], 'Foo', ['a'], 'Bar')
self.assertEqual(exp[0], obs[0])
assert_almost_equal(obs[1]['x'], exp[1]['x'])
# Color field by itself (useless but still allowed).
exp = ([(1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (0.9490196078431372,
0.45098039215686275, 0.01568627450980392)], {'a':
(1.0, 0.0, 0.0),
'c': (0.9490196078431372, 0.45098039215686275,
0.01568627450980392), 'b': (0.0, 0.0, 1.0)})
obs = _color_field_states(self.map_f, ['1', '2', '3', '4', '5', '6'],
'Foo', ['a', 'b', 'c'], 'Foo')
self.assertEqual(exp[0], obs[0])
assert_almost_equal(obs[1]['a'], exp[1]['a'])
assert_almost_equal(obs[1]['b'], exp[1]['b'])
assert_almost_equal(obs[1]['c'], exp[1]['c'])
def test_color_field_states_invalid_input(self):
"""Test correctly raises error on invalid input."""
# Field to color not in mapping file.
self.assertRaises(ValueError, _color_field_states, self.map_f,
['1', '2', '3', '4', '5'], 'Fooz', ['a', 'b'], 'Bar')
# Field to color by not in mapping file.
self.assertRaises(ValueError, _color_field_states, self.map_f,
['1', '2', '3', '4', '5'], 'Foo', ['a', 'b'], 'Barz')
# Field states are not found in field (due to subset of sample IDs).
self.assertRaises(ValueError, _color_field_states, self.map_f,
['1', '2', '3', '4', '5'], 'Foo', ['a', 'c'], 'Bar')
# Field states are not found in field (not in column at all).
self.assertRaises(ValueError, _color_field_states, self.map_f,
['1', '2', '3', '4', '5', '6'], 'Foo', ['a', 'c', 'z'], 'Bar')
# Not enough colors.
samp_ids = [str(i) for i in range(1, 31)]
self.assertRaises(ValueError, _color_field_states,
self.too_many_colors_map_f, samp_ids, 'Description', samp_ids,
'Description')
# No one-to-one mapping.
self.assertRaises(ValueError, _color_field_states, self.map_f,
['1', '2', '3', '4', '5', '6'], 'Foo', ['a', 'c', 'b'], 'Baz')
def test_make_distance_boxplots(self):
"""Test correctly generates plot, raw data, and labels."""
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Foo', 'Bar'])
self.assertEqual(len(obs), 2)
self.assertEqual(obs[0][0], 'Foo')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 7)
self.assertEqual(len(obs[0][3]), 7)
self.assertEqual(obs[0][4], [None, None, None, None, None, None, None])
self.assertEqual(obs[1][0], 'Bar')
self.assertTrue(isinstance(obs[1][1], Figure))
self.assertEqual(len(obs[1][2]), 5)
self.assertEqual(len(obs[1][3]), 5)
self.assertEqual(obs[1][4], [None, None, None, None, None])
def test_make_distance_boxplots_suppress_plots(self):
"""Test correctly suppresses different plot types."""
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Bar'],
suppress_all_within=True)
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Bar')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 4)
self.assertEqual(len(obs[0][3]), 4)
self.assertEqual(obs[0][4], [None, None, None, None])
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Bar'],
suppress_all_within=True,
suppress_all_between=True)
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Bar')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 3)
self.assertEqual(len(obs[0][3]), 3)
self.assertEqual(obs[0][4], [None, None, None])
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Bar'],
suppress_all_within=True,
suppress_all_between=True,
suppress_individual_within=True)
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Bar')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 1)
self.assertEqual(len(obs[0][3]), 1)
self.assertEqual(obs[0][4], [None])
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Bar'],
suppress_all_within=True,
suppress_all_between=True,
suppress_individual_between=True)
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Bar')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 2)
self.assertEqual(len(obs[0][3]), 2)
self.assertEqual(obs[0][4], [None, None])
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Bar'],
suppress_individual_within=True,
suppress_individual_between=True)
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Bar')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 2)
self.assertEqual(len(obs[0][3]), 2)
self.assertEqual(obs[0][4], [None, None])
def test_make_distance_boxplots_box_color(self):
"""Test correctly colors boxes in a variety of ways."""
# Single box color for all.
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Bar'],
box_color='r')
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Bar')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 5)
self.assertEqual(len(obs[0][3]), 5)
self.assertEqual(obs[0][4], ['r', 'r', 'r', 'r', 'r'])
# Single box color, with some plots suppressed.
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Bar'],
box_color='r',
suppress_individual_within=True)
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Bar')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 3)
self.assertEqual(len(obs[0][3]), 3)
self.assertEqual(obs[0][4], ['r', 'r', 'r'])
# Color individual within boxes.
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Foo'],
color_individual_within_by_field='Bar')
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Foo')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 7)
self.assertEqual(len(obs[0][3]), 7)
self.assertEqual(len(obs[0][4]), 7)
self.assertIsNone(obs[0][4][0])
self.assertIsNone(obs[0][4][1])
self.assertEqual(obs[0][4][2], (1.0, 0.0, 0.0))
self.assertEqual(obs[0][4][3], (0.0, 0.0, 1.0))
self.assertIsNone(obs[0][4][4])
self.assertIsNone(obs[0][4][5])
self.assertIsNone(obs[0][4][6])
# Color individual within boxes, make sure box_color is ignored.
obs = make_distance_boxplots(self.dm_f, self.map_f, ['Foo'],
box_color='pink', color_individual_within_by_field='Bar')
self.assertEqual(len(obs), 1)
self.assertEqual(obs[0][0], 'Foo')
self.assertTrue(isinstance(obs[0][1], Figure))
self.assertEqual(len(obs[0][2]), 7)
self.assertEqual(len(obs[0][3]), 7)
self.assertEqual(len(obs[0][4]), 7)
self.assertIsNone(obs[0][4][0])
self.assertIsNone(obs[0][4][1])
self.assertEqual(obs[0][4][2], (1.0, 0.0, 0.0))
self.assertEqual(obs[0][4][3], (0.0, 0.0, 1.0))
self.assertIsNone(obs[0][4][4])
self.assertIsNone(obs[0][4][5])
self.assertIsNone(obs[0][4][6])
def test_make_distance_boxplots_invalid_input(self):
"""Test correctly raises an error on invalid input."""
# No fields provided.
self.assertRaises(ValueError, make_distance_boxplots, self.dm_f,
self.map_f, None)
self.assertRaises(ValueError, make_distance_boxplots, self.dm_f,
self.map_f, [])
# Nonexistent field.
self.assertRaises(ValueError, make_distance_boxplots, self.dm_f,
self.map_f, ['Foo', 'foobarbaz'])
# Invalid width/height.
self.assertRaises(ValueError, make_distance_boxplots, self.dm_f,
self.map_f, ['Foo', 'Bar'], width=-1, height=5)
self.assertRaises(ValueError, make_distance_boxplots, self.dm_f,
self.map_f, ['Foo', 'Bar'], width=1, height=0)
# Suppress everything.
self.assertRaises(ValueError, make_distance_boxplots, self.dm_f,
self.map_f, ['Foo', 'Bar'], suppress_all_within=True,
suppress_all_between=True, suppress_individual_within=True,
suppress_individual_between=True)
def test_sort_distributions_median(self):
"""Test correctly sorts distributions by median."""
exp = [([0, 0, 0, 1], [2, 1, 1], [1], [1, 2, 3]),
('bar', 'baz', 'zab', 'foo'), ('b', 'r', 'b', 'w')]
obs = _sort_distributions(
[[1, 2, 3], [2, 1, 1], [0, 0, 0, 1], [1]],
['foo', 'baz', 'bar', 'zab'], ['w', 'r', 'b', 'b'], 'median')
self.assertEqual(obs, exp)
def test_sort_distributions_alphabetical(self):
"""Test correctly sorts distributions alphabetically."""
exp = [([2, 1, 1], [1, 2, 3], [0, 0, 0, 1], [1]),
('baz', 'foo', 'foo', 'zab'), ('r', 'w', 'b', 'b')]
obs = _sort_distributions(
[[1, 2, 3], [2, 1, 1], [0, 0, 0, 1], [1]],
['foo', 'baz', 'foo', 'zab'], ['w', 'r', 'b', 'b'], 'alphabetical')
self.assertEqual(obs, exp)
def test_sort_distributions_invalid_input(self):
"""Correctly raises error on invalid input."""
# Unfortunately, this code doesn't support the brosort algorithm... :(
with self.assertRaises(ValueError):
_ = _sort_distributions([[1, 2, 3], [3, 2, 1]], ['foo', 'bar'],
['r', 'b'], 'brosort')
map_lines = """#SampleID\tFoo\tBar\tBaz\tDescription
1\ta\tx\tm\t1
2\tb\ty\tn\t2
3\ta\tx\tm\t3
4\ta\tx\tn\t4
5\tb\ty\tn\t5
6\tc\tx\tm\t6"""
dm_lines = """\t1\t2\t3\t4\t5\t6
1\t0\t1\t2\t4\t7\t11
2\t1\t0\t3\t5\t8\t12
3\t2\t3\t0\t6\t9\t13
4\t4\t5\t6\t0\t10\t14
5\t7\t8\t9\t10\t0\t15
6\t11\t12\t13\t14\t15\t0"""
too_many_colors_map_lines = """#SampleID\tDescription
1\t1
2\t2
3\t3
4\t4
5\t5
6\t6
7\t7
8\t8
9\t9
10\t10
11\t11
12\t12
13\t13
14\t14
15\t15
16\t16
17\t17
18\t18
19\t19
20\t20
21\t21
22\t22
23\t23
24\t24
25\t25
26\t26
27\t27
28\t28
29\t29
30\t30"""
if __name__ == "__main__":
main()
| gpl-2.0 |
srjit/fakenewschallange | code/python/vectorization_script.py | 1 | 1973 | from gensim import models
import pandas as pd
import numpy as np
import featureengineering as pp
filename = "../../data/sample.csv"
data = pd.read_csv(filename, sep=',')
data['header_features'] = data.Headline.apply(lambda x : pp.process(x))
data['content_features'] = data.articleBody.apply(lambda x : pp.process(x))
def sent2vec(words):
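    # L2-normalised sum of the word2vec vectors of the words that are in the
    # model's vocabulary (out-of-vocabulary words are silently skipped)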
M = []
for w in words:
try:
M.append(model[w])
except:
continue
M = np.array(M)
v = M.sum(axis=0)
return v / np.sqrt((v ** 2).sum())
## load the pretrained word2vec model before any sent2vec/wmdistance calls
model = models.Word2Vec.load_word2vec_format('/media/sree/venus/code/word2vec/GoogleNews-vectors-negative300.bin', binary=True)
# model = KeyedVectors.load_word2vec_format('data/GoogleNews-vectors-negative300.bin.gz', binary=True)
## create the header vectors (stored as a list because a 2-D array cannot be
## wrapped in a pd.Series directly)
header_vectors = np.zeros((data.shape[0], 300))
for i, q in enumerate(data.header_features.values):
    header_vectors[i, :] = sent2vec(q)
data['header_vector'] = list(header_vectors)
## create the content vectors from the processed article bodies
content_vectors = np.zeros((data.shape[0], 300))
for i, q in enumerate(data.content_features.values):
    content_vectors[i, :] = sent2vec(q)
data['content_vector'] = list(content_vectors)
data['wmd'] = data.apply(lambda x: model.wmdistance(x['header_features'], x['content_features']), axis=1)
data['header_vectors'] = data.header_features.apply(lambda x : sent2vec(x))
data['content_vectors'] = data.content_features.apply(lambda x : sent2vec(x))
## Word2Vec WMD Distance
for stance_level in np.unique(data.Stance):
filtered_rows = data[(data.Stance == stance_level)]
print("Statistics for group : " + stance_level)
## range of wmds
group_max_wmd = np.max(filtered_rows.wmd)
group_min_wmd = np.min(filtered_rows.wmd)
print("Max wmd for range : " , group_max_wmd)
print("Min wmd for range : " , group_min_wmd)
| gpl-3.0 |
rgommers/scipy | scipy/ndimage/filters.py | 12 | 55835 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections.abc import Iterable
import warnings
import numpy
import operator
from numpy.core.multiarray import normalize_axis_index
from . import _ni_support
from . import _nd_image
from . import _ni_docstrings
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
def _invalid_origin(origin, lenw):
return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2)
def _complex_via_real_components(func, input, weights, output, cval, **kwargs):
"""Complex convolution via a linear combination of real convolutions."""
complex_input = input.dtype.kind == 'c'
complex_weights = weights.dtype.kind == 'c'
if complex_input and complex_weights:
# real component of the output
func(input.real, weights.real, output=output.real,
cval=numpy.real(cval), **kwargs)
output.real -= func(input.imag, weights.imag, output=None,
cval=numpy.imag(cval), **kwargs)
# imaginary component of the output
func(input.real, weights.imag, output=output.imag,
cval=numpy.real(cval), **kwargs)
output.imag += func(input.imag, weights.real, output=None,
cval=numpy.imag(cval), **kwargs)
elif complex_input:
func(input.real, weights, output=output.real, cval=numpy.real(cval),
**kwargs)
func(input.imag, weights, output=output.imag, cval=numpy.imag(cval),
**kwargs)
else:
if numpy.iscomplexobj(cval):
raise ValueError("Cannot provide a complex-valued cval when the "
"input is real.")
func(input, weights.real, output=output.real, cval=cval, **kwargs)
func(input, weights.imag, output=output.imag, cval=cval, **kwargs)
return output
@_ni_docstrings.docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a 1-D correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
1-D sequence of numbers.
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
Examples
--------
>>> from scipy.ndimage import correlate1d
>>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([ 8, 26, 8, 12, 7, 28, 36, 9])
"""
input = numpy.asarray(input)
weights = numpy.asarray(weights)
complex_input = input.dtype.kind == 'c'
complex_weights = weights.dtype.kind == 'c'
if complex_input or complex_weights:
if complex_weights:
weights = weights.conj()
weights = weights.astype(numpy.complex128, copy=False)
kwargs = dict(axis=axis, mode=mode, origin=origin)
output = _ni_support._get_output(output, input, complex_output=True)
return _complex_via_real_components(correlate1d, input, weights,
output, cval, **kwargs)
output = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = normalize_axis_index(axis, input.ndim)
if _invalid_origin(origin, len(weights)):
raise ValueError('Invalid origin; origin must satisfy '
'-(len(weights) // 2) <= origin <= '
'(len(weights)-1) // 2')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return output
@_ni_docstrings.docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a 1-D convolution along the given axis.
The lines of the array along the given axis are convolved with the
given weights.
Parameters
----------
%(input)s
weights : ndarray
1-D sequence of numbers.
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
Returns
-------
convolve1d : ndarray
Convolved array with same shape as input
Examples
--------
>>> from scipy.ndimage import convolve1d
>>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([14, 24, 4, 13, 12, 36, 27, 0])
"""
weights = weights[::-1]
origin = -origin
if not len(weights) & 1:
origin -= 1
weights = numpy.asarray(weights)
if weights.dtype.kind == 'c':
# pre-conjugate here to counteract the conjugation in correlate1d
weights = weights.conj()
return correlate1d(input, weights, axis, output, mode, cval, origin)
def _gaussian_kernel1d(sigma, order, radius):
"""
Computes a 1-D Gaussian convolution kernel.
"""
if order < 0:
raise ValueError('order must be non-negative')
exponent_range = numpy.arange(order + 1)
sigma2 = sigma * sigma
x = numpy.arange(-radius, radius+1)
phi_x = numpy.exp(-0.5 / sigma2 * x ** 2)
phi_x = phi_x / phi_x.sum()
if order == 0:
return phi_x
else:
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
# p'(x) = -1 / sigma ** 2
# Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
# coefficients of q(x)
q = numpy.zeros(order + 1)
q[0] = 1
D = numpy.diag(exponent_range[1:], 1) # D @ q(x) = q'(x)
P = numpy.diag(numpy.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x)
Q_deriv = D + P
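        # e.g. for order=1: q = [1, 0] and Q_deriv @ q = [0, -1/sigma2],
        # i.e. q(x) = -x / sigma**2, exactly the factor appearing in
        # phi'(x) = -x / sigma**2 * phi(x)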
for _ in range(order):
q = Q_deriv.dot(q)
q = (x[:, None] ** exponent_range).dot(q)
return q * phi_x
@_ni_docstrings.docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""1-D Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : int, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. A positive order corresponds to convolution with
that derivative of a Gaussian.
%(output)s
%(mode_reflect)s
%(cval)s
truncate : float, optional
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter1d : ndarray
Examples
--------
>>> from scipy.ndimage import gaussian_filter1d
>>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905])
>>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657])
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
>>> x = rng.standard_normal(101).cumsum()
>>> y3 = gaussian_filter1d(x, 3)
>>> y6 = gaussian_filter1d(x, 6)
>>> plt.plot(x, 'k', label='original data')
>>> plt.plot(y3, '--', label='filtered, sigma=3')
>>> plt.plot(y6, ':', label='filtered, sigma=6')
>>> plt.legend()
>>> plt.grid()
>>> plt.show()
"""
sd = float(sigma)
# make the radius of the filter equal to truncate standard deviations
lw = int(truncate * sd + 0.5)
    # Since we are calling correlate, not convolve, reverse the kernel
weights = _gaussian_kernel1d(sigma, order, lw)[::-1]
return correlate1d(input, weights, axis, output, mode, cval, 0)
@_ni_docstrings.docfiller
def gaussian_filter(input, sigma, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""Multidimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : int or sequence of ints, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. A positive order
corresponds to convolution with that derivative of a Gaussian.
%(output)s
%(mode_multiple)s
%(cval)s
truncate : float
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
1-D convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
Examples
--------
>>> from scipy.ndimage import gaussian_filter
>>> a = np.arange(50, step=2).reshape((5,5))
>>> a
array([[ 0, 2, 4, 6, 8],
[10, 12, 14, 16, 18],
[20, 22, 24, 26, 28],
[30, 32, 34, 36, 38],
[40, 42, 44, 46, 48]])
>>> gaussian_filter(a, sigma=1)
array([[ 4, 6, 8, 9, 11],
[10, 12, 14, 15, 17],
[20, 22, 24, 25, 27],
[29, 31, 33, 34, 36],
[35, 37, 39, 40, 42]])
>>> from scipy import misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = gaussian_filter(ascent, sigma=5)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
if len(axes) > 0:
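        # filter along one axis at a time; each 1-D pass writes into `output`,
        # which is then reused as the input of the next pass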
for axis, sigma, order, mode in axes:
gaussian_filter1d(input, sigma, axis, order, output,
mode, cval, truncate)
input = output
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Prewitt filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.prewitt(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
axis = normalize_axis_index(axis, input.ndim)
output = _ni_support._get_output(output, input)
modes = _ni_support._normalize_sequence(mode, input.ndim)
correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,)
return output
@_ni_docstrings.docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.sobel(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
axis = normalize_axis_index(axis, input.ndim)
output = _ni_support._get_output(output, input)
modes = _ni_support._normalize_sequence(mode, input.ndim)
correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0)
return output
@_ni_docstrings.docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
cval=0.0,
extra_arguments=(),
extra_keywords=None):
"""
N-D Laplace filter using a provided second derivative function.
Parameters
----------
%(input)s
derivative2 : callable
Callable with the following signature::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
%(output)s
%(mode_multiple)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
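    Examples
    --------
    A minimal sketch of a user-supplied second-derivative callable; it applies
    the same ``[1, -2, 1]`` stencil that `laplace` uses, so the two results
    match:
        >>> from scipy import ndimage
        >>> import numpy as np
        >>> def derivative2(input, axis, output, mode, cval):
        ...     return ndimage.correlate1d(input, [1, -2, 1], axis, output,
        ...                                mode, cval, 0)
        >>> a = np.arange(25, dtype=float).reshape(5, 5)
        >>> np.array_equal(ndimage.generic_laplace(a, derivative2),
        ...                ndimage.laplace(a))
        True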
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
modes = _ni_support._normalize_sequence(mode, len(axes))
derivative2(input, axes[0], output, modes[0], cval,
*extra_arguments, **extra_keywords)
for ii in range(1, len(axes)):
tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval,
*extra_arguments, **extra_keywords)
output += tmp
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
"""N-D Laplace filter based on approximate second derivatives.
Parameters
----------
%(input)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.laplace(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
def derivative2(input, axis, output, mode, cval):
return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
return generic_laplace(input, derivative2, output, mode, cval)
@_ni_docstrings.docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
cval=0.0, **kwargs):
"""Multidimensional Laplace filter using Gaussian second derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> result = ndimage.gaussian_laplace(ascent, sigma=1)
>>> ax1.imshow(result)
>>> result = ndimage.gaussian_laplace(ascent, sigma=3)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(input, sigma, order, output, mode, cval,
**kwargs)
return generic_laplace(input, derivative2, output, mode, cval,
extra_arguments=(sigma,),
extra_keywords=kwargs)
@_ni_docstrings.docfiller
def generic_gradient_magnitude(input, derivative, output=None,
mode="reflect", cval=0.0,
extra_arguments=(), extra_keywords=None):
"""Gradient magnitude using a provided gradient function.
Parameters
----------
%(input)s
derivative : callable
Callable with the following signature::
derivative(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
`derivative` can assume that `input` and `output` are ndarrays.
Note that the output from `derivative` is modified inplace;
be careful to copy important inputs before returning them.
%(output)s
%(mode_multiple)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
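    Examples
    --------
    A minimal sketch passing `sobel` as the per-axis derivative function; its
    signature matches the ``derivative`` callable described above:
        >>> from scipy import ndimage
        >>> import numpy as np
        >>> a = np.arange(25, dtype=float).reshape(5, 5)
        >>> ndimage.generic_gradient_magnitude(a, ndimage.sobel).shape
        (5, 5)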
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
modes = _ni_support._normalize_sequence(mode, len(axes))
derivative(input, axes[0], output, modes[0], cval,
*extra_arguments, **extra_keywords)
numpy.multiply(output, output, output)
for ii in range(1, len(axes)):
tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
*extra_arguments, **extra_keywords)
numpy.multiply(tmp, tmp, tmp)
output += tmp
# This allows the sqrt to work with a different default casting
numpy.sqrt(output, output, casting='unsafe')
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
mode="reflect", cval=0.0, **kwargs):
"""Multidimensional gradient magnitude using Gaussian derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Returns
-------
gaussian_gradient_magnitude : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
def derivative(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 1
return gaussian_filter(input, sigma, order, output, mode,
cval, **kwargs)
return generic_gradient_magnitude(input, derivative, output, mode,
cval, extra_arguments=(sigma,),
extra_keywords=kwargs)
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
convolution):
input = numpy.asarray(input)
weights = numpy.asarray(weights)
complex_input = input.dtype.kind == 'c'
complex_weights = weights.dtype.kind == 'c'
if complex_input or complex_weights:
if complex_weights and not convolution:
# As for numpy.correlate, conjugate weights rather than input.
weights = weights.conj()
kwargs = dict(
mode=mode, origin=origin, convolution=convolution
)
output = _ni_support._get_output(output, input, complex_output=True)
return _complex_via_real_components(_correlate_or_convolve, input,
weights, output, cval, **kwargs)
origins = _ni_support._normalize_sequence(origin, input.ndim)
weights = numpy.asarray(weights, dtype=numpy.float64)
wshape = [ii for ii in weights.shape if ii > 0]
if len(wshape) != input.ndim:
raise RuntimeError('filter weights array has incorrect shape.')
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
for ii in range(len(origins)):
origins[ii] = -origins[ii]
if not weights.shape[ii] & 1:
origins[ii] -= 1
for origin, lenw in zip(origins, wshape):
if _invalid_origin(origin, lenw):
raise ValueError('Invalid origin; origin must satisfy '
'-(weights.shape[k] // 2) <= origin[k] <= '
'(weights.shape[k]-1) // 2')
if not weights.flags.contiguous:
weights = weights.copy()
output = _ni_support._get_output(output, input)
temp_needed = numpy.may_share_memory(input, output)
if temp_needed:
# input and output arrays cannot share memory
temp = output
output = _ni_support._get_output(output.dtype, input)
if not isinstance(mode, str) and isinstance(mode, Iterable):
raise RuntimeError("A sequence of modes is not supported")
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate(input, weights, output, mode, cval, origins)
if temp_needed:
temp[...] = output
output = temp
return output
@_ni_docstrings.docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multidimensional correlation.
The array is correlated with the given kernel.
Parameters
----------
%(input)s
weights : ndarray
array of weights, same number of dimensions as input
%(output)s
%(mode_reflect)s
%(cval)s
%(origin_multiple)s
Returns
-------
result : ndarray
The result of correlation of `input` with `weights`.
See Also
--------
convolve : Convolve an image with a kernel.
Examples
--------
    Correlation is the process of moving a filter mask, often referred to
    as a kernel, over the image and computing the sum of products at each
    location.
>>> from scipy.ndimage import correlate
>>> input_img = np.arange(25).reshape(5,5)
>>> print(input_img)
[[ 0 1 2 3 4]
[ 5 6 7 8 9]
[10 11 12 13 14]
[15 16 17 18 19]
[20 21 22 23 24]]
    Define a kernel (weights) for correlation. In this example, it sums the
    center element and its four immediate neighbors (up, down, left and right).
>>> weights = [[0, 1, 0],
... [1, 1, 1],
... [0, 1, 0]]
We can calculate a correlation result:
For example, element ``[2,2]`` is ``7 + 11 + 12 + 13 + 17 = 60``.
>>> correlate(input_img, weights)
array([[ 6, 10, 15, 20, 24],
[ 26, 30, 35, 40, 44],
[ 51, 55, 60, 65, 69],
[ 76, 80, 85, 90, 94],
[ 96, 100, 105, 110, 114]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, False)
@_ni_docstrings.docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multidimensional convolution.
The array is convolved with the given kernel.
Parameters
----------
%(input)s
weights : array_like
Array of weights, same number of dimensions as input
%(output)s
%(mode_reflect)s
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
%(origin_multiple)s
Returns
-------
result : ndarray
The result of convolution of `input` with `weights`.
See Also
--------
correlate : Correlate an image with a kernel.
Notes
-----
Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
W is the `weights` kernel,
j is the N-D spatial index over :math:`W`,
I is the `input` and k is the coordinate of the center of
W, specified by `origin` in the input parameters.
Examples
--------
Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
because in this case borders (i.e., where the `weights` kernel, centered
on any one value, extends beyond an edge of `input`) are treated as zeros.
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
>>> from scipy import ndimage
>>> ndimage.convolve(a, k, mode='constant', cval=0.0)
array([[11, 10, 7, 4],
[10, 3, 11, 11],
[15, 12, 14, 7],
[12, 3, 7, 0]])
Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
with 1.0's (and then extracting only the original region of the result).
>>> ndimage.convolve(a, k, mode='constant', cval=1.0)
array([[13, 11, 8, 7],
[11, 3, 11, 14],
[16, 12, 14, 10],
[15, 6, 10, 5]])
With ``mode='reflect'`` (the default), outer values are reflected at the
edge of `input` to fill in missing values.
>>> b = np.array([[2, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
>>> ndimage.convolve(b, k, mode='reflect')
array([[5, 0, 0],
[3, 0, 0],
[1, 0, 0]])
This includes diagonally at the corners.
>>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
>>> ndimage.convolve(b, k)
array([[4, 2, 0],
[3, 2, 0],
[1, 1, 0]])
    With ``mode='nearest'``, the single value nearest to an edge of
`input` is repeated as many times as needed to match the overlapping
`weights`.
>>> c = np.array([[2, 0, 1],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0]])
>>> ndimage.convolve(c, k, mode='nearest')
array([[7, 0, 3],
[5, 0, 2],
[3, 0, 1]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, True)
@_ni_docstrings.docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a 1-D uniform filter along the given axis.
The lines of the array along the given axis are filtered with a
uniform filter of given size.
Parameters
----------
%(input)s
size : int
length of uniform filter
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
Examples
--------
>>> from scipy.ndimage import uniform_filter1d
>>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([4, 3, 4, 1, 4, 6, 6, 3])
"""
input = numpy.asarray(input)
axis = normalize_axis_index(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
complex_output = input.dtype.kind == 'c'
output = _ni_support._get_output(output, input,
complex_output=complex_output)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
if not complex_output:
_nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
origin)
else:
_nd_image.uniform_filter1d(input.real, size, axis, output.real, mode,
numpy.real(cval), origin)
_nd_image.uniform_filter1d(input.imag, size, axis, output.imag, mode,
numpy.imag(cval), origin)
return output
@_ni_docstrings.docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
cval=0.0, origin=0):
"""Multidimensional uniform filter.
Parameters
----------
%(input)s
size : int or sequence of ints, optional
The sizes of the uniform filter are given for each axis as a
sequence, or as a single number, in which case the size is
equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
uniform_filter : ndarray
Filtered array. Has the same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
1-D uniform filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.uniform_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
output = _ni_support._get_output(output, input,
complex_output=input.dtype.kind == 'c')
sizes = _ni_support._normalize_sequence(size, input.ndim)
origins = _ni_support._normalize_sequence(origin, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if len(axes) > 0:
for axis, size, origin, mode in axes:
uniform_filter1d(input, int(size), axis, output, mode,
cval, origin)
input = output
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a 1-D minimum filter along the given axis.
The lines of the array along the given axis are filtered with a
minimum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D minimum
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
Notes
-----
This function implements the MINLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
Examples
--------
>>> from scipy.ndimage import minimum_filter1d
>>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([2, 0, 0, 0, 1, 1, 0, 0])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = normalize_axis_index(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 1)
return output
@_ni_docstrings.docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a 1-D maximum filter along the given axis.
The lines of the array along the given axis are filtered with a
maximum filter of given size.
Parameters
----------
%(input)s
size : int
Length along which to calculate the 1-D maximum.
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
Returns
-------
maximum1d : ndarray, None
Maximum-filtered array with same shape as input.
None if `output` is not None
Notes
-----
This function implements the MAXLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
Examples
--------
>>> from scipy.ndimage import maximum_filter1d
>>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([8, 8, 8, 4, 9, 9, 9, 9])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = normalize_axis_index(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 0)
return output
def _min_or_max_filter(input, size, footprint, structure, output, mode,
cval, origin, minimum):
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
if structure is None:
if footprint is None:
if size is None:
raise RuntimeError("no footprint provided")
separable = True
else:
footprint = numpy.asarray(footprint, dtype=bool)
if not footprint.any():
raise ValueError("All-zero footprint is not supported.")
if footprint.all():
size = footprint.shape
footprint = None
separable = True
else:
separable = False
else:
structure = numpy.asarray(structure, dtype=numpy.float64)
separable = False
if footprint is None:
footprint = numpy.ones(structure.shape, bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
temp_needed = numpy.may_share_memory(input, output)
if temp_needed:
# input and output arrays cannot share memory
temp = output
output = _ni_support._get_output(output.dtype, input)
origins = _ni_support._normalize_sequence(origin, input.ndim)
if separable:
sizes = _ni_support._normalize_sequence(size, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if minimum:
filter_ = minimum_filter1d
else:
filter_ = maximum_filter1d
if len(axes) > 0:
for axis, size, origin, mode in axes:
filter_(input, int(size), axis, output, mode, cval, origin)
input = output
else:
output[...] = input[...]
else:
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
if structure is not None:
if len(structure.shape) != input.ndim:
raise RuntimeError('structure array has incorrect shape')
if not structure.flags.contiguous:
structure = structure.copy()
if not isinstance(mode, str) and isinstance(mode, Iterable):
raise RuntimeError(
"A sequence of modes is not supported for non-separable "
"footprints")
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter(input, footprint, structure, output,
mode, cval, origins, minimum)
if temp_needed:
temp[...] = output
output = temp
return output
@_ni_docstrings.docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multidimensional minimum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
minimum_filter : ndarray
Filtered array. Has the same shape as `input`.
Notes
-----
A sequence of modes (one per axis) is only supported when the footprint is
separable. Otherwise, a single mode string must be provided.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.minimum_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 1)
@_ni_docstrings.docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multidimensional maximum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
maximum_filter : ndarray
Filtered array. Has the same shape as `input`.
Notes
-----
A sequence of modes (one per axis) is only supported when the footprint is
separable. Otherwise, a single mode string must be provided.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.maximum_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 0)
@_ni_docstrings.docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0, operation='rank'):
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
filter_size = numpy.where(footprint, 1, 0).sum()
if operation == 'median':
rank = filter_size // 2
elif operation == 'percentile':
percentile = rank
if percentile < 0.0:
percentile += 100.0
if percentile < 0 or percentile > 100:
raise RuntimeError('invalid percentile')
if percentile == 100.0:
rank = filter_size - 1
else:
rank = int(float(filter_size) * percentile / 100.0)
if rank < 0:
rank += filter_size
if rank < 0 or rank >= filter_size:
raise RuntimeError('rank not within filter footprint size')
if rank == 0:
return minimum_filter(input, None, footprint, output, mode, cval,
origins)
elif rank == filter_size - 1:
return maximum_filter(input, None, footprint, output, mode, cval,
origins)
else:
output = _ni_support._get_output(output, input)
temp_needed = numpy.may_share_memory(input, output)
if temp_needed:
# input and output arrays cannot share memory
temp = output
output = _ni_support._get_output(output.dtype, input)
if not isinstance(mode, str) and isinstance(mode, Iterable):
raise RuntimeError(
"A sequence of modes is not supported by non-separable rank "
"filters")
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.rank_filter(input, rank, footprint, output, mode, cval,
origins)
if temp_needed:
temp[...] = output
output = temp
return output
@_ni_docstrings.docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multidimensional rank filter.
Parameters
----------
%(input)s
rank : int
        The rank parameter may be less than zero, i.e., rank = -1
indicates the largest element.
%(size_foot)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin_multiple)s
Returns
-------
rank_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.rank_filter(ascent, rank=42, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
rank = operator.index(rank)
return _rank_filter(input, rank, size, footprint, output, mode, cval,
origin, 'rank')
@_ni_docstrings.docfiller
def median_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""
Calculate a multidimensional median filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin_multiple)s
Returns
-------
median_filter : ndarray
Filtered array. Has the same shape as `input`.
See also
--------
scipy.signal.medfilt2d
Notes
-----
For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes
the specialised function `scipy.signal.medfilt2d` may be faster. It is
however limited to constant mode with ``cval=0``.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.median_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, 0, size, footprint, output, mode, cval,
origin, 'median')
@_ni_docstrings.docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""Calculate a multidimensional percentile filter.
Parameters
----------
%(input)s
percentile : scalar
        The percentile parameter may be less than zero, i.e.,
percentile = -20 equals percentile = 80
%(size_foot)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin_multiple)s
Returns
-------
percentile_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, percentile, size, footprint, output, mode,
cval, origin, 'percentile')
@_ni_docstrings.docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords=None):
"""Calculate a 1-D filter along the given axis.
`generic_filter1d` iterates over the lines of the array, calling the
    given function at each line. The function is called with two arguments:
    the input line and the output line. The input and output lines are 1-D
double arrays. The input line is extended appropriately according
to the filter size and origin. The output line must be modified
in-place with the result.
Parameters
----------
%(input)s
function : {callable, scipy.LowLevelCallable}
Function to apply along given axis.
filter_size : scalar
Length of the filter.
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
Notes
-----
This function also accepts low-level callback functions with one of
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int function(double *input_line, npy_intp input_length,
double *output_line, npy_intp output_length,
void *user_data)
int function(double *input_line, intptr_t input_length,
double *output_line, intptr_t output_length,
void *user_data)
The calling function iterates over the lines of the input and output
arrays, calling the callback function at each line. The current line
is extended according to the border conditions set by the calling
function, and the result is copied into the array that is passed
through ``input_line``. The length of the input line (after extension)
is passed through ``input_length``. The callback function should apply
the filter and store the result in the array passed through
``output_line``. The length of the output line is passed through
``output_length``. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
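    Examples
    --------
    A minimal sketch with a plain Python callable: it receives the extended
    input line and must fill the output line in place (here a moving sum over
    three samples):
        >>> import numpy as np
        >>> from scipy.ndimage import generic_filter1d
        >>> def moving_sum(input_line, output_line):
        ...     for i in range(output_line.size):
        ...         output_line[i] = input_line[i:i + 3].sum()
        >>> generic_filter1d(np.array([1.0, 2.0, 3.0, 4.0]),
        ...                  moving_sum, 3).tolist()
        [4.0, 6.0, 9.0, 11.0]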
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
if filter_size < 1:
raise RuntimeError('invalid filter size')
axis = normalize_axis_index(axis, input.ndim)
if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
filter_size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter1d(input, function, filter_size, axis, output,
mode, cval, origin, extra_arguments,
extra_keywords)
return output
@_ni_docstrings.docfiller
def generic_filter(input, function, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords=None):
"""Calculate a multidimensional filter using the given function.
At each element the provided function is called. The input values
within the filter footprint at that element are passed to the function
as a 1-D array of double values.
Parameters
----------
%(input)s
function : {callable, scipy.LowLevelCallable}
Function to apply at each element.
%(size_foot)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin_multiple)s
%(extra_arguments)s
%(extra_keywords)s
Notes
-----
This function also accepts low-level callback functions with one of
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int callback(double *buffer, npy_intp filter_size,
double *return_value, void *user_data)
int callback(double *buffer, intptr_t filter_size,
double *return_value, void *user_data)
The calling function iterates over the elements of the input and
output arrays, calling the callback function at each element. The
elements within the footprint of the filter at the current element are
passed through the ``buffer`` parameter, and the number of elements
within the footprint through ``filter_size``. The calculated value is
returned in ``return_value``. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
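    Examples
    --------
    A minimal sketch with a plain Python callable: the values inside the
    footprint are passed to it as a 1-D array (here the local peak-to-peak
    range over a 3x3 neighbourhood):
        >>> import numpy as np
        >>> from scipy.ndimage import generic_filter
        >>> a = np.arange(9, dtype=float).reshape(3, 3)
        >>> generic_filter(a, np.ptp, size=3).tolist()
        [[4.0, 5.0, 4.0], [7.0, 8.0, 7.0], [4.0, 5.0, 4.0]]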
"""
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
output = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter(input, function, footprint, output, mode,
cval, origins, extra_arguments, extra_keywords)
return output
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/plotting/_timeseries.py | 14 | 11013 | # TODO: Use the fact that axis can have units to simplify the process
import numpy as np
from matplotlib import pylab
from pandas.core.indexes.period import Period
from pandas.tseries.offsets import DateOffset
import pandas.tseries.frequencies as frequencies
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.io.formats.printing import pprint_thing
import pandas.compat as compat
from pandas.plotting._converter import (TimeSeries_DateLocator,
TimeSeries_DateFormatter,
TimeSeries_TimedeltaFormatter)
# ---------------------------------------------------------------------
# Plotting functions and monkey patches
def tsplot(series, plotf, ax=None, **kwargs):
"""
Plots a Series on the given Matplotlib axes or the current axes
    Parameters
    ----------
    series : Series
    plotf : callable
        Plotting function applied to the axes.
    ax : Axes, optional
        Axes to plot on; defaults to the current axes.
    Notes
    -----
    Supports the same kwargs as ``Axes.plot``.
"""
    # Use inferred freq if possible; need a test case for inferred
if ax is None:
import matplotlib.pyplot as plt
ax = plt.gca()
freq, series = _maybe_resample(series, ax, kwargs)
# Set ax with freq info
_decorate_axes(ax, freq, kwargs)
ax._plot_data.append((series, plotf, kwargs))
lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq, series.index)
return lines
def _maybe_resample(series, ax, kwargs):
# resample against axes freq if necessary
freq, ax_freq = _get_freq(ax, series)
if freq is None: # pragma: no cover
raise ValueError('Cannot use dynamic axis without frequency info')
# Convert DatetimeIndex to PeriodIndex
if isinstance(series.index, DatetimeIndex):
series = series.to_period(freq=freq)
if ax_freq is not None and freq != ax_freq:
if frequencies.is_superperiod(freq, ax_freq): # upsample input
series = series.copy()
series.index = series.index.asfreq(ax_freq, how='s')
freq = ax_freq
elif _is_sup(freq, ax_freq): # one is weekly
how = kwargs.pop('how', 'last')
series = getattr(series.resample('D'), how)().dropna()
series = getattr(series.resample(ax_freq), how)().dropna()
freq = ax_freq
elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq):
_upsample_others(ax, freq, kwargs)
ax_freq = freq
else: # pragma: no cover
raise ValueError('Incompatible frequency conversion')
return freq, series
def _is_sub(f1, f2):
return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_subperiod(f1, 'D')))
def _is_sup(f1, f2):
return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_superperiod(f1, 'D')))
def _upsample_others(ax, freq, kwargs):
legend = ax.get_legend()
lines, labels = _replot_ax(ax, freq, kwargs)
_replot_ax(ax, freq, kwargs)
other_ax = None
if hasattr(ax, 'left_ax'):
other_ax = ax.left_ax
if hasattr(ax, 'right_ax'):
other_ax = ax.right_ax
if other_ax is not None:
rlines, rlabels = _replot_ax(other_ax, freq, kwargs)
lines.extend(rlines)
labels.extend(rlabels)
if (legend is not None and kwargs.get('legend', True) and
len(lines) > 0):
title = legend.get_title().get_text()
if title == 'None':
title = None
ax.legend(lines, labels, loc='best', title=title)
def _replot_ax(ax, freq, kwargs):
data = getattr(ax, '_plot_data', None)
# clear current axes and data
ax._plot_data = []
ax.clear()
_decorate_axes(ax, freq, kwargs)
lines = []
labels = []
if data is not None:
for series, plotf, kwds in data:
series = series.copy()
idx = series.index.asfreq(freq, how='S')
series.index = idx
ax._plot_data.append((series, plotf, kwds))
# for tsplot
if isinstance(plotf, compat.string_types):
from pandas.plotting._core import _plot_klass
plotf = _plot_klass[plotf]._plot
lines.append(plotf(ax, series.index._mpl_repr(),
series.values, **kwds)[0])
labels.append(pprint_thing(series.name))
return lines, labels
def _decorate_axes(ax, freq, kwargs):
"""Initialize axes for time-series plotting"""
if not hasattr(ax, '_plot_data'):
ax._plot_data = []
ax.freq = freq
xaxis = ax.get_xaxis()
xaxis.freq = freq
if not hasattr(ax, 'legendlabels'):
ax.legendlabels = [kwargs.get('label', None)]
else:
ax.legendlabels.append(kwargs.get('label', None))
ax.view_interval = None
ax.date_axis_info = None
def _get_ax_freq(ax):
"""
Get the freq attribute of the ax object if set.
Also checks shared axes (eg when using secondary yaxis, sharex=True
or twinx)
"""
ax_freq = getattr(ax, 'freq', None)
if ax_freq is None:
# check for left/right ax in case of secondary yaxis
if hasattr(ax, 'left_ax'):
ax_freq = getattr(ax.left_ax, 'freq', None)
elif hasattr(ax, 'right_ax'):
ax_freq = getattr(ax.right_ax, 'freq', None)
if ax_freq is None:
# check if a shared ax (sharex/twinx) has already freq set
shared_axes = ax.get_shared_x_axes().get_siblings(ax)
if len(shared_axes) > 1:
for shared_ax in shared_axes:
ax_freq = getattr(shared_ax, 'freq', None)
if ax_freq is not None:
break
return ax_freq
def _get_freq(ax, series):
# get frequency from data
freq = getattr(series.index, 'freq', None)
if freq is None:
freq = getattr(series.index, 'inferred_freq', None)
ax_freq = _get_ax_freq(ax)
# use axes freq if no data freq
if freq is None:
freq = ax_freq
# get the period frequency
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq, ax_freq
def _use_dynamic_x(ax, data):
freq = _get_index_freq(data)
ax_freq = _get_ax_freq(ax)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
if freq is None:
return False
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
if freq is None:
return False
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
x = data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
def _get_index_freq(data):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _maybe_convert_index(ax, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
if isinstance(data.index, DatetimeIndex):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
if freq is None:
freq = _get_ax_freq(ax)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
data = data.to_period(freq=freq)
return data
# Patch methods for subplot. Only format_dateaxis is currently used.
# Do we need the rest for convenience?
def format_timedelta_ticks(x, pos, n_decimals):
"""
    Convert a tick value in nanoseconds to 'D days HH:MM:SS.F'
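    For example (illustrative), with a tick value of one day, one hour,
    one minute and one second:
        >>> format_timedelta_ticks(90061 * 1e9, None, 0)
        '1 days 01:01:01'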
"""
s, ns = divmod(x, 1e9)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
decimals = int(ns * 10**(n_decimals - 9))
s = r'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s))
if n_decimals > 0:
s += '.{{:0{:0d}d}}'.format(n_decimals).format(decimals)
if d != 0:
s = '{:d} days '.format(int(d)) + s
return s
def format_dateaxis(subplot, freq, index):
"""
Pretty-formats the date axis (x-axis).
Major and minor ticks are automatically set for the frequency of the
current underlying series. As the dynamic mode is activated by
default, changing the limits of the x axis will intelligently change
the positions of the ticks.
"""
# handle index specific formatting
# Note: DatetimeIndex does not use this
# interface. DatetimeIndex uses matplotlib.date directly
if isinstance(index, PeriodIndex):
majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_locator(majlocator)
subplot.xaxis.set_minor_locator(minlocator)
majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_formatter(majformatter)
subplot.xaxis.set_minor_formatter(minformatter)
# x and y coord info
subplot.format_coord = lambda t, y: (
"t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y))
elif isinstance(index, TimedeltaIndex):
subplot.xaxis.set_major_formatter(
TimeSeries_TimedeltaFormatter())
else:
raise TypeError('index type not supported')
pylab.draw_if_interactive()
| mit |
pydupont/PersonalPythonLibs | PersonalLibs/CodonSplitter.py | 1 | 8324 | from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
import pandas as pd
import math
import sys, os
class GenCode:
"""
Class built to deal with genetic code creation and access
:ivar gencode: the genetic code codon => aa
:ivar inverted_gencode: the inverted genetic code aa => list of codons
    :ivar F_classes: the F classes for the expected number of codons computation; dict(number of codons: list of aa)
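    Example (illustrative sketch)::
        >>> gc = GenCode()
        >>> gc.get_aa('ATG')
        'M'
        >>> sorted(gc.get_synonymous_codons('GAA'))
        ['GAA', 'GAG']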
"""
gencode = {
'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',
'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',
'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',
'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',
'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',
'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',
'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',
'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',
'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',
'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',
'TAC': 'Y', 'TAT': 'Y', 'TAA': '_', 'TAG': '_',
'TGC': 'C', 'TGT': 'C', 'TGA': '_', 'TGG': 'W'}
def __init__(self):
self.inverted_gencode = self.__inverted_gencode()
self.F_classes = self.__build_F_classes()
def __inverted_gencode(self):
"""
Build the inverted genetic code: dict(aa:list of codons)
"""
d = {}
for codon, aa in GenCode.gencode.items():
if aa not in d: d[aa] = set()
d[aa].add(codon)
return d
def get_aa(self, codon):
"""
Get the amino acid corresponding to a given codon
:param str codon: a codon
:returns: amino acid
:rtype: str
"""
        if codon.upper() in GenCode.gencode: return GenCode.gencode[codon.upper()]
return 'X'
def get_synonymous_codons(self, codon):
"""
Returns all the codons coding for the same amino acid as the given codon
:param str codon: a codon
        :returns: a list of synonymous codons
:rtype: list
"""
if "N" in codon.upper(): return []
        if codon.upper() not in GenCode.gencode: raise IOError("%s is not a codon.\n" % codon)
return self.get_codons(self.get_aa(codon))
def get_codons(self, aa):
"""
Get all the codons of a given amino acid
:param str aa: an amino acid
:returns: a list of codons
:rtype: list
"""
        if aa.upper() in self.inverted_gencode: return self.inverted_gencode[aa.upper()]
        raise IOError("%s is not an amino acid code.\n" % aa)
def __build_F_classes(self):
"""
        Build the F_classes for ENC computation. Corresponds to a dict where the key is the number of codons coding for an amino acid and the value is a list of amino acids
:returns: the F_classes
:rtype: dict
"""
d={}
for aa, codons in self.inverted_gencode.items():
if not len(codons) in d: d[len(codons)] = set()
d[len(codons)].add(aa)
return d
class CodonSplitter:
"""
    Splits a list of sequences into codons, with methods to translate the sequences and verify the translation.
    Can be initialized from a file handle, a file path (str), a SeqRecord or a list of SeqRecords.
:ivar dict coding_sequences: the coding sequences
:ivar dict codons: the codons of the coding sequences
:ivar dict translated_sequences: the translated coding sequences
    :ivar dict __codon_numbers: a dict where the key is a sequence identifier and the value is a dict mapping each codon to its count in the given sequence
:ivar dict __codon_frequencies: the codon frequencies (same structure as self.__codon_numbers)
:ivar GenCode gencode: the genetic code
"""
def __init__(self, *args):
self.__new()
if type(args[0]) is file:
self.__init_from_fasta_file(args[0])
elif type(args[0]) is str:
if os.path.isfile(args[0]):
with open(args[0]) as inf:
self.__init_from_fasta_file(inf)
else:
raise IOError("Don't know how to initialise the CodonSplitter using %s \n" % str(args))
elif type(args[0]) is SeqRecord:
self.__init_from_one_record(args[0])
elif type(args[0]) is list:
            for elt in args[0]:
if type(elt) is SeqRecord: self.__init_from_one_record(elt)
else: raise IOError("Don't know how to initialise the CodonSplitter using %s \n" % str(args))
else:
raise IOError("Don't know how to initialise the CodonSplitter using %s \n" % str(args))
self.find_codons()
self.translate()
def __new(self):
"""
        Initialize class variables
"""
self.coding_sequences = {}
self.codons = {}
self.translated_sequences = {}
self.__codon_numbers = None
self.__codon_frequencies = None
self.gencode = GenCode()
def __translate_seq(self, seq):
"""
Translate a sequence
:returns: a translated sequence
:rtype: str
"""
translated_seq = []
for codon in seq:
translated_seq.append(self.gencode.get_aa(codon))
return "".join(translated_seq)
def __find_codons_of_one_seq(self, seq):
"""
        Find the codon composition of one sequence
:returns: a list of codons
:rtype: list
"""
codons = []
for i in range(0, len(seq), 3):
codon = seq[i:i + 3]
if len(codon) < 3: break
codons.append(codon)
return codons
def __init_from_fasta_file(self, fil):
"""
        Initialize the class from a FASTA file
"""
for record in SeqIO.parse(fil, 'fasta'):
self.__init_from_one_record(record)
def __init_from_one_record(self, record):
"""
        Initialize the class using a SeqRecord
"""
if record.id not in self.coding_sequences:
self.coding_sequences[record.id] = str(record.seq)
else:
sys.stderr.write("WARNING: Sequence %s present many times. Written only once\n" % record.id)
def find_codons(self):
"""
Find the codon composition of all given sequences
"""
for idt, seq in self.coding_sequences.items():
self.codons[idt] = self.__find_codons_of_one_seq(seq)
def translate(self):
"""
Translate all sequences
"""
for idt, seq in self.codons.items():
self.translated_sequences[idt] = self.__translate_seq(seq)
def codon_numbers(self):
"""
        Compute the number of each codon in each sequence.
        :returns: a dict where the key is a sequence identifier and the value is a dict mapping each codon to its count in the given sequence
"""
if self.__codon_numbers is None:
nbs = {}
# total = {}
for idt, codons in self.codons.items():
nbs[idt] = {}
for codon in codons:
if codon not in nbs[idt]: nbs[idt][codon] = 0
nbs[idt][codon] += 1
self.__codon_numbers = dict(nbs)
return dict(self.__codon_numbers)
def codon_frequencies(self):
"""
Compute the codon frequencies from the codon numbers for each sequence
        :returns: a dict where the key is a sequence identifier and the value is a dict mapping each codon to its frequency in the given sequence
"""
if self.__codon_numbers is None:
self.codon_numbers()
if self.__codon_frequencies is None:
self.__codon_frequencies = {}
for idt, codon_numbers in self.__codon_numbers.items():
N = float(sum(codon_numbers.values()))
self.__codon_frequencies[idt] = {k:codon_numbers[k]/N for k in codon_numbers}
return dict(self.__codon_frequencies)
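# Example usage (illustrative sketch; "cds.fasta" is a placeholder path to a
# FASTA file of coding sequences):
# splitter = CodonSplitter("cds.fasta")
# peptides = splitter.translated_sequences       # {sequence id: translated string}
# frequencies = splitter.codon_frequencies()     # {sequence id: {codon: frequency}}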
| gpl-2.0 |
lcarlossandberg/Assignment1- | greengraph/map.py | 1 | 1262 | import numpy as np
from io import BytesIO
from matplotlib import image as img
import requests
class Map(object):
    def __init__(self, lat, long, satellite=True, zoom=10, size=(400, 400), sensor=False):
base="http://maps.googleapis.com/maps/api/staticmap?"
params=dict(
sensor= str(sensor).lower(),
zoom= zoom,
size= "x".join(map(str, size)),
center= ",".join(map(str, (lat, long) )),
style="feature:all|element:labels|visibility:off"
)
if satellite:
params["maptype"]="satellite"
        # Fetch our PNG image data
        self.image = requests.get(base, params=params).content
        # Parse our PNG image as a numpy array
        self.pixels = img.imread(BytesIO(self.image))
def green(self, threshold):
# Use NumPy to build an element-by-element logical array
greener_than_red = self.pixels[:,:,1] > threshold* self.pixels[:,:,0]
greener_than_blue = self.pixels[:,:,1] > threshold*self.pixels[:,:,2]
green = np.logical_and(greener_than_red, greener_than_blue)
return green
def count_green(self, threshold = 1.1):
return np.sum(self.green(threshold))
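# Example usage (illustrative sketch; the coordinates below are roughly central
# London, and fetching the map requires network access to the Google Static Maps API):
# london = Map(51.50, -0.12)
# green_pixels = london.count_green(threshold=1.1)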
| mit |
maropu/spark | python/pyspark/pandas/indexes/__init__.py | 16 | 1065 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.pandas.indexes.base import Index # noqa: F401
from pyspark.pandas.indexes.datetimes import DatetimeIndex # noqa: F401
from pyspark.pandas.indexes.multi import MultiIndex # noqa: F401
from pyspark.pandas.indexes.numeric import Float64Index, Int64Index # noqa: F401
| apache-2.0 |
4shadoww/hakkuframework | core/lib/future/utils/__init__.py | 36 | 20238 | """
A selection of cross-compatible functions for Python 2 and 3.
This module exports useful functions for 2/3 compatible code:
* bind_method: binds functions to classes
* ``native_str_to_bytes`` and ``bytes_to_native_str``
* ``native_str``: always equal to the native platform string object (because
this may be shadowed by imports from future.builtins)
* lists: lrange(), lmap(), lzip(), lfilter()
* iterable method compatibility:
- iteritems, iterkeys, itervalues
- viewitems, viewkeys, viewvalues
These use the original method if available, otherwise they use items,
keys, values.
* types:
* text_type: unicode in Python 2, str in Python 3
    * binary_type: str in Python 2, bytes in Python 3
* string_types: basestring in Python 2, str in Python 3
* bchr(c):
Take an integer and make a 1-character byte string
* bord(c)
Take the result of indexing on a byte string and make an integer
* tobytes(s)
Take a text string, a byte string, or a sequence of characters taken
from a byte string, and make a byte string.
* raise_from()
* raise_with_traceback()
This module also defines these decorators:
* ``python_2_unicode_compatible``
* ``with_metaclass``
* ``implements_iterator``
Some of the functions in this module come from the following sources:
* Jinja2 (BSD licensed: see
https://github.com/mitsuhiko/jinja2/blob/master/LICENSE)
* Pandas compatibility module pandas.compat
* six.py by Benjamin Peterson
* Django
"""
import types
import sys
import numbers
import functools
import copy
import inspect
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
PY26 = sys.version_info[0:2] == (2, 6)
PY27 = sys.version_info[0:2] == (2, 7)
PYPY = hasattr(sys, 'pypy_translation_info')
def python_2_unicode_compatible(cls):
"""
A decorator that defines __unicode__ and __str__ methods under Python
2. Under Python 3, this decorator is a no-op.
To support Python 2 and 3 with a single code base, define a __str__
method returning unicode text and apply this decorator to the class, like
this::
>>> from future.utils import python_2_unicode_compatible
>>> @python_2_unicode_compatible
... class MyClass(object):
... def __str__(self):
... return u'Unicode string: \u5b54\u5b50'
>>> a = MyClass()
Then, after this import:
>>> from future.builtins import str
the following is ``True`` on both Python 3 and 2::
>>> str(a) == a.encode('utf-8').decode('utf-8')
True
and, on a Unicode-enabled terminal with the right fonts, these both print the
Chinese characters for Confucius::
>>> print(a)
>>> print(str(a))
The implementation comes from django.utils.encoding.
"""
if not PY3:
cls.__unicode__ = cls.__str__
cls.__str__ = lambda self: self.__unicode__().encode('utf-8')
return cls
def with_metaclass(meta, *bases):
"""
Function from jinja2/_compat.py. License: BSD.
Use it like this::
class BaseForm(object):
pass
class FormType(type):
pass
class Form(with_metaclass(FormType, BaseForm)):
pass
This requires a bit of explanation: the basic idea is to make a
dummy metaclass for one level of class instantiation that replaces
itself with the actual metaclass. Because of internal type checks
we also need to make sure that we downgrade the custom metaclass
for one level to something closer to type (that's why __call__ and
__init__ comes back from type etc.).
This has the advantage over six.with_metaclass of not introducing
dummy classes into the final MRO.
"""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
# Definitions from pandas.compat and six.py follow:
if PY3:
def bchr(s):
return bytes([s])
def bstr(s):
if isinstance(s, str):
return bytes(s, 'latin-1')
else:
return bytes(s)
def bord(s):
return s
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
else:
# Python 2
def bchr(s):
return chr(s)
def bstr(s):
return str(s)
def bord(s):
return ord(s)
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
###
if PY3:
def tobytes(s):
if isinstance(s, bytes):
return s
else:
if isinstance(s, str):
return s.encode('latin-1')
else:
return bytes(s)
else:
# Python 2
def tobytes(s):
if isinstance(s, unicode):
return s.encode('latin-1')
else:
return ''.join(s)
tobytes.__doc__ = """
Encodes to latin-1 (where the first 256 chars are the same as
ASCII.)
"""
if PY3:
def native_str_to_bytes(s, encoding='utf-8'):
return s.encode(encoding)
def bytes_to_native_str(b, encoding='utf-8'):
return b.decode(encoding)
def text_to_native_str(t, encoding=None):
return t
else:
# Python 2
def native_str_to_bytes(s, encoding=None):
from future.types import newbytes # to avoid a circular import
return newbytes(s)
def bytes_to_native_str(b, encoding=None):
return native(b)
def text_to_native_str(t, encoding='ascii'):
"""
Use this to create a Py2 native string when "from __future__ import
unicode_literals" is in effect.
"""
return unicode(t).encode(encoding)
native_str_to_bytes.__doc__ = """
On Py3, returns an encoded string.
On Py2, returns a newbytes type, ignoring the ``encoding`` argument.
"""
if PY3:
# list-producing versions of the major Python iterating functions
def lrange(*args, **kwargs):
return list(range(*args, **kwargs))
def lzip(*args, **kwargs):
return list(zip(*args, **kwargs))
def lmap(*args, **kwargs):
return list(map(*args, **kwargs))
def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
else:
import __builtin__
# Python 2-builtin ranges produce lists
lrange = __builtin__.range
lzip = __builtin__.zip
lmap = __builtin__.map
lfilter = __builtin__.filter
def isidentifier(s, dotted=False):
'''
A function equivalent to the str.isidentifier method on Py3
'''
if dotted:
return all(isidentifier(a) for a in s.split('.'))
if PY3:
return s.isidentifier()
else:
import re
_name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
return bool(_name_re.match(s))
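# Example (illustrative sketch, not part of the original module):
#
#   from future.utils import isidentifier
#   assert isidentifier("spam")
#   assert isidentifier("spam.eggs", dotted=True)
#   assert not isidentifier("2spam")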
def viewitems(obj, **kwargs):
"""
Function for iterating over dictionary items with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewitems", None)
if not func:
func = obj.items
return func(**kwargs)
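# Example (illustrative sketch, not part of the original module):
#
#   from future.utils import viewitems
#   d = {'a': 1, 'b': 2}
#   assert sorted(viewitems(d)) == [('a', 1), ('b', 2)]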
def viewkeys(obj, **kwargs):
"""
Function for iterating over dictionary keys with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def viewvalues(obj, **kwargs):
"""
Function for iterating over dictionary values with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewvalues", None)
if not func:
func = obj.values
return func(**kwargs)
def iteritems(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewitems().
"""
func = getattr(obj, "iteritems", None)
if not func:
func = obj.items
return func(**kwargs)
def iterkeys(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewkeys().
"""
func = getattr(obj, "iterkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def itervalues(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewvalues().
"""
func = getattr(obj, "itervalues", None)
if not func:
func = obj.values
return func(**kwargs)
def bind_method(cls, name, func):
"""Bind a method to class, python 2 and python 3 compatible.
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has an issue with bound/unbound methods
if not PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
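# Example (illustrative sketch; the class and function below are hypothetical,
# not part of the original module):
#
#   from future.utils import bind_method
#
#   class Greeter(object):
#       pass
#
#   def greet(self):
#       return "hello"
#
#   bind_method(Greeter, "greet", greet)
#   assert Greeter().greet() == "hello"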
def getexception():
return sys.exc_info()[1]
def _get_caller_globals_and_locals():
"""
Returns the globals and locals of the calling frame.
Is there an alternative to frame hacking here?
"""
caller_frame = inspect.stack()[2]
myglobals = caller_frame[0].f_globals
mylocals = caller_frame[0].f_locals
return myglobals, mylocals
def _repr_strip(mystring):
"""
Returns the string without any initial or final quotes.
"""
r = repr(mystring)
if r.startswith("'") and r.endswith("'"):
return r[1:-1]
else:
return r
if PY3:
def raise_from(exc, cause):
"""
Equivalent to:
raise EXCEPTION from CAUSE
on Python 3. (See PEP 3134).
"""
myglobals, mylocals = _get_caller_globals_and_locals()
# We pass the exception and cause along with other globals
# when we exec():
myglobals = myglobals.copy()
myglobals['__python_future_raise_from_exc'] = exc
myglobals['__python_future_raise_from_cause'] = cause
execstr = "raise __python_future_raise_from_exc from __python_future_raise_from_cause"
exec(execstr, myglobals, mylocals)
def raise_(tp, value=None, tb=None):
"""
A function that matches the Python 2.x ``raise`` statement. This
allows re-raising exceptions with the cls value and traceback on
Python 2 and 3.
"""
if value is not None and isinstance(tp, Exception):
raise TypeError("instance exception may not have a separate value")
if value is not None:
exc = tp(value)
else:
exc = tp
if exc.__traceback__ is not tb:
raise exc.with_traceback(tb)
raise exc
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc.with_traceback(traceback)
else:
def raise_from(exc, cause):
"""
Equivalent to:
raise EXCEPTION from CAUSE
on Python 3. (See PEP 3134).
"""
# Is either arg an exception class (e.g. IndexError) rather than
# instance (e.g. IndexError('my message here')? If so, pass the
# name of the class undisturbed through to "raise ... from ...".
if isinstance(exc, type) and issubclass(exc, Exception):
e = exc()
# exc = exc.__name__
# execstr = "e = " + _repr_strip(exc) + "()"
# myglobals, mylocals = _get_caller_globals_and_locals()
# exec(execstr, myglobals, mylocals)
else:
e = exc
e.__suppress_context__ = False
if isinstance(cause, type) and issubclass(cause, Exception):
e.__cause__ = cause()
e.__suppress_context__ = True
elif cause is None:
e.__cause__ = None
e.__suppress_context__ = True
elif isinstance(cause, BaseException):
e.__cause__ = cause
e.__suppress_context__ = True
else:
raise TypeError("exception causes must derive from BaseException")
e.__context__ = sys.exc_info()[1]
raise e
exec('''
def raise_(tp, value=None, tb=None):
raise tp, value, tb
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc, None, traceback
'''.strip())
raise_with_traceback.__doc__ = (
"""Raise exception with existing traceback.
If traceback is not passed, uses sys.exc_info() to get traceback."""
)
# Deprecated alias for backward compatibility with ``future`` versions < 0.11:
reraise = raise_
def implements_iterator(cls):
'''
From jinja2/_compat.py. License: BSD.
Use as a decorator like this::
@implements_iterator
class UppercasingIterator(object):
def __init__(self, iterable):
self._iter = iter(iterable)
def __iter__(self):
return self
def __next__(self):
return next(self._iter).upper()
'''
if PY3:
return cls
else:
cls.next = cls.__next__
del cls.__next__
return cls
if PY3:
get_next = lambda x: x.next
else:
get_next = lambda x: x.__next__
def encode_filename(filename):
if PY3:
return filename
else:
if isinstance(filename, unicode):
return filename.encode('utf-8')
return filename
def is_new_style(cls):
"""
Python 2.7 has both new-style and old-style classes. Old-style classes can
be pesky in some circumstances, such as when using inheritance. Use this
function to test for whether a class is new-style. (Python 3 only has
new-style classes.)
"""
return hasattr(cls, '__class__') and ('__dict__' in dir(cls)
or hasattr(cls, '__slots__'))
# The native platform string and bytes types. Useful because ``str`` and
# ``bytes`` are redefined on Py2 by ``from future.builtins import *``.
native_str = str
native_bytes = bytes
def istext(obj):
"""
Deprecated. Use::
>>> isinstance(obj, str)
after this import:
>>> from future.builtins import str
"""
return isinstance(obj, type(u''))
def isbytes(obj):
"""
Deprecated. Use::
>>> isinstance(obj, bytes)
after this import:
>>> from future.builtins import bytes
"""
return isinstance(obj, type(b''))
def isnewbytes(obj):
"""
Equivalent to the result of ``isinstance(obj, newbytes)`` were
``__instancecheck__`` not overridden on the newbytes subclass. In
    other words: is it REALLY a newbytes instance, not a Py2 native str
    object?
"""
# TODO: generalize this so that it works with subclasses of newbytes
# Import is here to avoid circular imports:
from future.types.newbytes import newbytes
return type(obj) == newbytes
def isint(obj):
"""
Deprecated. Tests whether an object is a Py3 ``int`` or either a Py2 ``int`` or
``long``.
Instead of using this function, you can use:
>>> from future.builtins import int
>>> isinstance(obj, int)
The following idiom is equivalent:
>>> from numbers import Integral
>>> isinstance(obj, Integral)
"""
return isinstance(obj, numbers.Integral)
def native(obj):
"""
On Py3, this is a no-op: native(obj) -> obj
On Py2, returns the corresponding native Py2 types that are
superclasses for backported objects from Py3:
>>> from builtins import str, bytes, int
>>> native(str(u'ABC'))
u'ABC'
>>> type(native(str(u'ABC')))
unicode
>>> native(bytes(b'ABC'))
b'ABC'
>>> type(native(bytes(b'ABC')))
bytes
>>> native(int(10**20))
100000000000000000000L
>>> type(native(int(10**20)))
long
Existing native types on Py2 will be returned unchanged:
>>> type(native(u'ABC'))
unicode
"""
if hasattr(obj, '__native__'):
return obj.__native__()
else:
return obj
# Implementation of exec_ is from ``six``:
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
# Defined here for backward compatibility:
def old_div(a, b):
"""
DEPRECATED: import ``old_div`` from ``past.utils`` instead.
Equivalent to ``a / b`` on Python 2 without ``from __future__ import
division``.
TODO: generalize this to other objects (like arrays etc.)
"""
if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):
return a // b
else:
return a / b
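# Example (illustrative sketch; per the note above, ``old_div`` is normally
# imported from ``past.utils``, but the behaviour is the same here):
#
#   assert old_div(7, 2) == 3       # both integers -> floor division
#   assert old_div(7.0, 2) == 3.5   # otherwise -> true division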
def as_native_str(encoding='utf-8'):
'''
A decorator to turn a function or method call that returns text, i.e.
unicode, into one that returns a native platform str.
Use it as a decorator like this::
from __future__ import unicode_literals
class MyClass(object):
@as_native_str(encoding='ascii')
def __repr__(self):
return next(self._iter).upper()
'''
if PY3:
return lambda f: f
else:
def encoder(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs).encode(encoding=encoding)
return wrapper
return encoder
# listvalues and listitems definitions from Nick Coghlan's (withdrawn)
# PEP 496:
try:
dict.iteritems
except AttributeError:
# Python 3
def listvalues(d):
return list(d.values())
def listitems(d):
return list(d.items())
else:
# Python 2
def listvalues(d):
return d.values()
def listitems(d):
return d.items()
if PY3:
def ensure_new_type(obj):
return obj
else:
def ensure_new_type(obj):
from future.types.newbytes import newbytes
from future.types.newstr import newstr
from future.types.newint import newint
from future.types.newdict import newdict
native_type = type(native(obj))
# Upcast only if the type is already a native (non-future) type
if issubclass(native_type, type(obj)):
# Upcast
if native_type == str: # i.e. Py2 8-bit str
return newbytes(obj)
elif native_type == unicode:
return newstr(obj)
elif native_type == int:
return newint(obj)
elif native_type == long:
return newint(obj)
elif native_type == dict:
return newdict(obj)
else:
return obj
else:
# Already a new type
assert type(obj) in [newbytes, newstr]
return obj
__all__ = ['PY2', 'PY26', 'PY3', 'PYPY',
'as_native_str', 'bind_method', 'bord', 'bstr',
'bytes_to_native_str', 'encode_filename', 'ensure_new_type',
'exec_', 'get_next', 'getexception', 'implements_iterator',
'is_new_style', 'isbytes', 'isidentifier', 'isint',
'isnewbytes', 'istext', 'iteritems', 'iterkeys', 'itervalues',
'lfilter', 'listitems', 'listvalues', 'lmap', 'lrange',
'lzip', 'native', 'native_bytes', 'native_str',
'native_str_to_bytes', 'old_div',
'python_2_unicode_compatible', 'raise_',
'raise_with_traceback', 'reraise', 'text_to_native_str',
'tobytes', 'viewitems', 'viewkeys', 'viewvalues',
'with_metaclass'
]
| mit |
MLWave/kepler-mapper | kmapper/plotlyviz.py | 1 | 21625 | from __future__ import division
import numpy as np
from .visuals import (
init_color_function,
_size_node,
_format_projection_statistics,
_format_cluster_statistics,
_color_function,
format_meta,
_to_html_format,
_map_val2color,
graph_data_distribution,
build_histogram,
_tooltip_components,
)
try:
import igraph as ig
import plotly.graph_objs as go
import ipywidgets as ipw
import plotly.io as pio
except ImportError:
print(
"""To use the plotly visualization tools, you must have the packages python-igraph, plotly, and ipywidgets installed in your environment."""
""" It looks like at least one of these is missing. Please install again with"""
"""\n\n\t`pip install python-igraph plotly ipywidgets`\n\nand try again"""
)
raise
default_colorscale = [
[0.0, "rgb(68, 1, 84)"], # Viridis
[0.1, "rgb(72, 35, 116)"],
[0.2, "rgb(64, 67, 135)"],
[0.3, "rgb(52, 94, 141)"],
[0.4, "rgb(41, 120, 142)"],
[0.5, "rgb(32, 144, 140)"],
[0.6, "rgb(34, 167, 132)"],
[0.7, "rgb(68, 190, 112)"],
[0.8, "rgb(121, 209, 81)"],
[0.9, "rgb(189, 222, 38)"],
[1.0, "rgb(253, 231, 36)"],
]
def mpl_to_plotly(cmap, n_entries):
h = 1.0 / (n_entries - 1)
pl_colorscale = []
for k in range(n_entries):
C = list(map(np.uint8, np.array(cmap(k * h)[:3]) * 255))
pl_colorscale.append(
[round(k * h, 2), "rgb" + str((C[0], C[1], C[2]))]
) # Python 2.7+
# pl_colorscale.append([round(k*h, 2), f'rgb({C[0]}, {C[1]}, {C[2]})']) # Python 3.6+
return pl_colorscale
def plotlyviz(
scomplex,
colorscale=None,
title="Kepler Mapper",
graph_layout="kk",
color_function=None,
color_function_name=None,
dashboard=False,
graph_data=False,
factor_size=3,
edge_linewidth=1.5,
node_linecolor="rgb(200,200,200)",
width=600,
height=500,
bgcolor="rgba(240, 240, 240, 0.95)",
left=10,
bottom=35,
summary_height=300,
summary_width=600,
summary_left=20,
summary_right=20,
hist_left=25,
hist_right=25,
member_textbox_width=800,
filename=None,
):
"""
Visualizations and dashboards for kmapper graphs using Plotly. This method is suitable for use in Jupyter notebooks.
The generated FigureWidget can be updated (by performing a restyle or relayout). For example, let us add a title
to the colorbar (the name of the color function, if any),
and set the title font size. To perform these updates faster, Plotly 3.+ provides a context manager that batches up all data and layout updates:
To display more info on the generated kmapper-graph, define two more FigureWidget(s):
the global node distribution figure, and a dummy figure
that displays info on the algorithms involved in getting the graph from data, as well as sklearn class instances.
A FigureWidget has event listeners for hovering, clicking or selecting. Using the first one for `fw_graph`
    we define, via the function `hovering_widgets()`, widgets that display the node distribution when a node is hovered over, and two textboxes for the cluster size and the member ids/labels of the hovered node's members.
Parameters
-----------
scomplex: dict
Simplicial complex is the output from the KeplerMapper `map` method.
title: str
Title of output graphic
graph_layout: igraph layout;
recommended 'kk' (kamada-kawai) or 'fr' (fruchterman-reingold)
colorscale:
Plotly colorscale(colormap) to color graph nodes
dashboard: bool, default is False
If true, display complete dashboard of node information
graph_data: bool, default is False
If true, display graph metadata
factor_size: double, default is 3
a factor for the node size
edge_linewidth : double, default is 1.5
node_linecolor: color str, default is "rgb(200,200,200)"
width: int, default is 600,
height: int, default is 500,
bgcolor: color str, default is "rgba(240, 240, 240, 0.95)",
left: int, default is 10,
bottom: int, default is 35,
summary_height: int, default is 300,
summary_width: int, default is 600,
summary_left: int, default is 20,
summary_right: int, default is 20,
hist_left: int, default is 25,
hist_right: int, default is 25,
member_textbox_width: int, default is 800,
filename: str, default is None
if filename is given, the graphic will be saved to that file.
Returns
---------
result: plotly.FigureWidget
        A FigureWidget that can be shown or edited. See the Plotly Demo notebook for examples of use.
"""
if not colorscale:
colorscale = default_colorscale
kmgraph, mapper_summary, n_color_distribution = get_mapper_graph(
scomplex,
colorscale=colorscale,
color_function=color_function,
color_function_name=color_function_name,
)
annotation = get_kmgraph_meta(mapper_summary)
plgraph_data = plotly_graph(
kmgraph,
graph_layout=graph_layout,
colorscale=colorscale,
factor_size=factor_size,
edge_linewidth=edge_linewidth,
node_linecolor=node_linecolor,
)
layout = plot_layout(
title=title,
width=width,
height=height,
annotation_text=annotation,
bgcolor=bgcolor,
left=left,
bottom=bottom,
)
result = go.FigureWidget(data=plgraph_data, layout=layout)
if color_function_name:
with result.batch_update():
result.data[1].marker.colorbar.title = color_function_name
result.data[1].marker.colorbar.titlefont.size = 10
if dashboard or graph_data:
fw_hist = node_hist_fig(n_color_distribution, left=hist_left, right=hist_right)
fw_summary = summary_fig(
mapper_summary,
width=summary_width,
height=summary_height,
left=summary_left,
right=summary_right,
)
fw_graph = result
result = hovering_widgets(
kmgraph, fw_graph, member_textbox_width=member_textbox_width
)
if graph_data:
result = ipw.VBox([fw_graph, ipw.HBox([fw_summary, fw_hist])])
if filename:
pio.write_image(result, filename)
return result
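# Example usage (illustrative sketch; the toy data, the projection and the
# variable names below are assumptions, not part of this module):
#
#   import numpy as np
#   import kmapper as km
#
#   data = np.random.rand(200, 3)
#   mapper = km.KeplerMapper()
#   lens = mapper.fit_transform(data, projection=[0])
#   scomplex = mapper.map(lens, data)
#   plotlyviz(scomplex, title="Toy example", dashboard=True)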
def scomplex_to_graph(
simplicial_complex,
color_function,
X,
X_names,
lens,
lens_names,
custom_tooltips,
colorscale,
):
json_dict = {"nodes": [], "links": []}
node_id_to_num = {}
for i, (node_id, member_ids) in enumerate(simplicial_complex["nodes"].items()):
node_id_to_num[node_id] = i
projection_stats, cluster_stats, member_histogram = _tooltip_components(
member_ids, X, X_names, lens, lens_names, color_function, i, colorscale
)
n = {
"id": i,
"name": node_id,
"member_ids": member_ids,
"color": _color_function(member_ids, color_function),
"size": _size_node(member_ids),
"cluster": cluster_stats,
"distribution": member_histogram,
"projection": projection_stats,
"custom_tooltips": custom_tooltips,
}
json_dict["nodes"].append(n)
for i, (node_id, linked_node_ids) in enumerate(simplicial_complex["links"].items()):
for linked_node_id in linked_node_ids:
lnk = {
"source": node_id_to_num[node_id],
"target": node_id_to_num[linked_node_id],
}
json_dict["links"].append(lnk)
return json_dict
def get_mapper_graph(
simplicial_complex,
color_function=None,
color_function_name=None,
colorscale=None,
custom_tooltips=None,
custom_meta=None,
X=None,
X_names=None,
lens=None,
lens_names=None,
):
"""Generate data for mapper graph visualization and annotation.
Parameters
----------
simplicial_complex : dict
Simplicial complex is the output from the KeplerMapper `map` method.
Returns
-------
the graph dictionary in a json representation, the mapper summary
and the node_distribution
Example
-------
>>> kmgraph, mapper_summary, n_distribution = get_mapper_graph(simplicial_complex)
"""
if not colorscale:
colorscale = default_colorscale
if not len(simplicial_complex["nodes"]) > 0:
raise Exception(
"A mapper graph should have more than 0 nodes. This might be because your clustering algorithm might be too sensitive and be classifying all points as noise."
)
color_function = init_color_function(simplicial_complex, color_function)
if X_names is None:
X_names = []
if lens_names is None:
lens_names = []
json_graph = scomplex_to_graph(
simplicial_complex,
color_function,
X,
X_names,
lens,
lens_names,
custom_tooltips,
colorscale=colorscale,
)
colorf_distribution = graph_data_distribution(
simplicial_complex, color_function, colorscale
)
mapper_summary = format_meta(
simplicial_complex,
color_function_name=color_function_name,
custom_meta=custom_meta,
)
return json_graph, mapper_summary, colorf_distribution
def plotly_graph(
kmgraph,
graph_layout="kk",
colorscale=None,
showscale=True,
factor_size=3,
edge_linecolor="rgb(180,180,180)",
edge_linewidth=1.5,
node_linecolor="rgb(255,255,255)",
node_linewidth=1.0,
):
"""Generate Plotly data structures that represent the mapper graph
Parameters
----------
kmgraph: dict representing the mapper graph,
returned by the function get_mapper_graph()
graph_layout: igraph layout; recommended 'kk' (kamada-kawai)
or 'fr' (fruchterman-reingold)
colorscale: a Plotly colorscale(colormap) to color graph nodes
showscale: boolean to display or not the colorbar
factor_size: a factor for the node size
Returns
-------
The plotly traces (dicts) representing the graph edges and nodes
"""
if not colorscale:
colorscale = default_colorscale
# define an igraph.Graph instance of n_nodes
n_nodes = len(kmgraph["nodes"])
if n_nodes == 0:
raise ValueError("Your graph has 0 nodes")
G = ig.Graph(n=n_nodes)
links = [(e["source"], e["target"]) for e in kmgraph["links"]]
G.add_edges(links)
layt = G.layout(graph_layout)
hover_text = [node["name"] for node in kmgraph["nodes"]]
color_vals = [node["color"] for node in kmgraph["nodes"]]
node_size = np.array(
[factor_size * node["size"] for node in kmgraph["nodes"]], dtype=np.int
)
Xn, Yn, Xe, Ye = _get_plotly_data(links, layt)
edge_trace = dict(
type="scatter",
x=Xe,
y=Ye,
mode="lines",
line=dict(color=edge_linecolor, width=edge_linewidth),
hoverinfo="none",
)
node_trace = dict(
type="scatter",
x=Xn,
y=Yn,
mode="markers",
marker=dict(
size=node_size.tolist(),
color=color_vals,
opacity=1.0,
colorscale=colorscale,
showscale=showscale,
line=dict(color=node_linecolor, width=node_linewidth),
colorbar=dict(thickness=20, ticklen=4, x=1.01, tickfont=dict(size=10)),
),
text=hover_text,
hoverinfo="text",
)
return [edge_trace, node_trace]
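# Example usage (illustrative sketch; assumes ``kmgraph`` was produced by
# get_mapper_graph() defined above):
#
#   edge_trace, node_trace = plotly_graph(kmgraph, graph_layout="fr")
#   fig = go.FigureWidget(data=[edge_trace, node_trace], layout=plot_layout())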
def get_kmgraph_meta(mapper_summary):
""" Extract info from mapper summary to be displayed below the graph plot
"""
d = mapper_summary["custom_meta"]
meta = (
"<b>N_cubes:</b> "
+ str(d["n_cubes"])
+ " <b>Perc_overlap:</b> "
+ str(d["perc_overlap"])
)
meta += (
"<br><b>Nodes:</b> "
+ str(mapper_summary["n_nodes"])
+ " <b>Edges:</b> "
+ str(mapper_summary["n_edges"])
+ " <b>Total samples:</b> "
+ str(mapper_summary["n_total"])
+ " <b>Unique_samples:</b> "
+ str(mapper_summary["n_unique"])
)
return meta
def plot_layout(
title="TDA KMapper",
width=600,
height=600,
bgcolor="rgba(255, 255, 255, 1)",
annotation_text=None,
annotation_x=0,
annotation_y=-0.01,
top=100,
left=60,
right=60,
bottom=60,
):
"""Set the plotly layout
Parameters
----------
width, height: integers
setting width and height of plot window
bgcolor: string,
rgba or hex color code for the background color
annotation_text: string
meta data to be displayed
annotation_x & annotation_y:
        The coordinates of the point where the annotation is inserted; a negative y coordinate places the annotation below the plot
"""
pl_layout = dict(
title=title,
font=dict(size=12),
showlegend=False,
autosize=False,
width=width,
height=height,
xaxis=dict(visible=False),
yaxis=dict(visible=False),
hovermode="closest",
plot_bgcolor=bgcolor,
margin=dict(t=top, b=bottom, l=left, r=right),
)
if annotation_text is None:
return pl_layout
else:
annotations = [
dict(
showarrow=False,
text=annotation_text,
xref="paper",
yref="paper",
x=annotation_x,
y=annotation_y,
align="left",
xanchor="left",
yanchor="top",
font=dict(size=12),
)
]
pl_layout.update(annotations=annotations)
return pl_layout
def node_hist_fig(
node_color_distribution,
title="Graph Node Distribution",
width=400,
height=300,
top=60,
left=25,
bottom=60,
right=25,
bgcolor="rgb(240,240,240)",
y_gridcolor="white",
):
"""Define the plotly plot representing the node histogram
Parameters
----------
    node_color_distribution: list of dicts describing the node histogram, as returned by build_histogram
width, height: integers - width and height of the histogram FigureWidget
left, top, right, bottom: ints; number of pixels around the FigureWidget
    bgcolor: rgb or hex color code for the figure background color
    y_gridcolor: rgb or hex color code for the y-axis grid color
Returns
-------
FigureWidget object representing the histogram of the graph nodes
"""
    text = [
        "{}%".format(d["perc"]) for d in node_color_distribution
    ]
pl_hist = go.Bar(
y=[d["height"] for d in node_color_distribution],
marker=dict(color=[d["color"] for d in node_color_distribution]),
text=text,
hoverinfo="y+text",
)
hist_layout = dict(
title=title,
width=width,
height=height,
font=dict(size=12),
xaxis=dict(showline=True, zeroline=False, showgrid=False, showticklabels=False),
yaxis=dict(showline=False, gridcolor=y_gridcolor, tickfont=dict(size=10)),
bargap=0.01,
margin=dict(l=left, r=right, b=bottom, t=top),
hovermode="x",
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=[pl_hist], layout=hist_layout)
def summary_fig(
mapper_summary,
width=600,
height=500,
top=60,
left=20,
bottom=60,
right=20,
bgcolor="rgb(240,240,240)",
):
"""Define a dummy figure that displays info on the algorithms and
sklearn class instances or methods used
Returns a FigureWidget object representing the figure
"""
text = _text_mapper_summary(mapper_summary)
data = [
dict(
type="scatter",
x=[0, width],
y=[height, 0],
mode="text",
text=[text, ""],
textposition="bottom right",
hoverinfo="none",
)
]
layout = dict(
title="Algorithms and scikit-learn objects/methods",
width=width,
height=height,
font=dict(size=12),
xaxis=dict(visible=False),
yaxis=dict(visible=False, range=[0, height + 5]),
margin=dict(t=top, b=bottom, l=left, r=right),
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=data, layout=layout)
def hovering_widgets(
kmgraph,
graph_fw,
ctooltips=False,
width=400,
height=300,
top=100,
left=50,
bgcolor="rgb(240,240,240)",
y_gridcolor="white",
member_textbox_width=200,
):
"""Defines the widgets that display the distribution of each node on hover
and the members of each nodes
Parameters
----------
    kmgraph: the kepler-mapper graph dict returned by `get_mapper_graph()`
graph_fw: the FigureWidget representing the graph
    ctooltips: boolean; if True the node["custom_tooltips"] are passed to the
             member_textbox, otherwise the node["member_ids"] are used
width, height, top refer to the figure
size and position of the hovered node distribution
Returns
-------
a box containing the graph figure, the figure of the hovered node
distribution, and the textboxes displaying the cluster size and member_ids
or custom tooltips for hovered node members
"""
fnode = kmgraph["nodes"][0]
fwc = node_hist_fig(
fnode["distribution"],
title="Cluster Member Distribution",
width=width,
height=height,
top=top,
left=left,
bgcolor=bgcolor,
y_gridcolor=y_gridcolor,
)
clust_textbox = ipw.Text(
value="{:d}".format(fnode["cluster"]["size"]),
description="Cluster size:",
disabled=False,
continuous_update=True,
)
clust_textbox.layout = dict(margin="10px 10px 10px 10px", width="200px")
member_textbox = ipw.Textarea(
value=", ".join(str(x) for x in fnode["member_ids"])
if not ctooltips
else ", ".join(str(x) for x in fnode["custom_tooltips"]),
description="Members:",
disabled=False,
continuous_update=True,
)
member_textbox.layout = dict(
margin="5px 5px 5px 10px", width=str(member_textbox_width) + "px"
)
def do_on_hover(trace, points, state):
if not points.point_inds:
return
ind = points.point_inds[0] # get the index of the hovered node
node = kmgraph["nodes"][ind]
# on hover do:
        with fwc.batch_update():  # update data in the cluster member histogram
fwc.data[0].text = [
"{:.1f}%".format(d["perc"]) for d in node["distribution"]
]
fwc.data[0].y = [d["height"] for d in node["distribution"]]
fwc.data[0].marker.color = [d["color"] for d in node["distribution"]]
clust_textbox.value = "{:d}".format(node["cluster"]["size"])
member_textbox.value = (
", ".join(str(x) for x in node["member_ids"])
if not ctooltips
else ", ".join(str(x) for x in node["custom_tooltips"])
)
trace = graph_fw.data[1]
trace.on_hover(do_on_hover)
return ipw.VBox([ipw.HBox([graph_fw, fwc]), clust_textbox, member_textbox])
def _get_plotly_data(E, coords):
# E : the list of tuples representing the graph edges
# coords: list of node coordinates assigned by igraph.Layout
N = len(coords)
Xnodes = [coords[k][0] for k in range(N)] # x-coordinates of nodes
    Ynodes = [coords[k][1] for k in range(N)]  # y-coordinates of nodes
Xedges = []
Yedges = []
for e in E:
Xedges.extend([coords[e[0]][0], coords[e[1]][0], None])
Yedges.extend([coords[e[0]][1], coords[e[1]][1], None])
return Xnodes, Ynodes, Xedges, Yedges
def _text_mapper_summary(mapper_summary):
d = mapper_summary["custom_meta"]
text = "<br><b>Projection: </b>" + d["projection"]
text += (
"<br><b>Clusterer: </b>" + d["clusterer"] + "<br><b>Scaler: </b>" + d["scaler"]
)
if "color_function" in d.keys():
text += "<br><b>Color function: </b>" + d["color_function"]
return text
def _hover_format(member_ids, custom_tooltips, X, X_names, lens, lens_names):
cluster_data = _format_cluster_statistics(member_ids, X, X_names)
tooltip = ""
custom_tooltips = (
custom_tooltips[member_ids] if custom_tooltips is not None else member_ids
)
val_size = cluster_data["size"]
tooltip += "{val_size}".format(**locals())
return tooltip
| mit |
wezteoh/vgg16_tf | vgg16_tf.py | 1 | 5033 | __author__ = "Wei Zhen Teoh"
# This is a tensorflow model of the 16-layer convolutional neural network used
# by the VGG team in the ILSVRC-2014 competition
# I wrote up this model with strong reference to Davi Frossard's post on
# https://www.cs.toronto.edu/~frossard/post/vgg16/
import matplotlib.pyplot as plt
from scipy.misc import imread
from scipy.misc import imresize
import numpy as np
import tensorflow as tf
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=1e-1, dtype=tf.float32)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.0, shape=shape, dtype=tf.float32)
return tf.Variable(initial)
# Define network architecture
parameters = []
# input
inputs = tf.placeholder(tf.float32, shape = [None, 224, 224, 3])
# zero-mean input
mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
recentred_inputs = inputs-mean
# conv1_1
W_conv1_1 = weight_variable([3, 3, 3, 64])
b_conv1_1 = bias_variable([64])
conv1_1 = tf.nn.relu(conv2d(recentred_inputs, W_conv1_1) + b_conv1_1)
parameters += [W_conv1_1, b_conv1_1]
# conv1_2
W_conv1_2 = weight_variable([3, 3, 64, 64])
b_conv1_2 = bias_variable([64])
conv1_2 = tf.nn.relu(conv2d(conv1_1, W_conv1_2) + b_conv1_2)
parameters += [W_conv1_2, b_conv1_2]
# pool1
pool1 = max_pool_2x2(conv1_2)
# conv2_1
W_conv2_1 = weight_variable([3, 3, 64, 128])
b_conv2_1 = bias_variable([128])
conv2_1 = tf.nn.relu(conv2d(pool1, W_conv2_1) + b_conv2_1)
parameters += [W_conv2_1, b_conv2_1]
# conv2_2
W_conv2_2 = weight_variable([3, 3, 128, 128])
b_conv2_2 = bias_variable([128])
conv2_2 = tf.nn.relu(conv2d(conv2_1, W_conv2_2) + b_conv2_2)
parameters += [W_conv2_2, b_conv2_2]
# pool2
pool2 = max_pool_2x2(conv2_2)
# conv3_1
W_conv3_1 = weight_variable([3, 3, 128, 256])
b_conv3_1 = bias_variable([256])
conv3_1 = tf.nn.relu(conv2d(pool2, W_conv3_1) + b_conv3_1)
parameters += [W_conv3_1, b_conv3_1]
# conv3_2
W_conv3_2 = weight_variable([3, 3, 256, 256])
b_conv3_2 = bias_variable([256])
conv3_2 = tf.nn.relu(conv2d(conv3_1, W_conv3_2) + b_conv3_2)
parameters += [W_conv3_2, b_conv3_2]
# conv3_3
W_conv3_3 = weight_variable([3, 3, 256, 256])
b_conv3_3 = bias_variable([256])
conv3_3 = tf.nn.relu(conv2d(conv3_2, W_conv3_3) + b_conv3_3)
parameters += [W_conv3_3, b_conv3_3]
# pool3
pool3 = max_pool_2x2(conv3_3)
# conv4_1
W_conv4_1 = weight_variable([3, 3, 256, 512])
b_conv4_1 = bias_variable([512])
conv4_1 = tf.nn.relu(conv2d(pool3, W_conv4_1) + b_conv4_1)
parameters += [W_conv4_1, b_conv4_1]
# conv4_2
W_conv4_2 = weight_variable([3, 3, 512, 512])
b_conv4_2 = bias_variable([512])
conv4_2 = tf.nn.relu(conv2d(conv4_1, W_conv4_2) + b_conv4_2)
parameters += [W_conv4_2, b_conv4_2]
# conv4_3
W_conv4_3 = weight_variable([3, 3, 512, 512])
b_conv4_3 = bias_variable([512])
conv4_3 = tf.nn.relu(conv2d(conv4_2, W_conv4_3) + b_conv4_3)
parameters += [W_conv4_3, b_conv4_3]
# pool4
pool4 = max_pool_2x2(conv4_3)
# conv5_1
W_conv5_1 = weight_variable([3, 3, 512, 512])
b_conv5_1 = bias_variable([512])
conv5_1 = tf.nn.relu(conv2d(pool4, W_conv5_1) + b_conv5_1)
parameters += [W_conv5_1, b_conv5_1]
# conv5_2
W_conv5_2 = weight_variable([3, 3, 512, 512])
b_conv5_2 = bias_variable([512])
conv5_2 = tf.nn.relu(conv2d(conv5_1, W_conv5_2) + b_conv5_2)
parameters += [W_conv5_2, b_conv5_2]
# conv5_3
W_conv5_3 = weight_variable([3, 3, 512, 512])
b_conv5_3 = bias_variable([512])
conv5_3 = tf.nn.relu(conv2d(conv5_2, W_conv5_3) + b_conv5_3)
parameters += [W_conv5_3, b_conv5_3]
# pool5
pool5 = max_pool_2x2(conv5_3)
# fc1
W_fc1 = weight_variable([7 * 7 * 512, 4096])
b_fc1 = bias_variable([4096])
pool5_flat = tf.reshape(pool5, [-1, 7*7*512])
fc1 = tf.nn.relu(tf.matmul(pool5_flat, W_fc1) + b_fc1)
parameters += [W_fc1, b_fc1]
# fc2
W_fc2 = weight_variable([4096, 4096])
b_fc2 = bias_variable([4096])
fc2 = tf.nn.relu(tf.matmul(fc1, W_fc2) + b_fc2)
parameters += [W_fc2, b_fc2]
# fc3
W_fc3 = weight_variable([4096, 1000])
b_fc3 = bias_variable([1000])
fc3 = tf.matmul(fc2, W_fc3) + b_fc3
parameters += [W_fc3, b_fc3]
# softmax
probs = tf.nn.softmax(fc3)
# Test run
"""
from imagenet_classes import class_names
if __name__ == '__main__':
with tf.Session() as sess:
# load weights
weights = np.load('vgg16_weights.npz')
keys = sorted(weights.keys())
for i,k in enumerate(keys):
print (i, k, np.shape(weights[k]))
sess.run(parameters[i].assign(weights[k]))
# test image
img1 = imread('laska.png', mode='RGB')
img1 = imresize(img1, (224, 224))
prob = sess.run(probs, feed_dict={inputs: [img1]})[0]
preds = (np.argsort(prob)[::-1])[0:5]
for p in preds:
print (class_names[p], prob[p])
"""
| apache-2.0 |
DSLituiev/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
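# Example invocation (illustrative); the estimator names must be keys of the
# ESTIMATORS dict above:
#
#   python bench_20newsgroups.py -e logistic_regression naive_bayes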
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
ahoyosid/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with fewer samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
| bsd-3-clause |
LLNL/spack | var/spack/repos/builtin/packages/alps/package.py | 4 | 1833 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Alps(CMakePackage):
"""Algorithms for Physics Simulations
Tags: Condensed Matter Physics, Computational Physics
"""
homepage = "https://alps.comp-phys.org"
url = "http://alps.comp-phys.org/static/software/releases/alps-2.3.0-src.tar.gz"
version('2.3.0', sha256='e64208d1e5acdd6f569277413c4867e1fa366cf4a224570eacbf1e9939fca2d2')
# Refs for building from source and recipes
# http://alps.comp-phys.org/mediawiki/index.php/Building_ALPS_from_source
# https://github.com/easybuilders/easybuild-easyconfigs/tree/master/easybuild/easyconfigs/a/ALPS
# https://github.com/conda-forge/alps-feedstock/tree/master/recipe
# Package failed to build with boost version >= 1.64
depends_on('boost@:1.63.0 +chrono +date_time +filesystem +iostreams +mpi +numpy +program_options +python +regex +serialization +system +test +thread +timer')
depends_on('fftw')
depends_on('hdf5 ~mpi+hl')
depends_on('lapack')
# build fails for latest python@3.7
depends_on('python@:3.6.99', type=('build', 'link', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
# build fails with gcc@7:
conflicts('%gcc@7:')
# remove a problematic build variable
patch('mpi.patch')
extends('python')
root_cmakelists_dir = 'alps'
def cmake_args(self):
args = []
args.append('Boost_ROOT_DIR=' + self.spec['boost'].prefix)
args.append("-DCMAKE_CXX_FLAGS={0}".format(self.compiler.cxx98_flag))
return args
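# Example (illustrative sketch): with this recipe on Spack's package path, the
# package can be concretized and installed with, e.g.:
#
#   spack spec alps
#   spack install alps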
| lgpl-2.1 |
raghavrv/scikit-learn | sklearn/tests/test_calibration.py | 64 | 12999 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from scipy import sparse
from sklearn.model_selection import LeaveOneOut
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
ignore_warnings)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
@ignore_warnings
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
        # different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
def test_calibration_prob_sum():
# Test that sum of probabilities is 1. A non-regression test for
# issue #7796
num_classes = 2
X, y = make_classification(n_samples=10, n_features=5,
n_classes=num_classes)
clf = LinearSVC(C=1.0)
clf_prob = CalibratedClassifierCV(clf, method="sigmoid", cv=LeaveOneOut())
clf_prob.fit(X, y)
probs = clf_prob.predict_proba(X)
assert_array_almost_equal(probs.sum(axis=1), np.ones(probs.shape[0]))
def test_calibration_less_classes():
# Test to check calibration works fine when train set in a test-train
# split does not contain all classes
# Since this test uses LOO, at each iteration train set will not contain a
# class label
X = np.random.randn(10, 5)
y = np.arange(10)
clf = LinearSVC(C=1.0)
cal_clf = CalibratedClassifierCV(clf, method="sigmoid", cv=LeaveOneOut())
cal_clf.fit(X, y)
for i, calibrated_classifier in \
enumerate(cal_clf.calibrated_classifiers_):
proba = calibrated_classifier.predict_proba(X)
assert_array_equal(proba[:, i], np.zeros(len(y)))
assert_equal(np.all(np.hstack([proba[:, :i],
proba[:, i + 1:]])), True)
| bsd-3-clause |
abhitopia/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 18 | 4289 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is an example of using convolutional networks over characters for DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is an alternative to the Lua code here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def char_cnn_model(features, target):
"""Character level convolutional neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
byte_list = tf.reshape(
tf.one_hot(features, 256), [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
byte_list, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1],
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(
pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
bmmalone/as-auto-sklearn | as_asl/sequential_feature_step_selector.py | 1 | 7456 | import logging
logger = logging.getLogger(__name__)
import joblib
import numpy as np
import mlxtend.feature_selection
from sklearn.utils.validation import check_is_fitted
from as_asl.as_asl_ensemble import ASaslPipeline
from as_asl.validate import Validator
import misc.automl_utils as automl_utils
class SequentialFeatureStepSelector:
""" This class uses a greedy, forward sequential feature search
to select the optimal set of feature steps. Conceptually, it is
essentially the same as the SequentialFeatureSelector from mlxtend:
https://rasbt.github.io/mlxtend/user_guide/feature_selection/SequentialFeatureSelector/
However, it includes the following differences:
* It is tailored to work for the feature steps as defined for
ASlib scenarios. Namely, it includes groups of features at
a time, and it ensures feature dependencies are respected.
* It specifically uses PAR10 as the selection criterion.
* It only supports forward search.
* It never places any feature steps on an "exclude" list.
"""
def __init__(self, args, max_feature_steps=np.inf):
self.args = args
self.max_feature_steps = max_feature_steps
def _get_par10(self, feature_steps):
msg = "[SFSS]: *** evaluating feature steps: {} ***".format(feature_steps)
logger.info(msg)
total_par10 = 0.0
total_timeouts = 0
total_solved = 0
total_solver_times = 0.0
total_feature_times = 0.0
total_used_presolving = 0
show = (logger.getEffectiveLevel() == logging.DEBUG)
for fold in self.args.folds:
msg = "[SFSS]: evaluating fold: {}".format(fold)
logger.info(msg)
# first, split the scenario into training and testing
testing, training = self.scenario.get_split(fold)
msg = "[SFSS]: num testing instances: {}".format(len(testing.instances))
logger.debug(msg)
# construct and fit a pipeline with the indicated feature steps
pipeline = ASaslPipeline(
self.args,
feature_steps=feature_steps,
use_random_forests=True
)
pipeline_fit = pipeline.fit(scenario=training)
# now, check the par10 both with and without presolving
schedules = pipeline_fit.create_solver_schedules(testing)
msg = "[SFSS]: length of schedules: {}".format(len(schedules))
logger.debug(msg)
# use par10 to evaluate the pipeline
validator = Validator()
if self.scenario.performance_type[0] == "runtime":
stat = validator.validate_runtime(
schedules=schedules,
test_scenario=testing,
show=show
)
else:
stat = validator.validate_quality(
schedules=schedules,
test_scenario=testing,
show=show
)
total_par10 += stat.par10
total_timeouts += stat.timeouts
total_solved += stat.solved
total_solver_times += stat.solver_times
total_feature_times += stat.feature_times
total = total_timeouts + total_solved
total_par10 = total_par10 / total
msg = [
"",
" feature_steps: {}".format(feature_steps),
" min_par10: {}".format(total_par10),
" total_timeouts: {}".format(total_timeouts),
" total_solved: {}".format(total_solved),
" total_solver_times: {}".format(total_solver_times),
" total_feature_times: {}".format(total_feature_times)
]
msg = "\n".join(msg)
logger.info(msg)
return total_par10
def _find_best_feature_step(self):
""" Based on the current set of included feature steps, find
the next best one to include
"""
best_feature_step = None
best_par10 = np.inf
for feature_step in self.remaining_feature_steps_:
test_feature_steps = self.cur_feature_steps_ + [feature_step]
if not automl_utils.check_all_dependencies(
self.scenario, test_feature_steps):
continue
test_par10 = self._get_par10(test_feature_steps)
if test_par10 < best_par10:
best_par10 = test_par10
best_feature_step = feature_step
return (best_feature_step, best_par10)
def fit(self, scenario):
""" Select the optimal set of feature steps according to PAR10
Parameters
----------
scenario: ASlibScenario
The scenario
Returns
-------
self
"""
self.scenario = scenario
self.cur_feature_steps_ = []
self.cur_par10_ = np.inf
self.trajectory_ = []
# make sure to use a copy
self.remaining_feature_steps_ = list(scenario.feature_steps)
while len(self.cur_feature_steps_) < self.max_feature_steps:
(best_feature_step, best_par10) = self._find_best_feature_step()
if best_par10 > self.cur_par10_:
break
self.cur_feature_steps_.append(best_feature_step)
self.remaining_feature_steps_.remove(best_feature_step)
self.cur_par10_ = best_par10
t = (list(self.cur_feature_steps_), self.cur_par10_)
self.trajectory_.append(t)
self.selected_features_ = automl_utils.extract_feature_names(
self.scenario,
self.cur_feature_steps_
)
# we cannot keep the scenario around for pickling, so forget it
self.scenario = None
return self
def get_selected_feature_steps(self):
""" Get the names of the selected feature steps
"""
check_is_fitted(self, "cur_feature_steps_")
return self.cur_feature_steps_
def get_selected_features(self):
""" Get the names of the selected features based on the steps
"""
check_is_fitted(self, "selected_features_")
return self.selected_features_
def get_transformer(self):
""" Get a ColumnSelector based on the selected feature steps
"""
selected_features = self.get_selected_features()
feature_selector = mlxtend.feature_selection.ColumnSelector(
cols=selected_features)
return feature_selector
def dump(self, filename):
""" A convenience wrapper around joblib.dump
"""
joblib.dump(self, filename)
@classmethod
def load(cls, filename):
""" A convenience wrapper around joblib.load
"""
pipeline = joblib.load(filename)
# and make sure we actually read the correct type of thing
if not isinstance(pipeline, cls):
msg = ("[SFSS.load]: the object at {} is of type {}, "
"but expected type was: {}".format(filename, type(pipeline),
cls))
raise TypeError(msg)
return pipeline
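

if __name__ == "__main__":
    # Illustrative sketch only, not part of the original module: the object
    # returned by get_transformer() is a plain mlxtend ColumnSelector, so this
    # demonstrates the column-subsetting behaviour it provides on a toy array
    # (the array and the chosen columns below are made up for the demo).
    _X = np.arange(12).reshape(4, 3)
    _selector = mlxtend.feature_selection.ColumnSelector(cols=(0, 2))
    print(_selector.transform(_X).shape)  # expected: (4, 2)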
| mit |
aminert/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 202 | 3757 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
sridhar912/tsr-py-faster-rcnn | caffe-fast-rcnn/python/detect.py | 36 | 5734 | #!/usr/bin/env python
"""
detector.py is an out-of-the-box windowed detector
callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
Note that this model was trained for image classification and not detection,
and finetuning for detection can be expected to improve results.
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
TODO:
- batch up image filenames as well: don't want to load all of them into memory
- come up with a batching scheme that preserved order / keeps a unique ID
"""
import numpy as np
import pandas as pd
import os
import argparse
import time
import caffe
CROP_MODES = ['list', 'selective_search']
COORD_COLS = ['ymin', 'xmin', 'ymax', 'xmax']
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
# Required arguments: input and output.
parser.add_argument(
"input_file",
help="Input txt/csv filename. If .txt, must be list of filenames.\
If .csv, must be comma-separated file with header\
'filename, xmin, ymin, xmax, ymax'"
)
parser.add_argument(
"output_file",
help="Output h5/csv filename. Format depends on extension."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/deploy.prototxt"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"),
help="Trained model weights file."
)
parser.add_argument(
"--crop_mode",
default="selective_search",
choices=CROP_MODES,
help="How to generate windows for detection."
)
parser.add_argument(
"--gpu",
action='store_true',
help="Switch for gpu computation."
)
parser.add_argument(
"--mean_file",
default=os.path.join(pycaffe_dir,
'caffe/imagenet/ilsvrc_2012_mean.npy'),
help="Data set image mean of H x W x K dimensions (numpy array). " +
"Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
help="Multiply input features by this scale to finish preprocessing."
)
parser.add_argument(
"--raw_scale",
type=float,
default=255.0,
help="Multiply raw input by this scale before preprocessing."
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
parser.add_argument(
"--context_pad",
type=int,
default='16',
help="Amount of surrounding context to collect in input window."
)
args = parser.parse_args()
mean, channel_swap = None, None
if args.mean_file:
mean = np.load(args.mean_file)
if mean.shape[1:] != (1, 1):
mean = mean.mean(1).mean(1)
if args.channel_swap:
channel_swap = [int(s) for s in args.channel_swap.split(',')]
if args.gpu:
caffe.set_mode_gpu()
print("GPU mode")
else:
caffe.set_mode_cpu()
print("CPU mode")
# Make detector.
detector = caffe.Detector(args.model_def, args.pretrained_model, mean=mean,
input_scale=args.input_scale, raw_scale=args.raw_scale,
channel_swap=channel_swap,
context_pad=args.context_pad)
# Load input.
t = time.time()
print("Loading input...")
if args.input_file.lower().endswith('txt'):
with open(args.input_file) as f:
inputs = [_.strip() for _ in f.readlines()]
elif args.input_file.lower().endswith('csv'):
inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str})
inputs.set_index('filename', inplace=True)
else:
raise Exception("Unknown input file type: not in txt or csv.")
# Detect.
if args.crop_mode == 'list':
# Unpack sequence of (image filename, windows).
images_windows = [
(ix, inputs.iloc[np.where(inputs.index == ix)][COORD_COLS].values)
for ix in inputs.index.unique()
]
detections = detector.detect_windows(images_windows)
else:
detections = detector.detect_selective_search(inputs)
print("Processed {} windows in {:.3f} s.".format(len(detections),
time.time() - t))
# Collect into dataframe with labeled fields.
df = pd.DataFrame(detections)
df.set_index('filename', inplace=True)
df[COORD_COLS] = pd.DataFrame(
data=np.vstack(df['window']), index=df.index, columns=COORD_COLS)
del(df['window'])
# Save results.
t = time.time()
if args.output_file.lower().endswith('csv'):
# csv
        # Enumerate the class probabilities. NUM_OUTPUT is not defined
        # anywhere in this script, so derive it from the length of the
        # returned feature vectors.
        NUM_OUTPUT = len(df['feat'].iloc[0])
        class_cols = ['class{}'.format(x) for x in range(NUM_OUTPUT)]
df[class_cols] = pd.DataFrame(
data=np.vstack(df['feat']), index=df.index, columns=class_cols)
df.to_csv(args.output_file, cols=COORD_COLS + class_cols)
else:
# h5
df.to_hdf(args.output_file, 'df', mode='w')
print("Saved to {} in {:.3f} s.".format(args.output_file,
time.time() - t))
if __name__ == "__main__":
import sys
main(sys.argv)
| mit |
rdjdejong/Leren2016 | kNN1.py | 1 | 2525 | from __future__ import division
import numpy as np
import pandas as pd
# Finds the most common item
# taken from http://stackoverflow.com/questions/1518522/python-most-common-element-in-a-list
def most_common(lst):
return max(set(lst), key=lst.count)
# simply determines the accuracy of our predicted outputs
def accuracy(lst):
correct = 0
for pair in lst:
if pair[0] == pair[1]:
correct += 1
return correct/len(lst)
class kNN:
''' K-Nearest Neighbor
* In this class, we implemented the KNN-algorithm
'''
test_data = None
train_data = None
# read the data
def __init__(self, fileNameTrain, fileNameTest):
self.train_data = pd.read_csv(fileNameTrain, header = None, sep = ';')\
.values
self.test_data = pd.read_csv(fileNameTest, header = None, sep = ';')\
.values
# determines if we want a single example or take the whole training set
def find_neighbors(self, k, test_case = -1):
if test_case < 0:
outputs = []
for i in range(len(self.test_data[:,0])):
predicted = self.find_neighbor(k, self.test_data[i])
outputs.append((predicted, self.test_data[i, -1]))
return outputs
else:
return self.find_neighbor(k, self.test_data[test_case])
# find the neighbors
def find_neighbor(self, k, test_case):
# calculate the distance
distance = np.power\
((self.train_data[:,:-1] - test_case[:-1]), 2)
distance = np.sum(distance, axis=1)
distance = np.sqrt(distance)
votes = []
        # finds the K-Nearest neighbors
        for i in range(k):
            min_index = np.argmin(distance)
            min_vote = self.train_data[min_index, -1]
            votes.append(min_vote.item())
            # mask the chosen neighbor instead of deleting it, so that the
            # remaining indices keep lining up with self.train_data
            distance[min_index] = np.inf
        return most_common(votes)
if __name__ == '__main__':
kNN_trainer = kNN('digist123-1.csv', 'digist123-2.csv')
print accuracy(kNN_trainer.find_neighbors(1))
print accuracy(kNN_trainer.find_neighbors(2))
print accuracy(kNN_trainer.find_neighbors(3))
print accuracy(kNN_trainer.find_neighbors(4))
print accuracy(kNN_trainer.find_neighbors(5))
print accuracy(kNN_trainer.find_neighbors(6))
print accuracy(kNN_trainer.find_neighbors(7))
print accuracy(kNN_trainer.find_neighbors(8))
print accuracy(kNN_trainer.find_neighbors(9))
print accuracy(kNN_trainer.find_neighbors(10))
| apache-2.0 |
thorwhalen/ut | stats/classification/analysis.py | 1 | 1238 | """binary classification count results"""
__author__ = 'thor'
import pandas as pd
import itertools
def equality_counts(df):
"""
returns a Series indexed by every True/False combination of observed equalities
of the columns of df, along with the count of this combination.
Example:
criteria source
False False 5063
True 89
True False 23936
True 1293
means that out of all pairs of the rows (i,j) (i != j)of df, having columns "criteria" and
"source",
5063 of these pairs had both criteria_i != criteria_j and source_i != source_j,
89 of these pairs had both criteria_i != criteria_j and source_i == source_j,
23936 of these pairs had both criteria_i == criteria_j and source_i != source_j,
1293 of these pairs had both criteria_i == criteria_j and source_i == source_j
"""
eq_counts = pd.DataFrame()
for i, j in itertools.combinations(range(len(df)), 2):
eq_counts = pd.concat([eq_counts, df.iloc[i] == df.iloc[j]], axis=1)
eq_counts = eq_counts.T.reset_index(drop=True)
return eq_counts.groupby(list(eq_counts.columns)).size()
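

if __name__ == '__main__':
    # Illustrative sketch only, not part of the original module: build a tiny
    # DataFrame (values made up for the demo) and count, over all row pairs,
    # how often each combination of per-column equality occurs.
    demo_df = pd.DataFrame({'criteria': ['a', 'a', 'b', 'b'],
                            'source': ['x', 'y', 'x', 'x']})
    print(equality_counts(demo_df))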
| mit |
clusterlib/clusterlib | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 5 | 8060 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
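        # NOTE: the early return above makes the branch below unreachable;
        # it appears to deliberately disable signature rendering here.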
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
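

if __name__ == '__main__':
    # Illustrative sketch only, not part of the original module: parse a small
    # numpydoc-style docstring (made up for the demo) and print the
    # Sphinx-formatted rendering produced by SphinxDocString.
    demo_doc = """Add two numbers.

    Parameters
    ----------
    a : int
        First operand.
    b : int
        Second operand.

    Returns
    -------
    int
        The sum of `a` and `b`.
    """
    print(SphinxDocString(demo_doc))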
| bsd-3-clause |
fspaolo/scikit-learn | sklearn/cluster/bicluster/tests/test_utils.py | 10 | 1427 | """Tests for bicluster utilities."""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.cluster.bicluster.utils import get_indicators
from sklearn.cluster.bicluster.utils import get_shape
from sklearn.cluster.bicluster.utils import get_submatrix
def test_get_indicators():
rows = [2, 4, 5]
columns = [0, 1, 3]
shape = (6, 4)
row_ind, col_ind = get_indicators(rows, columns, shape)
assert_array_equal(row_ind, [False, False, True, False, True, True])
assert_array_equal(col_ind, [True, True, False, True])
def test_get_shape():
rows = [True, True, False, False]
cols = [True, False, True, True]
assert_equal(get_shape(rows, cols), (2, 3))
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
rows = [True, True, False, False, True]
cols = [False, False, True, True]
for X in (data, csr_matrix(data)):
submatrix = get_submatrix(rows, cols, X)
if issparse(submatrix):
submatrix = submatrix.todense()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.todense()
assert_true(np.all(X != -1))
| bsd-3-clause |
Midafi/scikit-image | setup.py | 11 | 4995 | #! /usr/bin/env python
descr = """Image Processing SciKit
Image processing algorithms for SciPy, including IO, morphology, filtering,
warping, color manipulation, object detection, etc.
Please refer to the online documentation at
http://scikit-image.org/
"""
DISTNAME = 'scikit-image'
DESCRIPTION = 'Image processing routines for SciPy'
LONG_DESCRIPTION = descr
MAINTAINER = 'Stefan van der Walt'
MAINTAINER_EMAIL = 'stefan@sun.ac.za'
URL = 'http://scikit-image.org'
LICENSE = 'Modified BSD'
DOWNLOAD_URL = 'http://github.com/scikit-image/scikit-image'
import os
import sys
import setuptools
from distutils.command.build_py import build_py
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# skimage __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-image to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKIMAGE_SETUP__ = True
with open('skimage/__init__.py') as fid:
for line in fid:
if line.startswith('__version__'):
VERSION = line.strip().split()[-1][1:-1]
break
with open('requirements.txt') as fid:
INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]
# requirements for those browsing PyPI
REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]
REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]
REQUIRES = [r.replace('[array]', '') for r in REQUIRES]
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(
ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('skimage')
config.add_data_dir('skimage/data')
return config
if __name__ == "__main__":
try:
from numpy.distutils.core import setup
extra = {'configuration': configuration}
# Do not try and upgrade larger dependencies
for lib in ['scipy', 'numpy', 'matplotlib', 'pillow']:
try:
__import__(lib)
INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES
if lib not in i]
except ImportError:
pass
except ImportError:
if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info', '--version',
'clean')):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install scikit-image when Numpy is not yet
# present in the system.
pass
else:
print('To install scikit-image from source, you will need numpy.\n' +
'Install numpy with pip:\n' +
'pip install numpy\n'
'Or use your operating system package manager. For more\n' +
'details, see http://scikit-image.org/docs/stable/install.html')
sys.exit(1)
setup(
name=DISTNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
license=LICENSE,
download_url=DOWNLOAD_URL,
version=VERSION,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
],
install_requires=INSTALL_REQUIRES,
# install cython when running setup.py (source install)
setup_requires=['cython>=0.21'],
requires=REQUIRES,
packages=setuptools.find_packages(exclude=['doc']),
include_package_data=True,
zip_safe=False, # the package can run out of an .egg file
entry_points={
'console_scripts': ['skivi = skimage.scripts.skivi:main'],
},
cmdclass={'build_py': build_py},
**extra
)
| bsd-3-clause |
jimcunderwood/MissionPlanner | Lib/site-packages/scipy/stats/distributions.py | 53 | 207806 | # Functions to implement several important functions for
# various Continous and Discrete Probability Distributions
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
import math
import warnings
from copy import copy
from scipy.misc import comb, derivative
from scipy import special
from scipy import optimize
from scipy import integrate
from scipy.special import gammaln as gamln
import inspect
from numpy import alltrue, where, arange, putmask, \
ravel, take, ones, sum, shape, product, repeat, reshape, \
zeros, floor, logical_and, log, sqrt, exp, arctanh, tan, sin, arcsin, \
arctan, tanh, ndarray, cos, cosh, sinh, newaxis, array, log1p, expm1
from numpy import atleast_1d, polyval, ceil, place, extract, \
any, argsort, argmax, vectorize, r_, asarray, nan, inf, pi, isinf, \
power, NINF, empty
import numpy
import numpy as np
import numpy.random as mtrand
from numpy import flatnonzero as nonzero
import vonmises_cython
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n==0):
return 1.0
elif (n==1):
if mu is None:
val = moment_func(1,*args)
else:
val = mu
elif (n==2):
if mu2 is None or mu is None:
val = moment_func(2,*args)
else:
val = mu2 + mu*mu
elif (n==3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3,*args)
else:
mu3 = g1*(mu2**1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu**3 # 3rd non-central moment
elif (n==4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4,*args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*(mu2**1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu**4
else:
val = moment_func(n, *args)
return val
def _skew(data):
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / m2**1.5
def _kurtosis(data):
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
__all__ = [
'rv_continuous',
'ksone', 'kstwobign', 'norm', 'alpha', 'anglit', 'arcsine',
'beta', 'betaprime', 'bradford', 'burr', 'fisk', 'cauchy',
'chi', 'chi2', 'cosine', 'dgamma', 'dweibull', 'erlang',
'expon', 'exponweib', 'exponpow', 'fatiguelife', 'foldcauchy',
'f', 'foldnorm', 'frechet_r', 'weibull_min', 'frechet_l',
'weibull_max', 'genlogistic', 'genpareto', 'genexpon', 'genextreme',
'gamma', 'gengamma', 'genhalflogistic', 'gompertz', 'gumbel_r',
'gumbel_l', 'halfcauchy', 'halflogistic', 'halfnorm', 'hypsecant',
'gausshyper', 'invgamma', 'invnorm', 'invgauss', 'invweibull',
'johnsonsb', 'johnsonsu', 'laplace', 'levy', 'levy_l',
'levy_stable', 'logistic', 'loggamma', 'loglaplace', 'lognorm',
'gilbrat', 'maxwell', 'mielke', 'nakagami', 'ncx2', 'ncf', 't',
'nct', 'pareto', 'lomax', 'powerlaw', 'powerlognorm', 'powernorm',
'rdist', 'rayleigh', 'reciprocal', 'rice', 'recipinvgauss',
'semicircular', 'triang', 'truncexpon', 'truncnorm',
'tukeylambda', 'uniform', 'vonmises', 'wald', 'wrapcauchy',
'entropy', 'rv_discrete',
'binom', 'bernoulli', 'nbinom', 'geom', 'hypergeom', 'logser',
'poisson', 'planck', 'boltzmann', 'randint', 'zipf', 'dlaplace',
'skellam'
]
floatinfo = numpy.finfo(float)
errp = special.errprint
arr = asarray
gam = special.gamma
import types
from scipy.misc import doccer
all = alltrue
sgf = vectorize
try:
from new import instancemethod
except ImportError:
# Python 3
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings.
docheaders = {'methods':"""\nMethods\n-------\n""",
              'parameters':"""\nParameters\n----------\n""",
'notes':"""\nNotes\n-----\n""",
'examples':"""\nExamples\n--------\n"""}
_doc_rvs = \
"""rvs(%(shapes)s, loc=0, scale=1, size=1)
Random variates.
"""
_doc_pdf = \
"""pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = \
"""logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = \
"""pmf(x, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = \
"""logpmf(x, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = \
"""cdf(x, %(shapes)s, loc=0, scale=1)
    Cumulative distribution function.
"""
_doc_logcdf = \
"""logcdf(x, %(shapes)s, loc=0, scale=1)
    Log of the cumulative distribution function.
"""
_doc_sf = \
"""sf(x, %(shapes)s, loc=0, scale=1)
Survival function (1-cdf --- sometimes more accurate).
"""
_doc_logsf = \
"""logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = \
"""ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of cdf --- percentiles).
"""
_doc_isf = \
"""isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of sf).
"""
_doc_moment = \
"""moment(n, %(shapes)s, loc=0, scale=1)
Non-central moment of order n
"""
_doc_stats = \
"""stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = \
"""entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = \
"""fit(data, %(shapes)s, loc=0, scale=1)
Parameter estimates for generic data.
"""
_doc_expect = \
"""expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = \
"""expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = \
"""median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = \
"""mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = \
"""var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = \
"""std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = \
"""interval(alpha, %(shapes)s, loc=0, scale=1)
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
# Note that the two lines for %(shapes) are searched for and replaced in
# rv_continuous and rv_discrete - update there if the exact string changes
_doc_default_callparams = \
"""
Parameters
----------
x : array-like
quantiles
q : array-like
lower or upper tail probability
%(shapes)s : array-like
shape parameters
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : str, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
"""
_doc_default_longsummary = \
"""Continuous random variables are defined from a standard form and may
require some shape parameters to complete their specification. Any
optional keyword parameters can be passed to the methods of the RV
object as given below:
"""
_doc_default_frozen_note = \
"""
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = \
"""Examples
--------
>>> import matplotlib.pyplot as plt
>>> numargs = %(name)s.numargs
>>> [ %(shapes)s ] = [0.9,] * numargs
>>> rv = %(name)s(%(shapes)s)
Display frozen pdf
>>> x = np.linspace(0, np.minimum(rv.dist.b, 3))
>>> h = plt.plot(x, rv.pdf(x))
Check accuracy of cdf and ppf
>>> prb = %(name)s.cdf(x, %(shapes)s)
>>> h = plt.semilogy(np.abs(x - %(name)s.ppf(prb, %(shapes)s)) + 1e-20)
Random number generation
>>> R = %(name)s.rvs(%(shapes)s, size=100)
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note,
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note])
docdict = {'rvs':_doc_rvs,
'pdf':_doc_pdf,
'logpdf':_doc_logpdf,
'cdf':_doc_cdf,
'logcdf':_doc_logcdf,
'sf':_doc_sf,
'logsf':_doc_logsf,
'ppf':_doc_ppf,
'isf':_doc_isf,
'stats':_doc_stats,
'entropy':_doc_entropy,
'fit':_doc_fit,
'moment':_doc_moment,
'expect':_doc_expect,
'interval':_doc_interval,
'mean':_doc_mean,
'std':_doc_std,
'var':_doc_var,
'median':_doc_median,
'allmethods':_doc_allmethods,
'callparams':_doc_default_callparams,
'longsummary':_doc_default_longsummary,
'frozennote':_doc_default_frozen_note,
'example':_doc_default_example,
'default':_doc_default,
'before_notes':_doc_default_before_notes}
# Reuse common content between continous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'fit', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in
_doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(\
'Continuous', 'Discrete')
_doc_default_frozen_note = \
"""
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
docdict_discrete['example'] = _doc_default_example.replace('[0.9,]',
'Replace with reasonable value')
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, loop variables are not visible after the loop
pass
def _build_random_array(fun, args, size=None):
# Build an array by applying function fun to
# the arguments in args, creating an array with
# the specified shape.
# Allows an integer shape n as a shorthand for (n,).
if isinstance(size, types.IntType):
size = [size]
if size is not None and len(size) != 0:
n = numpy.multiply.reduce(size)
s = apply(fun, args + (n,))
s.shape = size
return s
else:
n = 1
s = apply(fun, args + (n,))
return s[0]
random = mtrand.random_sample
rand = mtrand.rand
random_integers = mtrand.random_integers
permutation = mtrand.permutation
## Internal class to compute a ppf given a distribution.
## (needs cdf function) and uses brentq from scipy.optimize
## to compute ppf from cdf.
class general_cont_ppf(object):
def __init__(self, dist, xa=-10.0, xb=10.0, xtol=1e-14):
self.dist = dist
self.cdf = eval('%scdf'%dist)
self.xa = xa
self.xb = xb
self.xtol = xtol
self.vecfunc = sgf(self._single_call,otypes='d')
def _tosolve(self, x, q, *args):
return apply(self.cdf, (x, )+args) - q
def _single_call(self, q, *args):
return optimize.brentq(self._tosolve, self.xa, self.xb, args=(q,)+args, xtol=self.xtol)
def __call__(self, q, *args):
return self.vecfunc(q, *args)
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
self.dist = dist
def pdf(self, x): #raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None):
kwds = self.kwds.copy()
kwds.update({'size':size})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments':moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self,k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self,k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
## NANs are returned for unsupported parameters.
## location and scale parameters are optional for each distribution.
## The shape parameters are generally required
##
## The loc and scale parameters must be given as keyword parameters.
## These are related to the common symbols in the .lyx file
## skew is third central moment / variance**(1.5)
## kurtosis is fourth central moment / variance**2 - 3
## References::
## Documentation for ranlib, rv2, cdflib and
##
##  Eric Weisstein's World of Mathematics http://mathworld.wolfram.com/
## http://mathworld.wolfram.com/topics/StatisticalDistributions.html
##
## Documentation to Regress+ by Michael McLaughlin
##
## Engineering and Statistics Handbook (NIST)
## http://www.itl.nist.gov/div898/handbook/index.htm
##
## Documentation for DATAPLOT from NIST
## http://www.itl.nist.gov/div898/software/dataplot/distribu.htm
##
## Norman Johnson, Samuel Kotz, and N. Balakrishnan "Continuous
## Univariate Distributions", second edition,
## Volumes I and II, Wiley & Sons, 1994.
## Each continuous random variable as the following methods
##
## rvs -- Random Variates (alternatively calling the class could produce these)
## pdf -- PDF
## logpdf -- log PDF (more numerically accurate if possible)
## cdf -- CDF
## logcdf -- log of CDF
## sf -- Survival Function (1-CDF)
## logsf --- log of SF
## ppf -- Percent Point Function (Inverse of CDF)
## isf -- Inverse Survival Function (Inverse of SF)
## stats -- Return mean, variance, (Fisher's) skew, or (Fisher's) kurtosis
## nnlf -- negative log likelihood function (to minimize)
## fit -- Model-fitting
##
## Maybe Later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
## To define a new random variable you subclass the rv_continuous class
## and re-define the
##
## _pdf method which will be given clean arguments (in between a and b)
## and passing the argument check method
##
## If positive argument checking is not correct for your RV
## then you will also need to re-define
## _argcheck
## Correct, but potentially slow defaults exist for the remaining
## methods but for speed and/or accuracy you can over-ride
##
## _cdf, _ppf, _rvs, _isf, _sf
##
## Rarely would you override _isf and _sf but you could for numerical precision.
##
## Statistics are computed using numerical integration by default.
## For speed you can redefine this using
##
## _stats --- take shape parameters and return mu, mu2, g1, g2
## --- If you can't compute one of these return it as None
##
## --- Can also be defined with a keyword argument moments=<str>
## where <str> is a string composed of 'm', 'v', 's',
## and/or 'k'. Only the components appearing in string
## should be computed and returned in the order 'm', 'v',
## 's', or 'k' with missing values returned as None
##
## OR
##
## You can override
##
## _munp -- takes n and shape parameters and returns
## -- then nth non-central moment of the distribution.
##
def valarray(shape,value=nan,typecode=None):
"""Return an array of all value.
"""
out = reshape(repeat([value],product(shape,axis=0),axis=0),shape)
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, ndarray):
out = arr(out)
return out
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4,5))
>>> B = 2
>>> C = rand((1,5))
>>> cond = np.ones(A.shape)
>>> [A1,B1,C1] = argsreduce(cond,A,B,C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2,B2,C2] = argsreduce(cond,A,B,C)
>>> B2.shape
(15,)
"""
newargs = atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs,]
expand_arr = (cond==cond)
return [extract(cond, arr1 * expand_arr) for arr1 in newargs]
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def _fix_loc_scale(self, args, loc, scale=1):
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None:
# loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and scale is None:
# loc and scale given without keyword
loc, scale = args[-2:]
args = args[:self.numargs]
if scale is None:
scale = 1.0
if loc is None:
loc = 0.0
return args, loc, scale
def _fix_loc(self, args, loc):
args, loc, scale = self._fix_loc_scale(args, loc)
return args, loc
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self,*args,**kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
defining number of random variates (default=1)
Returns
-------
rvs : array-like
random variates of given `size`
"""
kwd_names = ['loc', 'scale', 'size', 'discrete']
loc, scale, size, discrete = map(kwds.get, kwd_names,
[None]*len(kwd_names))
args, loc, scale = self._fix_loc_scale(args, loc, scale)
cond = logical_and(self._argcheck(*args),(scale >= 0))
if not all(cond):
raise ValueError("Domain error in arguments.")
# self._size is total size of all output values
self._size = product(size, axis=0)
if self._size is not None and self._size > 1:
size = numpy.array(size, ndmin=1)
if np.all(scale == 0):
return loc*ones(size, 'd')
vals = self._rvs(*args)
if self._size is not None:
vals = reshape(vals, size)
vals = vals * scale + loc
# Cast to int if discrete
if discrete:
if numpy.isscalar(vals):
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
median : float
the median of the distribution.
See Also
--------
self.ppf --- inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""Confidence interval with equal areas around the median
Parameters
----------
alpha : array-like float in [0,1]
Probability that an rv will be drawn from the returned range
arg1, arg2, ... : array-like
The shape parameter(s) for the distribution (see docstring of the instance
object for more information)
        loc : array-like, optional
            location parameter (default = 0)
        scale : array-like, optional
            scale parameter (default = 1)
Returns
-------
a, b: array-like (float)
end-points of range that contain alpha % of the rvs
"""
alpha = arr(alpha)
if any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances from for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default) for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xa : float, optional
Lower bound for fixed point calculation for generic ppf.
xb : float, optional
Upper bound for fixed point calculation for generic ppf.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : object, optional
        The value in result arrays that indicates a value for which some
        argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
rvs(<shape(s)>, loc=0, scale=1, size=1)
random variates
pdf(x, <shape(s)>, loc=0, scale=1)
probability density function
logpdf(x, <shape(s)>, loc=0, scale=1)
log of the probability density function
cdf(x, <shape(s)>, loc=0, scale=1)
        cumulative distribution function
    logcdf(x, <shape(s)>, loc=0, scale=1)
        log of the cumulative distribution function
sf(x, <shape(s)>, loc=0, scale=1)
survival function (1-cdf --- sometimes more accurate)
logsf(x, <shape(s)>, loc=0, scale=1)
log of the survival function
ppf(q, <shape(s)>, loc=0, scale=1)
percent point function (inverse of cdf --- quantiles)
isf(q, <shape(s)>, loc=0, scale=1)
inverse survival function (inverse of sf)
moment(n, <shape(s)>, loc=0, scale=1)
non-central n-th moment of the distribution. May not work for array arguments.
stats(<shape(s)>, loc=0, scale=1, moments='mv')
mean('m'), variance('v'), skew('s'), and/or kurtosis('k')
entropy(<shape(s)>, loc=0, scale=1)
(differential) entropy of the RV.
fit(data, <shape(s)>, loc=0, scale=1)
Parameter estimates for generic data
expect(func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
median(<shape(s)>, loc=0, scale=1)
Median of the distribution.
mean(<shape(s)>, loc=0, scale=1)
Mean of the distribution.
std(<shape(s)>, loc=0, scale=1)
Standard deviation of the distribution.
var(<shape(s)>, loc=0, scale=1)
Variance of the distribution.
interval(alpha, <shape(s)>, loc=0, scale=1)
Interval that with `alpha` percent probability contains a random
realization of this distribution.
__call__(<shape(s)>, loc=0, scale=1)
Calling a distribution instance creates a frozen RV object with the
same methods but holding the given shape, location, and scale fixed.
See Notes section.
**Parameters for Methods**
x : array-like
quantiles
q : array-like
lower or upper tail probability
<shape(s)> : array-like
shape parameters
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : string, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
n : int
order of moment to calculate in method moments
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
    be useful for cross-checking and for debugging, but might not work in all
    cases when directly called.
Notes
-----
**Frozen Distribution**
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Subclassing**
New random variables can be defined by subclassing rv_continuous class
and re-defining at least the
_pdf or the _cdf method (normalized to location 0 and scale 1)
which will be given clean arguments (in between a and b) and
passing the argument check method
If positive argument checking is not correct for your RV
then you will also need to re-define ::
_argcheck
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride ::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override _isf, _sf, and _logsf but you could.
Statistics are computed using numerical integration by default.
For speed you can redefine this using
_stats
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument moments=<str>
where <str> is a string composed of 'm', 'v', 's',
and/or 'k'. Only the components appearing in string
should be computed and returned in the order 'm', 'v',
's', or 'k' with missing values returned as None
OR
You can override
_munp
takes n and shape parameters and returns
the nth non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following::
class gaussian_gen(rv_continuous):
"Gaussian distribution"
def _pdf(self, x):
    return exp(-x**2 / 2.) / sqrt(2. * pi)
"""
def __init__(self, momtype=1, a=None, b=None, xa=-10.0, xb=10.0,
xtol=1e-14, badvalue=None, name=None, longname=None,
shapes=None, extradoc=None):
rv_generic.__init__(self)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xa = xa
self.xb = xb
self.xtol = xtol
self._size = 1
self.m = 0.0
self.moment_type = momtype
self.expandarr = 1
if not hasattr(self,'numargs'):
#allows more general subclassing with *args
cdf_signature = inspect.getargspec(self._cdf.im_func)
numargs1 = len(cdf_signature[0]) - 2
pdf_signature = inspect.getargspec(self._pdf.im_func)
numargs2 = len(pdf_signature[0]) - 2
self.numargs = max(numargs1, numargs2)
#nin correction
self.vecfunc = sgf(self._ppf_single_call,otypes='d')
self.vecfunc.nin = self.numargs + 1
self.vecentropy = sgf(self._entropy,otypes='d')
self.vecentropy.nin = self.numargs + 1
self.veccdf = sgf(self._cdf_single_call,otypes='d')
self.veccdf.nin = self.numargs + 1
self.shapes = shapes
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = sgf(self._mom0_sc,otypes='d')
else:
self.generic_moment = sgf(self._mom1_sc,otypes='d')
self.generic_moment.nin = self.numargs+1 # Because of the *args argument
# of _mom0_sc, vectorize cannot count the number of arguments correctly.
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
# generate docstring for subclass instances
if self.__doc__ is None:
self._construct_default_doc(longname=longname, extradoc=extradoc)
else:
self._construct_doc()
## This only works for old-style classes...
# self.__class__.__doc__ = self.__doc__
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s continuous random variable.'%longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc()
def _construct_doc(self):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(\
"\n%(shapes)s : array-like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (with and without ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
def _ppf_to_solve(self, x, q,*args):
return self.cdf(*((x, )+args))-q
def _ppf_single_call(self, q, *args):
return optimize.brentq(self._ppf_to_solve, self.xa, self.xb, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x,m,*args):
return x**m * self.pdf(x,*args)
def _mom0_sc(self, m,*args):
return integrate.quad(self._mom_integ0, self.a,
self.b, args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q,m,*args):
return (self.ppf(q,*args))**m
def _mom1_sc(self, m,*args):
return integrate.quad(self._mom_integ1, 0, 1,args=(m,)+args)[0]
## These are the methods you must define (standard form functions)
def _argcheck(self, *args):
# Default check for correct values on args and keywords.
# Returns condition array of 1's where arguments are correct and
# 0's where they are not.
cond = 1
for arg in args:
cond = logical_and(cond,(arr(arg) > 0))
return cond
def _pdf(self,x,*args):
return derivative(self._cdf,x,dx=1e-5,args=args,order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
##(return 1-d using self._size to get number)
def _rvs(self, *args):
## Use basic inverse cdf algorithm for RV generation as default.
U = mtrand.sample(self._size)
Y = self._ppf(U,*args)
return Y
def _cdf_single_call(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self.veccdf(x,*args)
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x,*args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self.vecfunc(q,*args)
def _isf(self, q, *args):
return self._ppf(1.0-q,*args) #use correct _ppf for subclasses
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self,*args, **kwds):
return None, None, None, None
# Central moments
def _munp(self,n,*args):
return self.generic_moment(n,*args)
def pdf(self,x,*args,**kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
pdf : array-like
Probability density function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = arr((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
putmask(output,(1-cond0)*array(cond1,bool),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
output = place(output,cond,self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
logpdf : array-like
Log of the probability density function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = arr((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
putmask(output,(1-cond0)*array(cond1,bool),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
output = place(output,cond,self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self,x,*args,**kwds):
"""
Cumulative distribution function at x of the given RV.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
cdf : array-like
Cumulative distribution function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond),'d')
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2,1.0)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
output = place(output,cond,self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self,x,*args,**kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
logcdf : array-like
Log of the cumulative distribution function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2,0.0)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
output = place(output,cond,self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self,x,*args,**kwds):
"""
Survival function (1-cdf) at x of the given RV.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
sf : array-like
Survival function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2,1.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
output = place(output,cond,self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self,x,*args,**kwds):
"""
Log of the Survival function log(1-cdf) at x of the given RV.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
logsf : array-like
Log of the survival function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2,0.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
output = place(output,cond,self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self,q,*args,**kwds):
"""
Percent point function (inverse of cdf) at q of the given RV.
Parameters
----------
q : array-like
lower tail probability
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
x : array-like
quantile corresponding to the lower tail probability q.
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
q,loc,scale = map(arr,(q,loc,scale))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.a*scale + loc)
output = place(output,(1-cond0)+(1-cond1)*(q!=0.0), self.badvalue)
output = place(output,cond2,self.b*scale + loc)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale,loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
output = place(output,cond,self._ppf(*goodargs)*scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self,q,*args,**kwds):
"""
Inverse survival function at q of the given RV.
Parameters
----------
q : array-like
upper tail probability
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
x : array-like
quantile corresponding to the upper tail probability q.
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
q,loc,scale = map(arr,(q,loc,scale))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.b)
#output = place(output,(1-cond0)*(cond1==cond1), self.badvalue)
output = place(output,(1-cond0)*(cond1==cond1)+(1-cond1)*(q!=0.0), self.badvalue)
output = place(output,cond2,self.a)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale,loc))) #PB replace 1-q by q
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
output = place(output,cond,self._isf(*goodargs)*scale + loc) #PB use _isf instead of _ppf
if output.ndim == 0:
return output[()]
return output
def stats(self,*args,**kwds):
"""
Some statistics of the given RV
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
moments : string, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default='mv')
Returns
-------
stats : sequence
of requested moments.
"""
loc,scale,moments=map(kwds.get,['loc','scale','moments'])
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None:
# loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and scale is None:
# loc and scale given without keyword
loc, scale = args[-2:]
if N == self.numargs + 3 and moments is None:
# loc, scale, and moments
loc, scale, moments = args[-3:]
args = args[:self.numargs]
if scale is None: scale = 1.0
if loc is None: loc = 0.0
if moments is None: moments = 'mv'
loc,scale = map(arr,(loc,scale))
args = tuple(map(arr,args))
cond = self._argcheck(*args) & (scale > 0) & (loc==loc)
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments})
else:
mu, mu2, g1, g2 = self._stats(*args)
if g1 is None:
mu3 = None
else:
mu3 = g1*np.power(mu2,1.5) #(mu2**1.5) breaks down for nan and inf
default = valarray(shape(cond), self.badvalue)
output = []
# Use only entries that are valid in calculation
if any(cond):
goodargs = argsreduce(cond, *(args+(scale,loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if 'm' in moments:
if mu is None:
mu = self._munp(1.0,*goodargs)
out0 = default.copy()
out0 = place(out0,cond,mu*scale+loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
mu2 = mu2p - mu*mu
if np.isinf(mu):
#if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
out0 = place(out0,cond,mu2*scale*scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
mu3 = mu3p - 3*mu*mu2 - mu**3
g1 = mu3 / mu2**1.5
out0 = default.copy()
out0 = place(out0,cond,g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
if mu3 is None:
mu3p = self._munp(3.0,*goodargs)
mu3 = mu3p - 3*mu*mu2 - mu**3
mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
out0 = place(out0,cond,g2)
output.append(out0)
else: #no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def moment(self, n, *args, **kwds):
"""
n-th order non-central moment of the distribution
Parameters
----------
n: int, n>=1
order of moment
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : float, optional
location parameter (default=0)
scale : float, optional
scale parameter (default=1)
"""
loc = kwds.get('loc', 0)
scale = kwds.get('scale', 1)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0): raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mdict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args,**mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n,k,exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args),axis=0)
def nnlf(self, theta, x):
# - sum (log pdf(x, theta),axis=0)
# where theta are the parameters (including loc and scale)
#
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = arr((x-loc) / scale)
cond0 = (x <= self.a) | (x >= self.b)
if (any(cond0)):
return inf
else:
N = len(x)
return self._nnlf(x, *args) + N*log(scale)
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
return args + self.fit_loc_scale(data, *args)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
args = list(args)
Nargs = len(args) - 2
fixedn = []
index = range(Nargs) + [-2, -1]
names = ['f%d' % n for n in range(Nargs)] + ['floc', 'fscale']
x0 = args[:]
for n, key in zip(index, names):
if kwds.has_key(key):
fixedn.append(n)
args[n] = kwds[key]
del x0[n]
if len(fixedn) == 0:
func = self.nnlf
restore = None
else:
if len(fixedn) == len(index):
raise ValueError("All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self.nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape, location, and scale parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
f0...fn : hold respective shape parameters fixed.
floc : hold location parameter fixed to specified value.
fscale : hold scale parameter fixed to specified value.
optimizer : The optimizer to use. The optimizer must take func,
and starting position as the first two arguments,
plus args (for extra arguments to pass to the
function to be optimized) and disp=0 to suppress
output as keyword arguments.
Returns
-------
shape, loc, scale : tuple of floats
MLEs for any shape statistics, followed by those for location and
scale.
"""
Narg = len(args)
if Narg > self.numargs:
raise ValueError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not (kwds.has_key('loc') and
kwds.has_key('scale')):
start = self._fitstart(data) # get distribution specific starting locations
args += start[Narg:-2]
loc = kwds.get('loc', start[-2])
scale = kwds.get('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.get('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, (str, unicode)):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
vals = optimizer(func,x0,args=(ravel(data),),disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments
"""
mu, mu2 = self.stats(*args,**{'moments':'mv'})
muhat = arr(data).mean()
mu2hat = arr(data).var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
return Lhat, Shat
@np.deprecate
def est_loc_scale(self, data, *args):
"""This function is deprecated, use self.fit_loc_scale(data) instead. """
return self.fit_loc_scale(data, *args)
def freeze(self,*args,**kwds):
return rv_frozen(self,*args,**kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return val*log(val)
entr = -integrate.quad(integ,self.a,self.b)[0]
if not np.isnan(entr):
return entr
else: # try with different limits if integration problems
low,upp = self.ppf([0.001,0.999],*args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return -integrate.quad(integ,lower,upper)[0]
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
output = zeros(shape(cond0),'d')
output = place(output,(1-cond0),self.badvalue)
goodargs = argsreduce(cond0, *args)
#I don't know when or why vecentropy got broken when numargs == 0
if self.numargs == 0:
output = place(output,cond0,self._entropy()+log(scale))
else:
output = place(output,cond0,self.vecentropy(*goodargs)+log(scale))
return output
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""calculate expected value of a function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
func : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set to the support
of the distribution
conditional : boolean (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expected value : float
Notes
-----
This function has not been checked for its behavior when the integral is
not finite. The integration behavior is inherited from integrate.quad.
"""
lockwds = {'loc': loc,
'scale':scale}
if func is None:
def fun(x, *args):
return x*self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x)*self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
return integrate.quad(fun, lb, ub, **kwds)[0] / invfac
_EULER = 0.577215664901532860606512090082402431042 # -special.psi(1)
_ZETA3 = 1.202056903159594285399738161511449990765 # special.zeta(3,1) Apery's constant
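# Illustrative sketch (not used elsewhere in this module): the "Subclassing"
# notes in the rv_continuous docstring in action.  Only _pdf is overridden;
# cdf, ppf, rvs, fit, ... all fall back to the generic machinery above, which
# is assumed here to behave as in scipy.stats.  The names below are examples
# only, not part of any established API.
def _example_subclass_rv_continuous():
    class triang_gen(rv_continuous):
        "Symmetric triangular distribution on [-1, 1]."
        def _pdf(self, x):
            return 1.0 - abs(x)
    triang = triang_gen(a=-1.0, b=1.0, name='triang')
    # cdf(0.0) should be 0.5; ppf(0.75) should be 1 - sqrt(0.5) (about 0.293)
    return triang.cdf(0.0), triang.ppf(0.75)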
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
def _cdf(self,x,n):
return 1.0-special.smirnov(n,x)
def _ppf(self,q,n):
return special.smirnovi(n,1.0-q)
ksone = ksone_gen(a=0.0,name='ksone', longname="Kolmogorov-Smirnov "\
"A one-sided test statistic.", shapes="n",
extradoc="""
General Kolmogorov-Smirnov one-sided test.
"""
)
class kstwobign_gen(rv_continuous):
def _cdf(self,x):
return 1.0-special.kolmogorov(x)
def _sf(self,x):
return special.kolmogorov(x)
def _ppf(self,q):
return special.kolmogi(1.0-q)
kstwobign = kstwobign_gen(a=0.0,name='kstwobign', longname='Kolmogorov-Smirnov two-sided (for large N)', extradoc="""
Kolmogorov-Smirnov two-sided test for large N
"""
)
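# Illustrative sketch (not used elsewhere in this module): the two K-S
# generators above are typically used to turn a test statistic into a critical
# value or a p-value.  Assumes the generic ppf/sf machinery defined earlier
# behaves as in scipy.stats; the helper name and inputs are examples only.
def _example_ks_critical_value(alpha=0.05, n=50):
    import math
    # Asymptotic two-sided critical value: D_crit ~ kstwobign.ppf(1-alpha)/sqrt(n)
    # (kstwobign.ppf(0.95) is approximately 1.358).
    d_crit = kstwobign.ppf(1.0 - alpha) / math.sqrt(n)
    # One-sided p-value for an observed statistic d, via ksone:
    d_observed = 0.15
    p_one_sided = ksone.sf(d_observed, n)
    return d_crit, p_one_sided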
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = math.sqrt(2*pi)
_norm_pdf_logC = math.log(_norm_pdf_C)
def _norm_pdf(x):
return exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return special.ndtr(x)
def _norm_logcdf(x):
return log(special.ndtr(x))
def _norm_ppf(q):
return special.ndtri(q)
class norm_gen(rv_continuous):
def _rvs(self):
return mtrand.standard_normal(self._size)
def _pdf(self,x):
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self,x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_cdf(-x)
def _logsf(self, x):
return _norm_logcdf(-x)
def _ppf(self,q):
return _norm_ppf(q)
def _isf(self,q):
return -_norm_ppf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(log(2*pi)+1)
norm = norm_gen(name='norm',longname='A normal',extradoc="""
Normal distribution
The location (loc) keyword specifies the mean.
The scale (scale) keyword specifies the standard deviation.
normal.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
""")
## Alpha distribution
##
class alpha_gen(rv_continuous):
def _pdf(self, x, a):
return 1.0/(x**2)/special.ndtr(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*log(x) + _norm_logpdf(a-1.0/x) - log(special.ndtr(a))
def _cdf(self, x, a):
return special.ndtr(a-1.0/x) / special.ndtr(a)
def _ppf(self, q, a):
return 1.0/arr(a-special.ndtri(q*special.ndtr(a)))
def _stats(self, a):
return [inf]*2 + [nan]*2
alpha = alpha_gen(a=0.0,name='alpha',shapes='a',extradoc="""
Alpha distribution
alpha.pdf(x,a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2)
where Phi(alpha) is the normal CDF, x > 0, and a > 0.
""")
## Anglit distribution
##
class anglit_gen(rv_continuous):
def _pdf(self, x):
return cos(2*x)
def _cdf(self, x):
return sin(x+pi/4)**2.0
def _ppf(self, q):
return (arcsin(sqrt(q))-pi/4)
def _stats(self):
return 0.0, pi*pi/16-0.5, 0.0, -2*(pi**4 - 96)/(pi*pi-8)**2
def _entropy(self):
return 1-log(2)
anglit = anglit_gen(a=-pi/4,b=pi/4,name='anglit', extradoc="""
Anglit distribution
anglit.pdf(x) = sin(2*x+pi/2) = cos(2*x) for -pi/4 <= x <= pi/4
""")
## Arcsine distribution
##
class arcsine_gen(rv_continuous):
def _pdf(self, x):
return 1.0/pi/sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/pi*arcsin(sqrt(x))
def _ppf(self, q):
return sin(pi/2.0*q)**2.0
def _stats(self):
#mup = 0.5, 3.0/8.0, 15.0/48.0, 35.0/128.0
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0,b=1.0,name='arcsine',extradoc="""
Arcsine distribution
arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
for 0 < x < 1.
""")
## Beta distribution
##
class beta_gen(rv_continuous):
def _rvs(self, a, b):
return mtrand.beta(a,b,self._size)
def _pdf(self, x, a, b):
Px = (1.0-x)**(b-1.0) * x**(a-1.0)
Px /= special.beta(a,b)
return Px
def _logpdf(self, x, a, b):
lPx = (b-1.0)*log(1.0-x) + (a-1.0)*log(x)
lPx -= log(special.beta(a,b))
return lPx
def _cdf(self, x, a, b):
return special.btdtr(a,b,x)
def _ppf(self, q, a, b):
return special.btdtri(a,b,q)
def _stats(self, a, b):
mn = a *1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*sqrt(a + b + 1) / (a + b + 2) / sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a,b))
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is not None and fscale is not None:
# special case
data = (ravel(data)-floc)/fscale
xbar = data.mean()
v = data.var(ddof=0)
fac = xbar*(1-xbar)/v - 1
a = xbar * fac
b = (1-xbar) * fac
return a, b, floc, fscale
else: # do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
beta = beta_gen(a=0.0, b=1.0, name='beta',shapes='a, b',extradoc="""
Beta distribution
beta.pdf(x, a, b) = gamma(a+b)/(gamma(a)*gamma(b)) * x**(a-1) * (1-x)**(b-1)
for 0 < x < 1, a, b > 0.
""")
## Beta Prime
class betaprime_gen(rv_continuous):
def _rvs(self, a, b):
u1 = gamma.rvs(a,size=self._size)
u2 = gamma.rvs(b,size=self._size)
return (u1 / u2)
def _pdf(self, x, a, b):
return 1.0/special.beta(a,b)*x**(a-1.0)/(1+x)**(a+b)
def _logpdf(self, x, a, b):
return (a-1.0)*log(x) - (a+b)*log(1+x) - log(special.beta(a,b))
def _cdf_skip(self, x, a, b):
# remove for now: special.hyp2f1 is incorrect for large a
x = where(x==1.0, 1.0-1e-6,x)
return pow(x,a)*special.hyp2f1(a+b,a,1+a,-x)/a/special.beta(a,b)
def _munp(self, n, a, b):
if (n == 1.0):
return where(b > 1, a/(b-1.0), inf)
elif (n == 2.0):
return where(b > 2, a*(a+1.0)/((b-2.0)*(b-1.0)), inf)
elif (n == 3.0):
return where(b > 3, a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
inf)
elif (n == 4.0):
return where(b > 4,
a*(a+1.0)*(a+2.0)*(a+3.0)/((b-4.0)*(b-3.0) \
*(b-2.0)*(b-1.0)), inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, b=500.0, name='betaprime', shapes='a, b',
extradoc="""
Beta prime distribution
betaprime.pdf(x, a, b) = gamma(a+b)/(gamma(a)*gamma(b))
* x**(a-1) * (1+x)**(-a-b)
for x > 0, a, b > 0.
""")
## Bradford
##
class bradford_gen(rv_continuous):
def _pdf(self, x, c):
return c / (c*x + 1.0) / log(1.0+c)
def _cdf(self, x, c):
return log(1.0+c*x) / log(c+1.0)
def _ppf(self, q, c):
return ((1.0+c)**q-1)/c
def _stats(self, c, moments='mv'):
k = log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) \
+ 6*c*k*k*(3*k-14) + 12*k**3
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = log(1+c)
return k/2.0 - log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford', longname="A Bradford",
shapes='c', extradoc="""
Bradford distribution
bradford.pdf(x,c) = c/(k*(1+c*x))
for 0 < x < 1, c > 0 and k = log(1+c).
""")
## Burr
# burr with d=1 is called the fisk distribution
class burr_gen(rv_continuous):
def _pdf(self, x, c, d):
return c*d*(x**(-c-1.0))*((1+x**(-c*1.0))**(-d-1.0))
def _cdf(self, x, c, d):
return (1+x**(-c*1.0))**(-d**1.0)
def _ppf(self, q, c, d):
return (q**(-1.0/d)-1)**(-1.0/c)
def _stats(self, c, d, moments='mv'):
g2c, g2cd = gam(1-2.0/c), gam(2.0/c+d)
g1c, g1cd = gam(1-1.0/c), gam(1.0/c+d)
gd = gam(d)
k = gd*g2c*g2cd - g1c**2 * g1cd**2
mu = g1c*g1cd / gd
mu2 = k / gd**2.0
g1, g2 = None, None
g3c, g3cd = None, None
if 's' in moments:
g3c, g3cd = gam(1-3.0/c), gam(3.0/c+d)
g1 = 2*g1c**3 * g1cd**3 + gd*gd*g3c*g3cd - 3*gd*g2c*g1c*g1cd*g2cd
g1 /= sqrt(k**3)
if 'k' in moments:
if g3c is None:
g3c = gam(1-3.0/c)
if g3cd is None:
g3cd = gam(3.0/c+d)
g4c, g4cd = gam(1-4.0/c), gam(4.0/c+d)
g2 = 6*gd*g2c*g2cd * g1c**2 * g1cd**2 + gd**3 * g4c*g4cd
g2 -= 3*g1c**4 * g1cd**4 -4*gd**2*g3c*g1c*g1cd*g3cd
return mu, mu2, g1, g2
burr = burr_gen(a=0.0, name='burr', longname="Burr",
shapes="c, d", extradoc="""
Burr distribution
burr.pdf(x,c,d) = c*d * x**(-c-1) * (1+x**(-c))**(-d-1)
for x > 0.
""")
# Fisk distribution
# burr is a generalization
class fisk_gen(burr_gen):
def _pdf(self, x, c):
return burr_gen._pdf(self, x, c, 1.0)
def _cdf(self, x, c):
return burr_gen._cdf(self, x, c, 1.0)
def _ppf(self, x, c):
return burr_gen._ppf(self, x, c, 1.0)
def _stats(self, c):
return burr_gen._stats(self, c, 1.0)
def _entropy(self, c):
return 2 - log(c)
fisk = fisk_gen(a=0.0, name='fisk', longname="Fisk",
shapes='c', extradoc="""
Fisk distribution.
Also known as the log-logistic distribution.
Burr distribution with d=1.
"""
)
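# Illustrative sketch (not used elsewhere in this module): as the comments
# above state, fisk is burr with d fixed at 1.  A quick numerical check of
# that relationship, assuming the generic pdf machinery works as in
# scipy.stats; the argument values are arbitrary.
def _example_fisk_is_burr_d1(x=1.5, c=3.0):
    return fisk.pdf(x, c), burr.pdf(x, c, 1.0)   # the two values should agree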
## Cauchy
# median = loc
class cauchy_gen(rv_continuous):
def _pdf(self, x):
return 1.0/pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi*q-pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/pi*arctan(x)
def _isf(self, q):
return tan(pi/2.0-pi*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(4*pi)
cauchy = cauchy_gen(name='cauchy',longname='Cauchy',extradoc="""
Cauchy distribution
cauchy.pdf(x) = 1/(pi*(1+x**2))
This is the t distribution with one degree of freedom.
"""
)
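# Illustrative sketch (not used elsewhere in this module): the Cauchy
# distribution has no mean or variance (see _stats above), so spread is
# usually described by quantiles instead.  Assumes the generic ppf code
# behaves as in scipy.stats.
def _example_cauchy_quantiles():
    # interquartile range of the standard Cauchy is exactly 2:
    # ppf(0.25) = -1 and ppf(0.75) = +1
    return cauchy.ppf(0.25), cauchy.ppf(0.75)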
## Chi
## (positive square-root of chi-square)
## chi(1, loc, scale) = halfnormal
## chi(2, 0, scale) = Rayleigh
## chi(3, 0, scale) = MaxWell
class chi_gen(rv_continuous):
def _rvs(self, df):
return sqrt(chi2.rvs(df,size=self._size))
def _pdf(self, x, df):
return x**(df-1.)*exp(-x*x*0.5)/(2.0)**(df*0.5-1)/gam(df*0.5)
def _cdf(self, x, df):
return special.gammainc(df*0.5,0.5*x*x)
def _ppf(self, q, df):
return sqrt(2*special.gammaincinv(df*0.5,q))
def _stats(self, df):
mu = sqrt(2)*special.gamma(df/2.0+0.5)/special.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/arr(mu2**1.5)
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= arr(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0,name='chi',shapes='df',extradoc="""
Chi distribution
chi.pdf(x,df) = x**(df-1)*exp(-x**2/2)/(2**(df/2-1)*gamma(df/2))
for x > 0.
"""
)
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
def _rvs(self, df):
return mtrand.chisquare(df,self._size)
def _pdf(self, x, df):
return exp(self._logpdf(x, df))
def _logpdf(self, x, df):
#term1 = (df/2.-1)*log(x)
#term1[(df==2)*(x==0)] = 0
#avoid 0*log(0)==nan
return (df/2.-1)*log(x+1e-300) - x/2. - gamln(df/2.) - (log(2)*df)/2.
## Px = x**(df/2.0-1)*exp(-x/2.0)
## Px /= special.gamma(df/2.0)* 2**(df/2.0)
## return log(Px)
def _cdf(self, x, df):
return special.chdtr(df, x)
def _sf(self, x, df):
return special.chdtrc(df, x)
def _isf(self, p, df):
return special.chdtri(df, p)
def _ppf(self, p, df):
return self._isf(1.0-p, df)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0,name='chi2',longname='A chi-squared',shapes='df',
extradoc="""
Chi-squared distribution
chi2.pdf(x,df) = 1/(2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
"""
)
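# Illustrative sketch (not used elsewhere in this module): chi (above) is the
# positive square root of chi2, so their CDFs satisfy
# chi2.cdf(x, df) == chi.cdf(sqrt(x), df).  A small numerical check, assuming
# the generic cdf machinery behaves as in scipy.stats.
def _example_chi_chi2_relation(x=4.0, df=3):
    import math
    return chi2.cdf(x, df), chi.cdf(math.sqrt(x), df)   # should be equal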
## Cosine (Approximation to the Normal)
class cosine_gen(rv_continuous):
def _pdf(self, x):
return 1.0/2/pi*(1+cos(x))
def _cdf(self, x):
return 1.0/2/pi*(pi + x + sin(x))
def _stats(self):
return 0.0, pi*pi/3.0-2.0, 0.0, -6.0*(pi**4-90)/(5.0*(pi*pi-6)**2)
def _entropy(self):
return log(4*pi)-1.0
cosine = cosine_gen(a=-pi,b=pi,name='cosine',extradoc="""
Cosine distribution (approximation to the normal)
cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
for -pi <= x <= pi.
""")
## Double Gamma distribution
class dgamma_gen(rv_continuous):
def _rvs(self, a):
u = random(size=self._size)
return (gamma.rvs(a,size=self._size)*where(u>=0.5,1,-1))
def _pdf(self, x, a):
ax = abs(x)
return 1.0/(2*special.gamma(a))*ax**(a-1.0) * exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return (a-1.0)*log(ax) - ax - log(2) - gamln(a)
def _cdf(self, x, a):
fac = 0.5*special.gammainc(a,abs(x))
return where(x>0,0.5+fac,0.5-fac)
def _sf(self, x, a):
fac = 0.5*special.gammainc(a,abs(x))
#return where(x>0,0.5-0.5*fac,0.5+0.5*fac)
return where(x>0,0.5-fac,0.5+fac)
def _ppf(self, q, a):
fac = special.gammainccinv(a,1-abs(2*q-1))
return where(q>0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma',longname="A double gamma",
shapes='a',extradoc="""
Double gamma distribution
dgamma.pdf(x,a) = 1/(2*gamma(a))*abs(x)**(a-1)*exp(-abs(x))
for a > 0.
"""
)
## Double Weibull distribution
##
class dweibull_gen(rv_continuous):
def _rvs(self, c):
u = random(size=self._size)
return weibull_min.rvs(c, size=self._size)*(where(u>=0.5,1,-1))
def _pdf(self, x, c):
ax = abs(x)
Px = c/2.0*ax**(c-1.0)*exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return log(c) - log(2.0) + (c-1.0)*log(ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5*exp(-abs(x)**c)
return where(x > 0, 1-Cx1, Cx1)
def _ppf_skip(self, q, c):
fac = where(q<=0.5,2*q,2*q-1)
fac = pow(arr(log(1.0/fac)),1.0/c)
return where(q>0.5,fac,-fac)
def _stats(self, c):
var = gam(1+2.0/c)
return 0.0, var, 0.0, gam(1+4.0/c)/var**2.0 - 3.0
dweibull = dweibull_gen(name='dweibull',longname="A double Weibull",
shapes='c',extradoc="""
Double Weibull distribution
dweibull.pdf(x,c) = c/2*abs(x)**(c-1)*exp(-abs(x)**c)
"""
)
## ERLANG
##
## Special case of the Gamma distribution with shape parameter an integer.
##
class erlang_gen(rv_continuous):
def _rvs(self, n):
return gamma.rvs(n,size=self._size)
def _argcheck(self, n):
return (n > 0) & (floor(n)==n)
def _pdf(self, x, n):
Px = (x)**(n-1.0)*exp(-x)/special.gamma(n)
return Px
def _logpdf(self, x, n):
return (n-1.0)*log(x) - x - gamln(n)
def _cdf(self, x, n):
return special.gdtr(1.0,n,x)
def _sf(self, x, n):
return special.gdtrc(1.0,n,x)
def _ppf(self, q, n):
return special.gdtrix(1.0, n, q)
def _stats(self, n):
n = n*1.0
return n, n, 2/sqrt(n), 6/n
def _entropy(self, n):
return special.psi(n)*(1-n) + 1 + gamln(n)
erlang = erlang_gen(a=0.0,name='erlang',longname='An Erlang',
shapes='n',extradoc="""
Erlang distribution (Gamma with integer shape parameter)
"""
)
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
## scale == 1.0 / lambda
class expon_gen(rv_continuous):
def _rvs(self):
return mtrand.standard_exponential(self._size)
def _pdf(self, x):
return exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -expm1(-x)
def _ppf(self, q):
return -log1p(-q)
def _sf(self,x):
return exp(-x)
def _logsf(self, x):
return -x
def _isf(self,q):
return -log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
expon = expon_gen(a=0.0,name='expon',longname="An exponential",
extradoc="""
Exponential distribution
expon.pdf(x) = exp(-x)
for x >= 0.
scale = 1.0 / lambda
"""
)
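# Illustrative sketch (not used elsewhere in this module): the extradoc above
# notes scale = 1.0 / lambda, so an exponential with rate lambda is
# expon(scale=1.0/lambda).  Assumes the generic mean/ppf code behaves as in
# scipy.stats; lam is just an example rate.
def _example_expon_rate(lam=2.0):
    mean = expon.mean(scale=1.0 / lam)        # 1/lambda, here 0.5
    median = expon.ppf(0.5, scale=1.0 / lam)  # log(2)/lambda
    return mean, median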
## Exponentiated Weibull
class exponweib_gen(rv_continuous):
def _pdf(self, x, a, c):
exc = exp(-x**c)
return a*c*(1-exc)**arr(a-1) * exc * x**(c-1)
def _logpdf(self, x, a, c):
exc = exp(-x**c)
return log(a) + log(c) + (a-1.)*log(1-exc) - x**c + (c-1.0)*log(x)
def _cdf(self, x, a, c):
exm1c = -expm1(-x**c)
return arr((exm1c)**a)
def _ppf(self, q, a, c):
return (-log1p(-q**(1.0/a)))**arr(1.0/c)
exponweib = exponweib_gen(a=0.0,name='exponweib',
longname="An exponentiated Weibull",
shapes="a, c",extradoc="""
Exponentiated Weibull distribution
exponweib.pdf(x,a,c) = a*c*(1-exp(-x**c))**(a-1)*exp(-x**c)*x**(c-1)
for x > 0, a, c > 0.
"""
)
## Exponential Power
class exponpow_gen(rv_continuous):
def _pdf(self, x, b):
xbm1 = arr(x**(b-1.0))
xb = xbm1 * x
return exp(1)*b*xbm1 * exp(xb - exp(xb))
def _logpdf(self, x, b):
xb = x**(b-1.0)*x
return 1 + log(b) + (b-1.0)*log(x) + xb - exp(xb)
def _cdf(self, x, b):
xb = arr(x**b)
return -expm1(-expm1(xb))
def _sf(self, x, b):
xb = arr(x**b)
return exp(-expm1(xb))
def _isf(self, x, b):
return (log1p(-log(x)))**(1./b)
def _ppf(self, q, b):
return pow(log1p(-log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0,name='exponpow',longname="An exponential power",
shapes='b',extradoc="""
Exponential Power distribution
exponpow.pdf(x,b) = b*x**(b-1) * exp(1+x**b - exp(x**b))
for x >= 0, b > 0.
"""
)
## Fatigue-Life (Birnbaum-Saunders)
class fatiguelife_gen(rv_continuous):
def _rvs(self, c):
z = norm.rvs(size=self._size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*sqrt(1 + x2)
return t
def _pdf(self, x, c):
return (x+1)/arr(2*c*sqrt(2*pi*x**3))*exp(-(x-1)**2/arr((2.0*x*c**2)))
def _logpdf(self, x, c):
return log(x+1) - (x-1)**2 / (2.0*x*c**2) - log(2*c) - 0.5*(log(2*pi) + 3*log(x))
def _cdf(self, x, c):
return special.ndtr(1.0/c*(sqrt(x)-1.0/arr(sqrt(x))))
def _ppf(self, q, c):
tmp = c*special.ndtri(q)
return 0.25*(tmp + sqrt(tmp**2 + 4))**2
def _stats(self, c):
c2 = c*c
mu = c2 / 2.0 + 1
den = 5*c2 + 4
mu2 = c2*den /4.0
g1 = 4*c*sqrt(11*c2+6.0)/den**1.5
g2 = 6*c2*(93*c2+41.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0,name='fatiguelife',
longname="A fatigue-life (Birnbaum-Sanders)",
shapes='c',extradoc="""
Fatigue-life (Birnbaum-Sanders) distribution
fatiguelife.pdf(x,c) = (x+1)/(2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
for x > 0.
"""
)
## Folded Cauchy
class foldcauchy_gen(rv_continuous):
def _rvs(self, c):
return abs(cauchy.rvs(loc=c,size=self._size))
def _pdf(self, x, c):
return 1.0/pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/pi*(arctan(x-c) + arctan(x+c))
def _stats(self, c):
return inf, inf, nan, nan
# setting xb=1000 allows calculating the ppf up to about q=0.9993
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy',xb=1000,
longname = "A folded Cauchy",
shapes='c',extradoc="""
Folded Cauchy distribution
foldcauchy.pdf(x,c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
for x >= 0.
"""
)
## F
class f_gen(rv_continuous):
def _rvs(self, dfn, dfd):
return mtrand.f(dfn, dfd, self._size)
def _pdf(self, x, dfn, dfd):
# n = arr(1.0*dfn)
# m = arr(1.0*dfd)
# Px = m**(m/2) * n**(n/2) * x**(n/2-1)
# Px /= (m+n*x)**((n+m)/2)*special.beta(n/2,m/2)
return exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0*dfn
m = 1.0*dfd
lPx = m/2*log(m) + n/2*log(n) + (n/2-1)*log(x)
lPx -= ((n+m)/2)*log(m+n*x) + special.betaln(n/2,m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return special.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return special.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return special.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v2 = arr(dfd*1.0)
v1 = arr(dfn*1.0)
mu = where (v2 > 2, v2 / arr(v2 - 2), inf)
mu2 = 2*v2*v2*(v2+v1-2)/(v1*(v2-2)**2 * (v2-4))
mu2 = where(v2 > 4, mu2, inf)
g1 = 2*(v2+2*v1-2)/(v2-6)*sqrt((2*v2-4)/(v1*(v2+v1-2)))
g1 = where(v2 > 6, g1, nan)
g2 = 3/(2*v2-16)*(8+g1*g1*(v2-6))
g2 = where(v2 > 8, g2, nan)
return mu, mu2, g1, g2
f = f_gen(a=0.0,name='f',longname='An F',shapes="dfn, dfd",
extradoc="""
F distribution
df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
F.pdf(x,df1,df2) = --------------------------------------------
(df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
for x > 0.
"""
)
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: the Regress+ docs have the scale parameter correct, but the first
## parameter given there is a shape parameter, A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
def _rvs(self, c):
return abs(norm.rvs(loc=c,size=self._size))
def _pdf(self, x, c):
return sqrt(2.0/pi)*cosh(c*x)*exp(-(x*x+c*c)/2.0)
def _cdf(self, x, c,):
return special.ndtr(x-c) + special.ndtr(x+c) - 1.0
def _stats(self, c):
fac = special.erf(c/sqrt(2))
mu = sqrt(2.0/pi)*exp(-0.5*c*c)+c*fac
mu2 = c*c + 1 - mu*mu
c2 = c*c
g1 = sqrt(2/pi)*exp(-1.5*c2)*(4-pi*exp(c2)*(2*c2+1.0))
g1 += 2*c*fac*(6*exp(-c2) + 3*sqrt(2*pi)*c*exp(-c2/2.0)*fac + \
pi*c*(fac*fac-1))
g1 /= pi*mu2**1.5
g2 = c2*c2+6*c2+3+6*(c2+1)*mu*mu - 3*mu**4
g2 -= 4*exp(-c2/2.0)*mu*(sqrt(2.0/pi)*(c2+2)+c*(c2+3)*exp(c2/2.0)*fac)
g2 /= mu2**2.0
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0,name='foldnorm',longname='A folded normal',
shapes='c',extradoc="""
Folded normal distribution
foldnormal.pdf(x,c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
for c >= 0.
"""
)
## Extreme Value Type II or Frechet
## (defined in Regress+ documentation as Extreme LB) as
## a limiting value distribution.
##
class frechet_r_gen(rv_continuous):
def _pdf(self, x, c):
return c*pow(x,c-1)*exp(-pow(x,c))
def _logpdf(self, x, c):
return log(c) + (c-1)*log(x) - pow(x,c)
def _cdf(self, x, c):
return -expm1(-pow(x,c))
def _ppf(self, q, c):
return pow(-log1p(-q),1.0/c)
def _munp(self, n, c):
return special.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_r = frechet_r_gen(a=0.0,name='frechet_r',longname="A Frechet right",
shapes='c',extradoc="""
A Frechet (right) distribution (also called Weibull minimum)
frechet_r.pdf(x,c) = c*x**(c-1)*exp(-x**c)
for x > 0, c > 0.
"""
)
weibull_min = frechet_r_gen(a=0.0,name='weibull_min',
longname="A Weibull minimum",
shapes='c',extradoc="""
A Weibull minimum distribution (also called a Frechet (right) distribution)
weibull_min.pdf(x,c) = c*x**(c-1)*exp(-x**c)
for x > 0, c > 0.
"""
)
class frechet_l_gen(rv_continuous):
def _pdf(self, x, c):
return c*pow(-x,c-1)*exp(-pow(-x,c))
def _cdf(self, x, c):
return exp(-pow(-x,c))
def _ppf(self, q, c):
return -pow(-log(q),1.0/c)
def _munp(self, n, c):
val = special.gamma(1.0+n*1.0/c)
if (int(n) % 2): sgn = -1
else: sgn = 1
return sgn*val
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_l = frechet_l_gen(b=0.0,name='frechet_l',longname="A Frechet left",
shapes='c',extradoc="""
A Frechet (left) distribution (also called Weibull maximum)
frechet_l.pdf(x,c) = c * (-x)**(c-1) * exp(-(-x)**c)
for x < 0, c > 0.
"""
)
weibull_max = frechet_l_gen(b=0.0,name='weibull_max',
longname="A Weibull maximum",
shapes='c',extradoc="""
A Weibull maximum distribution (also called a Frechet (left) distribution)
weibull_max.pdf(x,c) = c * (-x)**(c-1) * exp(-(-x)**c)
for x < 0, c > 0.
"""
)
## Generalized Logistic
##
class genlogistic_gen(rv_continuous):
def _pdf(self, x, c):
Px = c*exp(-x)/(1+exp(-x))**(c+1.0)
return Px
def _logpdf(self, x, c):
return log(c) - x - (c+1.0)*log1p(exp(-x))
def _cdf(self, x, c):
Cx = (1+exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -log(pow(q,-1.0/c)-1)
return vals
def _stats(self, c):
zeta = special.zeta
mu = _EULER + special.psi(c)
mu2 = pi*pi/6.0 + zeta(2,c)
g1 = -2*zeta(3,c) + 2*_ZETA3
g1 /= mu2**1.5
g2 = pi**4/15.0 + 6*zeta(4,c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic',
longname="A generalized logistic",
shapes='c',extradoc="""
Generalized logistic distribution
genlogistic.pdf(x,c) = c*exp(-x) / (1+exp(-x))**(c+1)
for x > 0, c > 0.
"""
)
## Generalized Pareto
class genpareto_gen(rv_continuous):
def _argcheck(self, c):
c = arr(c)
self.b = where(c < 0, 1.0/abs(c), inf)
return where(c==0, 0, 1)
def _pdf(self, x, c):
Px = pow(1+c*x,arr(-1.0-1.0/c))
return Px
def _logpdf(self, x, c):
return (-1.0-1.0/c) * np.log1p(c*x)
def _cdf(self, x, c):
return 1.0 - pow(1+c*x,arr(-1.0/c))
def _ppf(self, q, c):
vals = 1.0/c * (pow(1-q, -c)-1)
return vals
def _munp(self, n, c):
k = arange(0,n+1)
val = (-1.0/c)**n * sum(comb(n,k)*(-1)**k / (1.0-c*k),axis=0)
return where(c*n < 1, val, inf)
def _entropy(self, c):
if (c > 0):
return 1+c
else:
self.b = -1.0 / c
return rv_continuous._entropy(self, c)
genpareto = genpareto_gen(a=0.0,name='genpareto',
longname="A generalized Pareto",
shapes='c',extradoc="""
Generalized Pareto distribution
genpareto.pdf(x,c) = (1+c*x)**(-1-1/c)
for c != 0, and for x >= 0 for all c, and x < 1/abs(c) for c < 0.
"""
)
## Generalized Exponential
class genexpon_gen(rv_continuous):
def _pdf(self, x, a, b, c):
return (a+b*(-expm1(-c*x)))*exp((-a-b)*x+b*(-expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -expm1((-a-b)*x + b*(-expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-expm1(-c*x))) + (-a-b)*x+b*(-expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0,name='genexpon',
longname='A generalized exponential',
shapes='a, b, c',extradoc="""
Generalized exponential distribution (Ryu 1993)
f(x,a,b,c) = (a+b*(1-exp(-c*x))) * exp(-a*x-b*x+b/c*(1-exp(-c*x)))
for x >= 0, a,b,c > 0.
a, b, c are the first, second and third shape parameters.
References
----------
"The Exponential Distribution: Theory, Methods and Applications",
N. Balakrishnan, Asit P. Basu
"""
)
## Generalized Extreme Value
## c=0 is just gumbel distribution.
##  This version now also accepts c==0
##  (gumbel_r can still be used directly for the c==0 case)
# new version by Per Brodtkorb, see ticket:767
# also works for c==0, special case is gumbel_r
# increased precision for small c
class genextreme_gen(rv_continuous):
def _argcheck(self, c):
min = np.minimum
max = np.maximum
sml = floatinfo.machar.xmin
#self.b = where(c > 0, 1.0 / c,inf)
#self.a = where(c < 0, 1.0 / c, -inf)
self.b = where(c > 0, 1.0 / max(c, sml),inf)
self.a = where(c < 0, 1.0 / min(c,-sml), -inf)
return where(abs(c)==inf, 0, 1) #True #(c!=0)
def _pdf(self, x, c):
## ex2 = 1-c*x
## pex2 = pow(ex2,1.0/c)
## p2 = exp(-pex2)*pex2/ex2
## return p2
cx = c*x
logex2 = where((c==0)*(x==x),0.0,log1p(-cx))
logpex2 = where((c==0)*(x==x),-x,logex2/c)
pex2 = exp(logpex2)
# % Handle special cases
logpdf = where((cx==1) | (cx==-inf),-inf,-pex2+logpex2-logex2)
putmask(logpdf,(c==1) & (x==1),0.0) # logpdf(c==1 & x==1) = 0; % 0^0 situation
return exp(logpdf)
def _cdf(self, x, c):
#return exp(-pow(1-c*x,1.0/c))
loglogcdf = where((c==0)*(x==x),-x,log1p(-c*x)/c)
return exp(-exp(loglogcdf))
def _ppf(self, q, c):
#return 1.0/c*(1.-(-log(q))**c)
x = -log(-log(q))
return where((c==0)*(x==x),x,-expm1(-c*x)/c)
def _stats(self,c):
g = lambda n : gam(n*c+1)
g1 = g(1)
g2 = g(2)
g3 = g(3);
g4 = g(4)
g2mg12 = where(abs(c)<1e-7,(c*pi)**2.0/6.0,g2-g1**2.0)
gam2k = where(abs(c)<1e-7,pi**2.0/6.0, expm1(gamln(2.0*c+1.0)-2*gamln(c+1.0))/c**2.0);
eps = 1e-14
gamk = where(abs(c)<eps,-_EULER,expm1(gamln(c+1))/c)
m = where(c<-1.0,nan,-gamk)
v = where(c<-0.5,nan,g1**2.0*gam2k)
#% skewness
sk1 = where(c<-1./3,nan,np.sign(c)*(-g3+(g2+2*g2mg12)*g1)/((g2mg12)**(3./2.)));
sk = where(abs(c)<=eps**0.29,12*sqrt(6)*_ZETA3/pi**3,sk1)
#% The kurtosis is:
ku1 = where(c<-1./4,nan,(g4+(-4*g3+3*(g2+g2mg12)*g1)*g1)/((g2mg12)**2))
ku = where(abs(c)<=(eps)**0.23,12.0/5.0,ku1-3.0)
return m,v,sk,ku
def _munp(self, n, c):
k = arange(0,n+1)
vals = 1.0/c**n * sum(comb(n,k) * (-1)**k * special.gamma(c*k + 1),axis=0)
return where(c*n > -1, vals, inf)
genextreme = genextreme_gen(name='genextreme',
longname="A generalized extreme value",
shapes='c',extradoc="""
Generalized extreme value (see gumbel_r for c=0)
genextreme.pdf(x,c) = exp(-exp(-x))*exp(-x) for c==0
genextreme.pdf(x,c) = exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1)
for x <= 1/c, c > 0
"""
)
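# Illustrative sketch (not used elsewhere in this module): the comments above
# note that genextreme with c==0 reduces to gumbel_r.  A quick numerical
# comparison, assuming the c==0 handling in _cdf behaves as described; x is
# an arbitrary example point.
def _example_genextreme_gumbel(x=0.7):
    return genextreme.cdf(x, 0.0), gumbel_r.cdf(x)   # should agree for c == 0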
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
def _rvs(self, a):
return mtrand.standard_gamma(a, self._size)
def _pdf(self, x, a):
return x**(a-1)*exp(-x)/special.gamma(a)
def _logpdf(self, x, a):
return (a-1)*log(x) - x - gamln(a)
def _cdf(self, x, a):
return special.gammainc(a, x)
def _ppf(self, q, a):
return special.gammaincinv(a,q)
def _stats(self, a):
return a, a, 2.0/sqrt(a), 6.0/a
def _entropy(self, a):
return special.psi(a)*(1-a) + 1 + gamln(a)
def _fitstart(self, data):
a = 4 / _skew(data)**2
return super(gamma_gen, self)._fitstart(data, args=(a,))
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
if floc == 0:
xbar = ravel(data).mean()
logx_bar = ravel(log(data)).mean()
s = log(xbar) - logx_bar
def func(a):
return log(a) - special.digamma(a) - s
aest = (3-s + math.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
scale = xbar / a
return a, floc, scale
else:
return super(gamma_gen, self).fit(data, *args, **kwds)
gamma = gamma_gen(a=0.0,name='gamma',longname='A gamma',
shapes='a',extradoc="""
Gamma distribution
For a = integer, this is the Erlang distribution, and for a=1 it is the
exponential distribution.
gamma.pdf(x,a) = x**(a-1)*exp(-x)/gamma(a)
for x >= 0, a > 0.
"""
)
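# Illustrative sketch (not used elsewhere in this module): gamma_gen.fit above
# has a special branch when floc == 0, solving
# log(a) - digamma(a) = log(xbar) - mean(log(x)) with brentq instead of a full
# numerical MLE.  The data are arbitrary positive example values; generic
# behavior is assumed to match scipy.stats.
def _example_gamma_fit_floc0():
    data = [0.5, 1.2, 2.3, 0.9, 3.1, 1.7, 0.8, 2.6]
    a, loc, scale = gamma.fit(data, floc=0)
    return a, loc, scale   # loc is 0 by construction; scale == mean(data)/a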
# Generalized Gamma
class gengamma_gen(rv_continuous):
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
return abs(c)* exp((c*a-1)*log(x)-x**c- gamln(a))
def _cdf(self, x, a, c):
val = special.gammainc(a,x**c)
cond = c + 0*val
return where(cond>0,val,1-val)
def _ppf(self, q, a, c):
val1 = special.gammaincinv(a,q)
val2 = special.gammaincinv(a,1.0-q)
ic = 1.0/c
cond = c+0*val1
return where(cond > 0,val1**ic,val2**ic)
def _munp(self, n, a, c):
return special.gamma(a+n*1.0/c) / special.gamma(a)
def _entropy(self, a,c):
val = special.psi(a)
return a*(1-val) + 1.0/c*val + gamln(a)-log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma',
longname='A generalized gamma',
shapes="a, c", extradoc="""
Generalized gamma distribution
gengamma.pdf(x,a,c) = abs(c)*x**(c*a-1)*exp(-x**c)/gamma(a)
for x > 0, a > 0, and c != 0.
"""
)
## Generalized Half-Logistic
##
class genhalflogistic_gen(rv_continuous):
def _argcheck(self, c):
self.b = 1.0 / c
return (c > 0)
def _pdf(self, x, c):
limit = 1.0/c
tmp = arr(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = arr(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self,c):
return 2 - (2*c+1)*log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic',
longname="A generalized half-logistic",
shapes='c',extradoc="""
Generalized half-logistic
genhalflogistic.pdf(x,c) = 2*(1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
for 0 <= x <= 1/c, and c > 0.
"""
)
## Gompertz (Truncated Gumbel)
## Defined for x>=0
class gompertz_gen(rv_continuous):
def _pdf(self, x, c):
ex = exp(x)
return c*ex*exp(-c*(ex-1))
def _cdf(self, x, c):
return 1.0-exp(-c*(exp(x)-1))
def _ppf(self, q, c):
return log(1-1.0/c*log(1-q))
def _entropy(self, c):
return 1.0 - log(c) - exp(c)*special.expn(1,c)
gompertz = gompertz_gen(a=0.0, name='gompertz',
longname="A Gompertz (truncated Gumbel) distribution",
shapes='c',extradoc="""
Gompertz (truncated Gumbel) distribution
gompertz.pdf(x,c) = c*exp(x) * exp(-c*(exp(x)-1))
for x >= 0, c > 0.
"""
)
## Gumbel, Log-Weibull, Fisher-Tippett, Gompertz
## The left-skewed gumbel distribution.
## and right-skewed are available as gumbel_l and gumbel_r
class gumbel_r_gen(rv_continuous):
def _pdf(self, x):
ex = exp(-x)
return ex*exp(-ex)
def _logpdf(self, x):
return -x - exp(-x)
def _cdf(self, x):
return exp(-exp(-x))
def _logcdf(self, x):
return -exp(-x)
def _ppf(self, q):
return -log(-log(q))
def _stats(self):
return _EULER, pi*pi/6.0, \
12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return 1.0608407169541684911
gumbel_r = gumbel_r_gen(name='gumbel_r',longname="A (right-skewed) Gumbel",
extradoc="""
Right-skewed Gumbel (Log-Weibull, Fisher-Tippett, Gompertz) distribution
gumbel_r.pdf(x) = exp(-(x+exp(-x)))
"""
)
class gumbel_l_gen(rv_continuous):
def _pdf(self, x):
ex = exp(x)
return ex*exp(-ex)
def _logpdf(self, x):
return x - exp(x)
def _cdf(self, x):
return 1.0-exp(-exp(x))
def _ppf(self, q):
return log(-log(1-q))
def _stats(self):
return -_EULER, pi*pi/6.0, \
-12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return 1.0608407169541684911
gumbel_l = gumbel_l_gen(name='gumbel_l',longname="A left-skewed Gumbel",
extradoc="""
Left-skewed Gumbel distribution
gumbel_l.pdf(x) = exp(x - exp(x))
"""
)
# Half-Cauchy
class halfcauchy_gen(rv_continuous):
def _pdf(self, x):
return 2.0/pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/pi) - np.log1p(x*x)
def _cdf(self, x):
return 2.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi/2*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(2*pi)
halfcauchy = halfcauchy_gen(a=0.0,name='halfcauchy',
longname="A Half-Cauchy",extradoc="""
Half-Cauchy distribution
halfcauchy.pdf(x) = 2/(pi*(1+x**2))
for x >= 0.
"""
)
## Half-Logistic
##
class halflogistic_gen(rv_continuous):
def _pdf(self, x):
return 0.5/(cosh(x/2.0))**2.0
def _cdf(self, x):
return tanh(x/2.0)
def _ppf(self, q):
return 2*arctanh(q)
def _munp(self, n):
if n==1: return 2*log(2)
if n==2: return pi*pi/3.0
if n==3: return 9*_ZETA3
if n==4: return 7*pi**4 / 15.0
return 2*(1-pow(2.0,1-n))*special.gamma(n+1)*special.zeta(n,1)
def _entropy(self):
return 2-log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic',
longname="A half-logistic",
extradoc="""
Half-logistic distribution
halflogistic.pdf(x) = 2*exp(-x)/(1+exp(-x))**2 = 1/2*sech(x/2)**2
for x >= 0.
"""
)
## Half-normal = chi(1, loc, scale)
class halfnorm_gen(rv_continuous):
def _rvs(self):
return abs(norm.rvs(size=self._size))
def _pdf(self, x):
return sqrt(2.0/pi)*exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/pi) - x*x/2.0
def _cdf(self, x):
return special.ndtr(x)*2-1.0
def _ppf(self, q):
return special.ndtri((1+q)/2.0)
def _stats(self):
return sqrt(2.0/pi), 1-2.0/pi, sqrt(2)*(4-pi)/(pi-2)**1.5, \
8*(pi-3)/(pi-2)**2
def _entropy(self):
return 0.5*log(pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm',
longname="A half-normal",
extradoc="""
Half-normal distribution
halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
for x > 0.
"""
)
## Hyperbolic Secant
class hypsecant_gen(rv_continuous):
def _pdf(self, x):
return 1.0/(pi*cosh(x))
def _cdf(self, x):
return 2.0/pi*arctan(exp(x))
def _ppf(self, q):
return log(tan(pi*q/2.0))
def _stats(self):
return 0, pi*pi/4, 0, 2
def _entropy(self):
return log(2*pi)
hypsecant = hypsecant_gen(name='hypsecant',longname="A hyperbolic secant",
extradoc="""
Hyperbolic secant distribution
hypsecant.pdf(x) = 1/pi * sech(x)
"""
)
## Gauss Hypergeometric
class gausshyper_gen(rv_continuous):
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c==c) & (z==z)
def _pdf(self, x, a, b, c, z):
Cinv = gam(a)*gam(b)/gam(a+b)*special.hyp2f1(c,a,a+b,-z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = special.beta(n+a,b) / special.beta(a,b)
num = special.hyp2f1(c,a+n,a+b+n,-z)
den = special.hyp2f1(c,a,a+b,-z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper',
longname="A Gauss hypergeometric",
shapes="a, b, c, z",
extradoc="""
Gauss hypergeometric distribution
gausshyper.pdf(x,a,b,c,z) = C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
for 0 <= x <= 1, a > 0, b > 0, and
C = 1/(B(a,b)F[2,1](c,a;a+b;-z))
"""
)
## Inverted Gamma
# special case of generalized gamma with c=-1
#
class invgamma_gen(rv_continuous):
def _pdf(self, x, a):
return exp(self._logpdf(x,a))
def _logpdf(self, x, a):
return (-(a+1)*log(x)-gamln(a) - 1.0/x)
def _cdf(self, x, a):
return 1.0-special.gammainc(a, 1.0/x)
def _ppf(self, q, a):
return 1.0/special.gammaincinv(a,1-q)
def _munp(self, n, a):
return exp(gamln(a-n) - gamln(a))
def _entropy(self, a):
return a - (a+1.0)*special.psi(a) + gamln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma',longname="An inverted gamma",
shapes='a',extradoc="""
Inverted gamma distribution
invgamma.pdf(x,a) = x**(-a-1)/gamma(a) * exp(-1/x)
for x > 0, a > 0.
"""
)
## Inverse Normal Distribution
# scale is gamma from DATAPLOT and B from Regress
_invnorm_msg = \
"""The `invnorm` distribution will be renamed to `invgauss` after scipy 0.9"""
class invnorm_gen(rv_continuous):
def _rvs(self, mu):
warnings.warn(_invnorm_msg, DeprecationWarning)
return mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
warnings.warn(_invnorm_msg, DeprecationWarning)
return 1.0/sqrt(2*pi*x**3.0)*exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
warnings.warn(_invnorm_msg, DeprecationWarning)
return -0.5*log(2*pi) - 1.5*log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
warnings.warn(_invnorm_msg, DeprecationWarning)
fac = sqrt(1.0/x)
C1 = norm.cdf(fac*(x-mu)/mu)
C1 += exp(2.0/mu)*norm.cdf(-fac*(x+mu)/mu)
return C1
def _stats(self, mu):
warnings.warn(_invnorm_msg, DeprecationWarning)
return mu, mu**3.0, 3*sqrt(mu), 15*mu
invnorm = invnorm_gen(a=0.0, name='invnorm', longname="An inverse normal",
shapes="mu",extradoc="""
Inverse normal distribution
NOTE: `invnorm` will be renamed to `invgauss` after scipy 0.9
invnorm.pdf(x,mu) = 1/sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
for x > 0.
"""
)
## Inverse Gaussian Distribution (used to be called 'invnorm'
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
def _rvs(self, mu):
return mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x**3.0)*exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*log(2*pi) - 1.5*log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = sqrt(1.0/x)
C1 = norm.cdf(fac*(x-mu)/mu)
C1 += exp(2.0/mu)*norm.cdf(-fac*(x+mu)/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss', longname="An inverse Gaussian",
shapes="mu",extradoc="""
Inverse Gaussian distribution
invgauss.pdf(x,mu) = 1/sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
for x > 0.
"""
)
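# Editor's note: illustrative sketch, not part of the original module. It checks
# invgauss.pdf against the closed form quoted in the extradoc string and does a
# cdf/ppf round trip; the ppf relies on the generic numeric inversion defined
# earlier in this module. Assumes numpy and the `invgauss` instance above.
def _example_invgauss_consistency():
    import numpy as np
    x = np.linspace(0.2, 4.0, 30)
    mu = 1.5
    manual = 1.0 / np.sqrt(2 * np.pi * x ** 3) * np.exp(-(x - mu) ** 2 / (2 * x * mu ** 2))
    assert np.allclose(invgauss.pdf(x, mu), manual)
    q = np.array([0.1, 0.5, 0.9])
    assert np.allclose(invgauss.cdf(invgauss.ppf(q, mu), mu), q)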
## Inverted Weibull
class invweibull_gen(rv_continuous):
def _pdf(self, x, c):
xc1 = x**(-c-1.0)
#xc2 = xc1*x
xc2 = x**(-c)
xc2 = exp(-xc2)
return c*xc1*xc2
def _cdf(self, x, c):
xc1 = x**(-c)
return exp(-xc1)
def _ppf(self, q, c):
return pow(-log(q),arr(-1.0/c))
def _entropy(self, c):
return 1+_EULER + _EULER / c - log(c)
invweibull = invweibull_gen(a=0,name='invweibull',
longname="An inverted Weibull",
shapes='c',extradoc="""
Inverted Weibull distribution
invweibull.pdf(x,c) = c*x**(-c-1)*exp(-x**(-c))
for x > 0, c > 0.
"""
)
## Johnson SB
class johnsonsb_gen(rv_continuous):
def _argcheck(self, a, b):
return (b > 0) & (a==a)
def _pdf(self, x, a, b):
trm = norm.pdf(a+b*log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return norm.cdf(a+b*log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0/(1+exp(-1.0/b*(norm.ppf(q)-a)))
johnsonsb = johnsonsb_gen(a=0.0,b=1.0,name='johnsonsb',
longname="A Johnson SB",
shapes="a, b",extradoc="""
Johnson SB distribution
johnsonsb.pdf(x,a,b) = b/(x*(1-x)) * phi(a + b*log(x/(1-x)))
for 0 < x < 1 and a,b > 0, and phi is the normal pdf.
"""
)
## Johnson SU
class johnsonsu_gen(rv_continuous):
def _argcheck(self, a, b):
return (b > 0) & (a==a)
def _pdf(self, x, a, b):
x2 = x*x
trm = norm.pdf(a+b*log(x+sqrt(x2+1)))
return b*1.0/sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return norm.cdf(a+b*log(x+sqrt(x*x+1)))
def _ppf(self, q, a, b):
return sinh((norm.ppf(q)-a)/b)
johnsonsu = johnsonsu_gen(name='johnsonsu',longname="A Johnson SU",
shapes="a, b", extradoc="""
Johnson SU distribution
johnsonsu.pdf(x,a,b) = b/sqrt(x**2+1) * phi(a + b*log(x+sqrt(x**2+1)))
for all x, a,b > 0, and phi is the normal pdf.
"""
)
## Laplace Distribution
class laplace_gen(rv_continuous):
def _rvs(self):
return mtrand.laplace(0, 1, size=self._size)
def _pdf(self, x):
return 0.5*exp(-abs(x))
def _cdf(self, x):
return where(x > 0, 1.0-0.5*exp(-x), 0.5*exp(x))
def _ppf(self, q):
return where(q > 0.5, -log(2*(1-q)), log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return log(2)+1
laplace = laplace_gen(name='laplace', longname="A Laplace",
extradoc="""
Laplacian distribution
laplace.pdf(x) = 1/2*exp(-abs(x))
"""
)
## Levy Distribution
class levy_gen(rv_continuous):
def _pdf(self, x):
return 1/sqrt(2*pi*x)/x*exp(-1/(2*x))
def _cdf(self, x):
return 2*(1-norm._cdf(1/sqrt(x)))
def _ppf(self, q):
val = norm._ppf(1-q/2.0)
return 1.0/(val*val)
def _stats(self):
return inf, inf, nan, nan
levy = levy_gen(a=0.0,name="levy", longname = "A Levy", extradoc="""
Levy distribution
levy.pdf(x) = 1/(x*sqrt(2*pi*x)) * exp(-1/(2*x))
for x > 0.
This is the same as the Levy-stable distribution with a=1/2 and b=1.
"""
)
## Left-skewed Levy Distribution
class levy_l_gen(rv_continuous):
def _pdf(self, x):
ax = abs(x)
return 1/sqrt(2*pi*ax)/ax*exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2*norm._cdf(1/sqrt(ax))-1
def _ppf(self, q):
val = norm._ppf((q+1.0)/2)
return -1.0/(val*val)
def _stats(self):
return inf, inf, nan, nan
levy_l = levy_l_gen(b=0.0,name="levy_l", longname = "A left-skewed Levy", extradoc="""
Left-skewed Levy distribution
levy_l.pdf(x) = 1/(abs(x)*sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
for x < 0.
This is the same as the Levy-stable distribution with a=1/2 and b=-1.
"""
)
## Levy-stable Distribution (only random variates)
class levy_stable_gen(rv_continuous):
def _rvs(self, alpha, beta):
sz = self._size
TH = uniform.rvs(loc=-pi/2.0,scale=pi,size=sz)
W = expon.rvs(size=sz)
if alpha==1:
return 2/pi*(pi/2+beta*TH)*tan(TH)-beta*log((pi/2*W*cos(TH))/(pi/2+beta*TH))
# else
ialpha = 1.0/alpha
aTH = alpha*TH
if beta==0:
return W/(cos(TH)/tan(aTH)+sin(TH))*((cos(aTH)+sin(aTH)*tan(TH))/W)**ialpha
# else
val0 = beta*tan(pi*alpha/2)
th0 = arctan(val0)/alpha
val3 = W/(cos(TH)/tan(alpha*(th0+TH))+sin(TH))
res3 = val3*((cos(aTH)+sin(aTH)*tan(TH)-val0*(sin(aTH)-cos(aTH)*tan(TH)))/W)**ialpha
return res3
def _argcheck(self, alpha, beta):
if beta == -1:
self.b = 0.0
elif beta == 1:
self.a = 0.0
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
def _pdf(self, x, alpha, beta):
raise NotImplementedError
levy_stable = levy_stable_gen(name='levy_stable', longname="A Levy-stable",
shapes="alpha, beta", extradoc="""
Levy-stable distribution (only random variates available -- ignore other docs)
"""
)
## Logistic (special case of generalized logistic with c=1)
## Sech-squared
class logistic_gen(rv_continuous):
def _rvs(self):
return mtrand.logistic(size=self._size)
def _pdf(self, x):
ex = exp(-x)
return ex / (1+ex)**2.0
def _cdf(self, x):
return 1.0/(1+exp(-x))
def _ppf(self, q):
return -log(1.0/q-1)
def _stats(self):
return 0, pi*pi/3.0, 0, 6.0/5.0
def _entropy(self):
return 1.0
logistic = logistic_gen(name='logistic', longname="A logistic",
extradoc="""
Logistic distribution
logistic.pdf(x) = exp(-x)/(1+exp(-x))**2
"""
)
## Log Gamma
#
class loggamma_gen(rv_continuous):
def _rvs(self, c):
return log(mtrand.gamma(c, size=self._size))
def _pdf(self, x, c):
return exp(c*x-exp(x)-gamln(c))
def _cdf(self, x, c):
return special.gammainc(c, exp(x))
def _ppf(self, q, c):
return log(special.gammaincinv(c,q))
def _munp(self,n,*args):
# use generic moment calculation using ppf
return self._mom0_sc(n,*args)
loggamma = loggamma_gen(name='loggamma', longname="A log gamma", shapes='c',
extradoc="""
Log gamma distribution
loggamma.pdf(x,c) = exp(c*x-exp(x)) / gamma(c)
for all x, c > 0.
"""
)
## Log-Laplace (Log Double Exponential)
##
class loglaplace_gen(rv_continuous):
def _pdf(self, x, c):
cd2 = c/2.0
c = where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _entropy(self, c):
return log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace',
longname="A log-Laplace",shapes='c',
extradoc="""
Log-Laplace distribution (Log Double Exponential)
loglaplace.pdf(x,c) = c/2*x**(c-1) for 0 < x < 1
= c/2*x**(-c-1) for x >= 1
for c > 0.
"""
)
## Lognormal (Cobb-Douglass)
## std is a shape parameter and is the variance of the underlying
## distribution.
## the mean of the underlying distribution is log(scale)
class lognorm_gen(rv_continuous):
def _rvs(self, s):
return exp(s * norm.rvs(size=self._size))
def _pdf(self, x, s):
Px = exp(-log(x)**2 / (2*s**2))
return Px / (s*x*sqrt(2*pi))
def _cdf(self, x, s):
return norm.cdf(log(x)/s)
def _ppf(self, q, s):
return exp(s*norm._ppf(q))
def _stats(self, s):
p = exp(s*s)
mu = sqrt(p)
mu2 = p*(p-1)
g1 = sqrt((p-1))*(2+p)
g2 = numpy.polyval([1,2,3,0,-6.0],p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5*(1+log(2*pi)+2*log(s))
lognorm = lognorm_gen(a=0.0, name='lognorm',
longname='A lognormal', shapes='s',
extradoc="""
Lognormal distribution
lognorm.pdf(x,s) = 1/(s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
for x > 0, s > 0.
If log x is normally distributed with mean mu and variance sigma**2,
then x is log-normally distributed with shape parameter sigma and scale
parameter exp(mu).
"""
)
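# Editor's note: illustrative sketch, not part of the original module. It
# demonstrates the relation stated in the docstring above: if log(X) is normal
# with mean mu and standard deviation sigma, then X is lognorm with shape
# s=sigma and scale=exp(mu). Uses the usual loc/scale keywords of the continuous
# machinery defined earlier in this module; assumes numpy.
def _example_lognorm_vs_norm():
    import numpy as np
    mu, sigma = 0.5, 0.75
    x = np.linspace(0.1, 6.0, 40)
    lhs = lognorm.cdf(x, sigma, scale=np.exp(mu))
    rhs = norm.cdf((np.log(x) - mu) / sigma)
    assert np.allclose(lhs, rhs)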
# Gibrat's distribution is just lognormal with s=1
class gilbrat_gen(lognorm_gen):
def _rvs(self):
return lognorm_gen._rvs(self, 1.0)
def _pdf(self, x):
return lognorm_gen._pdf(self, x, 1.0)
def _cdf(self, x):
return lognorm_gen._cdf(self, x, 1.0)
def _ppf(self, q):
return lognorm_gen._ppf(self, q, 1.0)
def _stats(self):
return lognorm_gen._stats(self, 1.0)
def _entropy(self):
return 0.5*log(2*pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat', longname='A Gilbrat',
extradoc="""
Gilbrat distribution
gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
"""
)
# MAXWELL
class maxwell_gen(rv_continuous):
"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``,
and given ``scale = 1.0 / sqrt(a)``, where a is the parameter used in
the Mathworld description [1]_.
The probability density function is given by :math:`\sqrt{2/\pi}\,x^2 \exp(-x^2/2)`
for ``x > 0``.
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self):
return chi.rvs(3.0,size=self._size)
def _pdf(self, x):
return sqrt(2.0/pi)*x*x*exp(-x*x/2.0)
def _cdf(self, x):
return special.gammainc(1.5,x*x/2.0)
def _ppf(self, q):
return sqrt(2*special.gammaincinv(1.5,q))
def _stats(self):
val = 3*pi-8
return 2*sqrt(2.0/pi), 3-8/pi, sqrt(2)*(32-10*pi)/val**1.5, \
(-12*pi*pi + 160*pi - 384) / val**2.0
def _entropy(self):
return _EULER + 0.5*log(2*pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell', extradoc="""
Maxwell distribution
maxwell.pdf(x) = sqrt(2/pi) * x**2 * exp(-x**2/2)
for x > 0.
"""
)
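# Editor's note: illustrative sketch, not part of the original module. The
# maxwell docstring above describes it as chi with df=3; this verifies the pdf
# agreement numerically using the `chi` instance defined earlier and numpy.
def _example_maxwell_is_chi3():
    import numpy as np
    x = np.linspace(0.1, 5.0, 50)
    assert np.allclose(maxwell.pdf(x), chi.pdf(x, 3))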
# Mielke's Beta-Kappa
class mielke_gen(rv_continuous):
def _pdf(self, x, k, s):
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q,s*1.0/k)
return pow(qsk/(1.0-qsk),1.0/s)
mielke = mielke_gen(a=0.0, name='mielke', longname="A Mielke's Beta-Kappa",
shapes="k, s", extradoc="""
Mielke's Beta-Kappa distribution
mielke.pdf(x,k,s) = k*x**(k-1) / (1+x**s)**(1+k/s)
for x > 0.
"""
)
# Nakagami (cf Chi)
class nakagami_gen(rv_continuous):
def _pdf(self, x, nu):
return 2*nu**nu/gam(nu)*(x**(2*nu-1.0))*exp(-nu*x*x)
def _cdf(self, x, nu):
return special.gammainc(nu,nu*x*x)
def _ppf(self, q, nu):
return sqrt(1.0/nu*special.gammaincinv(nu,q))
def _stats(self, nu):
mu = gam(nu+0.5)/gam(nu)/sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu*(1-4*nu*mu2)/2.0/nu/mu2**1.5
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami", longname="A Nakagami",
shapes='nu', extradoc="""
Nakagami distribution
nakagami.pdf(x,nu) = 2*nu**nu/gamma(nu) * x**(2*nu-1) * exp(-nu*x**2)
for x > 0, nu > 0.
"""
)
# Non-central chi-squared
# nc is lambda of definition, df is nu
class ncx2_gen(rv_continuous):
def _rvs(self, df, nc):
return mtrand.noncentral_chisquare(df,nc,self._size)
def _pdf(self, x, df, nc):
a = arr(df/2.0)
Px = exp(-nc/2.0)*special.hyp0f1(a,nc*x/4.0)
Px *= exp(-x/2.0)*x**(a-1) / arr(2**a * special.gamma(a))
return Px
def _cdf(self, x, df, nc):
return special.chndtr(x,df,nc)
def _ppf(self, q, df, nc):
return special.chndtrix(q,df,nc)
def _stats(self, df, nc):
val = df + 2.0*nc
return df + nc, 2*val, sqrt(8)*(val+nc)/val**1.5, \
12.0*(val+2*nc)/val**2.0
ncx2 = ncx2_gen(a=0.0, name='ncx2', longname="A non-central chi-squared",
shapes="df, nc", extradoc="""
Non-central chi-squared distribution
ncx2.pdf(x,df,nc) = exp(-(nc+df)/2)*1/2*(x/nc)**((df-2)/4)
* I[(df-2)/2](sqrt(nc*x))
for x > 0.
"""
)
# Non-central F
class ncf_gen(rv_continuous):
def _rvs(self, dfn, dfd, nc):
return mtrand.noncentral_f(dfn,dfd,nc,self._size)
def _pdf_skip(self, x, dfn, dfd, nc):
n1,n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + gamln(n1/2.)+gamln(1+n2/2.)
term -= gamln((n1+n2)/2.0)
Px = exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= special.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)),n2/2,n1/2-1)
Px /= special.beta(n1/2,n2/2)
#this function does not have a return
# drop it for now, the generic function seems to work ok
def _cdf(self, x, dfn, dfd, nc):
return special.ncfdtr(dfn,dfd,nc,x)
def _ppf(self, q, dfn, dfd, nc):
return special.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn *1.0/dfd)**n
term = gamln(n+0.5*dfn) + gamln(0.5*dfd-n) - gamln(dfd*0.5)
val *= exp(-nc / 2.0+term)
val *= special.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
mu = where(dfd <= 2, inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
mu2 = where(dfd <=4, inf, 2*(dfd*1.0/dfn)**2.0 * \
((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) / \
((dfd-2.0)**2.0 * (dfd-4.0)))
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf', longname="A non-central F distribution",
shapes="dfn, dfd, nc", extradoc="""
Non-central F distribution
ncf.pdf(x,df1,df2,nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2)))
* df1**(df1/2) * df2**(df2/2) * x**(df1/2-1)
* (df2+df1*x)**(-(df1+df2)/2)
* gamma(df1/2)*gamma(1+df2/2)
* L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2)))
/ (B(v1/2, v2/2) * gamma((v1+v2)/2))
for df1, df2, nc > 0.
"""
)
## Student t distribution
class t_gen(rv_continuous):
def _rvs(self, df):
return mtrand.standard_t(df, size=self._size)
#Y = f.rvs(df, df, size=self._size)
#sY = sqrt(Y)
#return 0.5*sqrt(df)*(sY-1.0/sY)
def _pdf(self, x, df):
r = arr(df*1.0)
Px = exp(gamln((r+1)/2)-gamln(r/2))
Px /= sqrt(r*pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = gamln((r+1)/2)-gamln(r/2)
lPx -= 0.5*log(r*pi) + (r+1)/2*log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return special.stdtr(df, x)
def _sf(self, x, df):
return special.stdtr(df, -x)
def _ppf(self, q, df):
return special.stdtrit(df, q)
def _isf(self, q, df):
return -special.stdtrit(df, q)
def _stats(self, df):
mu2 = where(df > 2, df / (df-2.0), inf)
g1 = where(df > 3, 0.0, nan)
g2 = where(df > 4, 6.0/(df-4.0), nan)
return 0, mu2, g1, g2
t = t_gen(name='t',longname="Student's T",
shapes="df", extradoc="""
Student's T distribution
gamma((df+1)/2)
t.pdf(x,df) = -----------------------------------------------
sqrt(pi*df)*gamma(df/2)*(1+x**2/df)**((df+1)/2)
for df > 0.
"""
)
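# Editor's note: illustrative sketch, not part of the original module. For large
# df Student's t approaches the standard normal; this checks that numerically
# with the `t` and `norm` instances of this module and numpy.
def _example_t_large_df_approaches_normal():
    import numpy as np
    x = np.linspace(-4.0, 4.0, 81)
    assert np.allclose(t.pdf(x, 1e6), norm.pdf(x), atol=1e-5)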
## Non-central T distribution
class nct_gen(rv_continuous):
def _rvs(self, df, nc):
return norm.rvs(loc=nc,size=self._size)*sqrt(df) / sqrt(chi2.rvs(df,size=self._size))
def _pdf(self, x, df, nc):
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*log(n) + gamln(n+1)
trm1 -= n*log(2)+nc*nc/2.+(n/2.)*log(fac1)+gamln(n/2.)
Px = exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = sqrt(2)*nc*x*special.hyp1f1(n/2+1,1.5,valF)
trm1 /= arr(fac1*special.gamma((n+1)/2))
trm2 = special.hyp1f1((n+1)/2,0.5,valF)
trm2 /= arr(sqrt(fac1)*special.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return special.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return special.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
val1 = gam((df-1.0)/2.0)
val2 = gam(df/2.0)
if 'm' in moments:
mu = nc*sqrt(df/2.0)*val1/val2
if 'v' in moments:
var = (nc*nc+1.0)*df/(df-2.0)
var -= nc*nc*df* val1**2 / 2.0 / val2**2
mu2 = var
if 's' in moments:
g1n = 2*nc*sqrt(df)*val1*((nc*nc*(2*df-7)-3)*val2**2 \
-nc*nc*(df-2)*(df-3)*val1**2)
g1d = (df-3)*sqrt(2*df*(nc*nc+1)/(df-2) - \
nc*nc*df*(val1/val2)**2) * val2 * \
(nc*nc*(df-2)*val1**2 - \
2*(nc*nc+1)*val2**2)
g1 = g1n/g1d
if 'k' in moments:
g2n = 2*(-3*nc**4*(df-2)**2 *(df-3) *(df-4)*val1**4 + \
2**(6-2*df) * nc*nc*(df-2)*(df-4)* \
(nc*nc*(2*df-7)-3)*pi* gam(df+1)**2 - \
4*(nc**4*(df-5)-6*nc*nc-3)*(df-3)*val2**4)
g2d = (df-3)*(df-4)*(nc*nc*(df-2)*val1**2 - \
2*(nc*nc+1)*val2)**2
g2 = g2n / g2d
return mu, mu2, g1, g2
nct = nct_gen(name="nct", longname="A Noncentral T",
shapes="df, nc", extradoc="""
Non-central Student T distribution
df**(df/2) * gamma(df+1)
nct.pdf(x,df,nc) = --------------------------------------------------
2**df*exp(nc**2/2)*(df+x**2)**(df/2) * gamma(df/2)
for df > 0, nc > 0.
"""
)
# Pareto
class pareto_gen(rv_continuous):
def _pdf(self, x, b):
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = extract(mask,b)
mu = valarray(shape(b),value=inf)
mu = place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = extract( mask,b)
mu2 = valarray(shape(b), value=inf)
mu2 = place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = extract( mask,b)
g1 = valarray(shape(b), value=nan)
vals = 2*(bt+1.0)*sqrt(bt-2.0)/((bt-3.0)*sqrt(bt))  # use the masked values bt so vals matches the mask
g1 = place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = extract( mask,b)
g2 = valarray(shape(b), value=nan)
vals = 6.0*polyval([1.0,1.0,-6,-2],bt)/ \
polyval([1.0,-7.0,12.0,0.0],bt)
g2 = place(g2, mask, vals)
return mu, mu2, g1, g2
def _entropy(self, c):
return 1 + 1.0/c - log(c)
pareto = pareto_gen(a=1.0, name="pareto", longname="A Pareto",
shapes="b", extradoc="""
Pareto distribution
pareto.pdf(x,b) = b/x**(b+1)
for x >= 1, b > 0.
"""
)
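# Editor's note: illustrative sketch, not part of the original module. It checks
# pareto.cdf against the closed form 1 - x**(-b) implied by the extradoc string
# and verifies that ppf inverts it. Assumes numpy and the `pareto` instance above.
def _example_pareto_cdf_ppf():
    import numpy as np
    b = 2.5
    x = np.linspace(1.0, 10.0, 40)
    assert np.allclose(pareto.cdf(x, b), 1.0 - x ** (-b))
    q = np.array([0.25, 0.5, 0.9])
    assert np.allclose(pareto.ppf(q, b), (1.0 - q) ** (-1.0 / b))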
# LOMAX (Pareto of the second kind.)
# Special case of Pareto of the first kind (location=-1.0)
class lomax_gen(rv_continuous):
def _pdf(self, x, c):
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return log(c) - (c+1)*log(1+x)
def _cdf(self, x, c):
return 1.0-1.0/(1.0+x)**c
def _sf(self, x, c):
return 1.0/(1.0+x)**c
def _logsf(self, x, c):
return -c*log(1+x)
def _ppf(self, q, c):
return pow(1.0-q,-1.0/c)-1
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-log(c)
lomax = lomax_gen(a=0.0, name="lomax",
longname="A Lomax (Pareto of the second kind)",
shapes="c", extradoc="""
Lomax (Pareto of the second kind) distribution
lomax.pdf(x,c) = c / (1+x)**(c+1)
for x >= 0, c > 0.
"""
)
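# Editor's note: illustrative sketch, not part of the original module. The
# comment above describes lomax as Pareto of the first kind shifted by loc=-1;
# this verifies pdf and cdf agreement with the `pareto` instance. Assumes numpy.
def _example_lomax_is_shifted_pareto():
    import numpy as np
    c = 3.0
    x = np.linspace(0.0, 8.0, 40)
    assert np.allclose(lomax.pdf(x, c), pareto.pdf(x + 1.0, c))
    assert np.allclose(lomax.cdf(x, c), pareto.cdf(x + 1.0, c))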
## Power-function distribution
## Special case of beta dist. with d =1.0
class powerlaw_gen(rv_continuous):
def _pdf(self, x, a):
return a*x**(a-1.0)
def _logpdf(self, x, a):
return log(a) + (a-1)*log(x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return a/(a+1.0), a*(a+2.0)/(a+1.0)**2, \
2*(1.0-a)*sqrt((a+2.0)/(a*(a+3.0))), \
6*polyval([1,-1,-6,2],a)/(a*(a+3.0)*(a+4))
def _entropy(self, a):
return 1 - 1.0/a - log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw",
longname="A power-function",
shapes="a", extradoc="""
Power-function distribution
powerlaw.pdf(x,a) = a*x**(a-1)
for 0 <= x <= 1, a > 0.
"""
)
# Power log normal
class powerlognorm_gen(rv_continuous):
def _pdf(self, x, c, s):
return c/(x*s)*norm.pdf(log(x)/s)*pow(norm.cdf(-log(x)/s),c*1.0-1.0)
def _cdf(self, x, c, s):
return 1.0 - pow(norm.cdf(-log(x)/s),c*1.0)
def _ppf(self, q, c, s):
return exp(-s*norm.ppf(pow(1.0-q,1.0/c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm",
longname="A power log-normal",
shapes="c, s", extradoc="""
Power log-normal distribution
powerlognorm.pdf(x,c,s) = c/(x*s) * phi(log(x)/s) * (Phi(-log(x)/s))**(c-1)
where phi is the normal pdf, and Phi is the normal cdf, and x > 0, s,c > 0.
"""
)
# Power Normal
class powernorm_gen(rv_continuous):
def _pdf(self, x, c):
return c*_norm_pdf(x)* \
(_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -norm.ppf(pow(1.0-q,1.0/c))
powernorm = powernorm_gen(name='powernorm', longname="A power normal",
shapes="c", extradoc="""
Power normal distribution
powernorm.pdf(x,c) = c * phi(x)*(Phi(-x))**(c-1)
where phi is the normal pdf, and Phi is the normal cdf, and x > 0, c > 0.
"""
)
# R-distribution (a general-purpose distribution with a variety of shapes).
# FIXME: PPF does not work.
class rdist_gen(rv_continuous):
def _pdf(self, x, c):
return np.power((1.0-x*x),c/2.0-1) / special.beta(0.5,c/2.0)
def _cdf_skip(self, x, c):
#error inspecial.hyp2f1 for some values see tickets 758, 759
return 0.5 + x/special.beta(0.5,c/2.0)* \
special.hyp2f1(0.5,1.0-c/2.0,1.5,x*x)
def _munp(self, n, c):
return (1-(n % 2))*special.beta((n+1.0)/2,c/2.0)
rdist = rdist_gen(a=-1.0,b=1.0, name="rdist", longname="An R-distributed",
shapes="c", extradoc="""
R-distribution
rdist.pdf(x,c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
for -1 <= x <= 1, c > 0.
"""
)
# Rayleigh distribution (this is chi with df=2 and loc=0.0)
# scale is the mode.
class rayleigh_gen(rv_continuous):
def _rvs(self):
return chi.rvs(2,size=self._size)
def _pdf(self, r):
return r*exp(-r*r/2.0)
def _cdf(self, r):
return 1.0-exp(-r*r/2.0)
def _ppf(self, q):
return sqrt(-2*log(1-q))
def _stats(self):
val = 4-pi
return np.sqrt(pi/2), val/2, 2*(pi-3)*sqrt(pi)/val**1.5, \
6*pi/val-16/val**2
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh",
longname="A Rayleigh",
extradoc="""
Rayleigh distribution
rayleigh.pdf(r) = r * exp(-r**2/2)
for x >= 0.
"""
)
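# Editor's note: illustrative sketch, not part of the original module. The
# comment above notes that rayleigh is chi with df=2; this checks the pdf and
# the closed-form ppf sqrt(-2*log(1-q)). Assumes numpy and the `chi` instance.
def _example_rayleigh_is_chi_df2():
    import numpy as np
    x = np.linspace(0.1, 4.0, 40)
    assert np.allclose(rayleigh.pdf(x), chi.pdf(x, 2))
    q = np.array([0.1, 0.5, 0.95])
    assert np.allclose(rayleigh.ppf(q), np.sqrt(-2.0 * np.log(1.0 - q)))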
# Reciprocal Distribution
class reciprocal_gen(rv_continuous):
def _argcheck(self, a, b):
self.a = a
self.b = b
self.d = log(b*1.0 / a)
return (a > 0) & (b > 0) & (b > a)
def _pdf(self, x, a, b):
# argcheck should be called before _pdf
return 1.0/(x*self.d)
def _logpdf(self, x, a, b):
return -log(x) - log(self.d)
def _cdf(self, x, a, b):
return (log(x)-log(a)) / self.d
def _ppf(self, q, a, b):
return a*pow(b*1.0/a,q)
def _munp(self, n, a, b):
return 1.0/self.d / n * (pow(b*1.0,n) - pow(a*1.0,n))
def _entropy(self,a,b):
return 0.5*log(a*b)+log(log(b/a))
reciprocal = reciprocal_gen(name="reciprocal",
longname="A reciprocal",
shapes="a, b", extradoc="""
Reciprocal distribution
reciprocal.pdf(x,a,b) = 1/(x*log(b/a))
for a <= x <= b, a,b > 0.
"""
)
# Rice distribution
# FIXME: PPF does not work.
class rice_gen(rv_continuous):
def _pdf(self, x, b):
return x*exp(-(x*x+b*b)/2.0)*special.i0(x*b)
def _logpdf(self, x, b):
return log(x) - (x*x + b*b)/2.0 + log(special.i0(x*b))
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1+nd2
b2 = b*b/2.0
return 2.0**(nd2)*exp(-b2)*special.gamma(n1) * \
special.hyp1f1(n1,1,b2)
rice = rice_gen(a=0.0, name="rice", longname="A Rice",
shapes="b", extradoc="""
Rician distribution
rice.pdf(x,b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
for x > 0, b > 0.
"""
)
# Reciprocal Inverse Gaussian
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
def _rvs(self, mu): #added, taken from invgauss
return 1.0/mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x)*exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*log(2*pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-exp(2.0/mu)*_norm_cdf(-isqx*trm2)
# xb=50 or something large is necessary for stats to converge without exception
recipinvgauss = recipinvgauss_gen(a=0.0, xb=50, name='recipinvgauss',
longname="A reciprocal inverse Gaussian",
shapes="mu", extradoc="""
Reciprocal inverse Gaussian
recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
for x >= 0.
"""
)
# Semicircular
class semicircular_gen(rv_continuous):
def _pdf(self, x):
return 2.0/pi*sqrt(1-x*x)
def _cdf(self, x):
return 0.5+1.0/pi*(x*sqrt(1-x*x) + arcsin(x))
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0,b=1.0, name="semicircular",
longname="A semicircular",
extradoc="""
Semicircular distribution
semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
for -1 <= x <= 1.
"""
)
# Triangular
# up-sloping line from loc to (loc + c*scale) and then downsloping line from
# loc + c*scale to loc + scale
# _trstr = "Left must be <= mode which must be <= right with left < right"
class triang_gen(rv_continuous):
def _rvs(self, c):
return mtrand.triangular(0, c, 1, self._size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
return where(x < c, 2*x/c, 2*(1-x)/(1-c))
def _cdf(self, x, c):
return where(x < c, x*x/c, (x*x-2*x+c)/(c-1))
def _ppf(self, q, c):
return where(q < c, sqrt(c*q), 1-sqrt((1-c)*(1-q)))
def _stats(self, c):
return (c+1.0)/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
(5*(1.0-c+c*c)**1.5), -3.0/5.0
def _entropy(self,c):
return 0.5-log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang", longname="A Triangular",
shapes="c", extradoc="""
Triangular distribution
up-sloping line from loc to (loc + c*scale) and then downsloping line
from (loc + c*scale) to (loc + scale).
- standard form is in the range [0,1] with c the mode.
- location parameter shifts the start to loc
- scale changes the width from 1 to scale
"""
)
# Truncated Exponential
class truncexpon_gen(rv_continuous):
def _argcheck(self, b):
self.b = b
return (b > 0)
def _pdf(self, x, b):
return exp(-x)/(1-exp(-b))
def _logpdf(self, x, b):
return -x - log(1-exp(-b))
def _cdf(self, x, b):
return (1.0-exp(-x))/(1-exp(-b))
def _ppf(self, q, b):
return -log(1-q+q*exp(-b))
def _munp(self, n, b):
#wrong answer with formula, same as in continuous.pdf
#return gam(n+1)-special.gammainc(1+n,b)
if n == 1:
return (1-(b+1)*exp(-b))/(-expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*exp(-b))/(-expm1(-b))
else:
#return generic for higher moments
#return rv_continuous._mom1_sc(self,n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = exp(b)
return log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon',
longname="A truncated exponential",
shapes="b", extradoc="""
Truncated exponential distribution
truncexpon.pdf(x,b) = exp(-x)/(1-exp(-b))
for 0 < x < b.
"""
)
# Truncated Normal
class truncnorm_gen(rv_continuous):
def _argcheck(self, a, b):
self.a = a
self.b = b
self._nb = _norm_cdf(b)
self._na = _norm_cdf(a)
self._delta = self._nb - self._na
self._logdelta = log(self._delta)
return (a != b)
# All of these assume that _argcheck is called first
# and no other thread calls _pdf before.
def _pdf(self, x, a, b):
return _norm_pdf(x) / self._delta
def _logpdf(self, x, a, b):
return _norm_logpdf(x) - self._logdelta
def _cdf(self, x, a, b):
return (_norm_cdf(x) - self._na) / self._delta
def _ppf(self, q, a, b):
return norm._ppf(q*self._nb + self._na*(1.0-q))
def _stats(self, a, b):
nA, nB = self._na, self._nb
d = nB - nA
pA, pB = _norm_pdf(a), _norm_pdf(b)
mu = (pA - pB) / d #correction sign
mu2 = 1 + (a*pA - b*pB) / d - mu*mu
return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm', longname="A truncated normal",
shapes="a, b", extradoc="""
Truncated Normal distribution.
The standard form of this distribution is a standard normal truncated to the
range [a,b] --- notice that a and b are defined over the domain
of the standard normal. To convert clip values for a specific mean and
standard deviation use a,b = (myclip_a-my_mean)/my_std, (myclip_b-my_mean)/my_std
"""
)
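# Editor's note: illustrative sketch, not part of the original module. It shows
# the clip-value standardisation described in the truncnorm docstring above for
# a normal with a specific mean and standard deviation; my_mean, my_std and the
# clip values are illustrative names only. Assumes numpy and the `norm` instance.
def _example_truncnorm_clip_values():
    import numpy as np
    my_mean, my_std = 10.0, 2.0
    myclip_a, myclip_b = 8.0, 14.0
    a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
    x = np.linspace(8.5, 13.5, 30)
    # clipped-normal density, renormalised over [myclip_a, myclip_b]
    mass = norm.cdf(b) - norm.cdf(a)
    manual = norm.pdf((x - my_mean) / my_std) / my_std / mass
    assert np.allclose(truncnorm.pdf(x, a, b, loc=my_mean, scale=my_std), manual)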
# Tukey-Lambda
# A flexible distribution ranging from Cauchy (lam=-1)
# to logistic (lam=0.0)
# to approx Normal (lam=0.14)
# to u-shape (lam = 0.5)
# to Uniform from -1 to 1 (lam = 1)
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
def _argcheck(self, lam):
# lam in RR.
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = arr(special.tklmbda(x,lam))
Px = Fx**(lam-1.0) + (arr(1-Fx))**(lam-1.0)
Px = 1.0/arr(Px)
return where((lam <= 0) | (abs(x) < 1.0/arr(lam)), Px, 0.0)
def _cdf(self, x, lam):
return special.tklmbda(x, lam)
def _ppf(self, q, lam):
q = q*1.0
vals1 = (q**lam - (1-q)**lam)/lam
vals2 = log(q/(1-q))
return where((lam == 0)&(q==q), vals2, vals1)
def _stats(self, lam):
mu2 = 2*gam(lam+1.5)-lam*pow(4,-lam)*sqrt(pi)*gam(lam)*(1-2*lam)
mu2 /= lam*lam*(1+2*lam)*gam(1+1.5)
mu4 = 3*gam(lam)*gam(lam+0.5)*pow(2,-2*lam) / lam**3 / gam(2*lam+1.5)
mu4 += 2.0/lam**4 / (1+4*lam)
mu4 -= 2*sqrt(3)*gam(lam)*pow(2,-6*lam)*pow(3,3*lam) * \
gam(lam+1.0/3)*gam(lam+2.0/3) / (lam**3.0 * gam(2*lam+1.5) * \
gam(lam+0.5))
g2 = mu4 / mu2 / mu2 - 3.0
return 0, mu2, 0, g2
def _entropy(self, lam):
def integ(p):
return log(pow(p,lam-1)+pow(1-p,lam-1))
return integrate.quad(integ,0,1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda', longname="A Tukey-Lambda",
shapes="lam", extradoc="""
Tukey-Lambda distribution
A flexible distribution ranging from Cauchy (lam=-1)
to logistic (lam=0.0)
to approx Normal (lam=0.14)
to u-shape (lam = 0.5)
to Uniform from -1 to 1 (lam = 1)
"""
)
# Uniform
# loc to loc + scale
class uniform_gen(rv_continuous):
def _rvs(self):
return mtrand.uniform(0.0,1.0,self._size)
def _pdf(self, x):
return 1.0*(x==x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
uniform = uniform_gen(a=0.0,b=1.0, name='uniform', longname="A uniform",
extradoc="""
Uniform distribution
constant between loc and loc+scale
"""
)
# Von-Mises
# if x is not in range or loc is not in range it assumes they are angles
# and converts them to [-pi, pi] equivalents.
eps = numpy.finfo(float).eps
class vonmises_gen(rv_continuous):
def _rvs(self, b):
return mtrand.vonmises(0.0, b, size=self._size)
def _pdf(self, x, b):
return exp(b*cos(x)) / (2*pi*special.i0(b))
def _cdf(self, x, b):
return vonmises_cython.von_mises_cdf(b,x)
def _stats_skip(self, b):
return 0, None, 0, None
vonmises = vonmises_gen(name='vonmises', longname="A Von Mises",
shapes="b", extradoc="""
Von Mises distribution
if x is not in range or loc is not in range it assumes they are angles
and converts them to [-pi, pi] equivalents.
vonmises.pdf(x,b) = exp(b*cos(x)) / (2*pi*I[0](b))
for -pi <= x <= pi, b > 0.
"""
)
## Wald distribution (Inverse Normal with shape parameter mu=1.0)
class wald_gen(invgauss_gen):
"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function, `pdf`, is defined by
``1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))``, for ``x > 0``.
%(example)s
"""
def _rvs(self):
return mtrand.wald(1.0, 1.0, size=self._size)
def _pdf(self, x):
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald", extradoc="""
Wald distribution
wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
for x > 0.
"""
)
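# Editor's note: illustrative sketch, not part of the original module. wald is
# documented above as the inverse Gaussian with mu fixed at 1; this checks that
# its pdf and cdf agree with invgauss at mu=1. Assumes numpy.
def _example_wald_is_invgauss_mu1():
    import numpy as np
    x = np.linspace(0.1, 5.0, 40)
    assert np.allclose(wald.pdf(x), invgauss.pdf(x, 1.0))
    assert np.allclose(wald.cdf(x), invgauss.cdf(x, 1.0))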
## Weibull
## See Frechet
# Wrapped Cauchy
class wrapcauchy_gen(rv_continuous):
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
return (1.0-c*c)/(2*pi*(1+c*c-2*c*cos(x)))
def _cdf(self, x, c):
output = 0.0*x
val = (1.0+c)/(1.0-c)
c1 = x<pi
c2 = 1-c1
xp = extract( c1,x)
#valp = extract(c1,val)
xn = extract( c2,x)
#valn = extract(c2,val)
if (any(xn)):
valn = extract(c2, np.ones_like(x)*val)
xn = 2*pi - xn
yn = tan(xn/2.0)
on = 1.0-1.0/pi*arctan(valn*yn)
output = place(output, c2, on)
if (any(xp)):
valp = extract(c1, np.ones_like(x)*val)
yp = tan(xp/2.0)
op = 1.0/pi*arctan(valp*yp)
output = place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*arctan(val*tan(pi*q))
rcmq = 2*pi-2*arctan(val*tan(pi*(1-q)))
return where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return log(2*pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0,b=2*pi, name='wrapcauchy',
longname="A wrapped Cauchy",
shapes="c", extradoc="""
Wrapped Cauchy distribution
wrapcauchy.pdf(x,c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
for 0 <= x <= 2*pi, 0 < c < 1.
"""
)
### DISCRETE DISTRIBUTIONS
###
def entropy(pk,qk=None):
"""S = entropy(pk,qk=None)
calculate the entropy of a distribution given the p_k values
S = -sum(pk * log(pk), axis=0)
If qk is not None, then compute a relative entropy
S = sum(pk * log(pk / qk), axis=0)
Routine will normalize pk and qk if they don't sum to 1
"""
pk = arr(pk)
pk = 1.0* pk / sum(pk,axis=0)
if qk is None:
vec = where(pk == 0, 0.0, pk*log(pk))
else:
qk = arr(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / sum(qk,axis=0)
# If qk is zero anywhere, then unless pk is zero at those places
# too, the relative entropy is infinite.
if any(take(pk,nonzero(qk==0.0),axis=0)!=0.0, 0):
return inf
vec = where (pk == 0, 0.0, -pk*log(pk / qk))
return -sum(vec,axis=0)
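# Editor's note: illustrative sketch, not part of the original module. It
# exercises the entropy helper above: a fair coin has entropy log(2) in nats,
# and the relative entropy of a distribution with itself is zero. Assumes numpy.
def _example_entropy_helper():
    import numpy as np
    assert np.allclose(entropy([0.5, 0.5]), np.log(2.0))
    assert np.allclose(entropy([0.2, 0.8], [0.2, 0.8]), 0.0)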
## Handlers for generic case where xk and pk are given
def _drv_pmf(self, xk, *args):
try:
return self.P[xk]
except KeyError:
return 0.0
def _drv_cdf(self, xk, *args):
indx = argmax((self.xk>xk),axis=-1)-1
return self.F[self.xk[indx]]
def _drv_ppf(self, q, *args):
indx = argmax((self.qvals>=q),axis=-1)
return self.Finv[self.qvals[indx]]
def _drv_nonzero(self, k, *args):
return 1
def _drv_moment(self, n, *args):
n = arr(n)
return sum(self.xk**n[newaxis,...] * self.pk, axis=0)
def _drv_moment_gen(self, t, *args):
t = arr(t)
return sum(exp(self.xk * t[newaxis,...]) * self.pk, axis=0)
def _drv2_moment(self, n, *args):
'''non-central moment of discrete distribution'''
#many changes, originally not even a return
tot = 0.0
diff = 1e100
#pos = self.a
pos = max(0.0, 1.0*self.a)
count = 0
#handle cases with infinite support
ulimit = max(1000, (min(self.b,1000) + max(self.a,-1000))/2.0 )
llimit = min(-1000, (min(self.b,1000) + max(self.a,-1000))/2.0 )
while (pos <= self.b) and ((pos <= ulimit) or \
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos,*args)
# use pmf because _pmf does not check support in randint
# and there might be problems ? with correct self.a, self.b at this stage
tot += diff
pos += self.inc
count += 1
if self.a < 0: #handle case when self.a = -inf
diff = 1e100
pos = -self.inc
while (pos >= self.a) and ((pos >= llimit) or \
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos,*args)
#using pmf instead of _pmf, see above
tot += diff
pos -= self.inc
count += 1
return tot
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.invcdf_b
a = self.invcdf_a
if isinf(b): # Be sure ending point is > q
b = max(100*q,10)
while 1:
if b >= self.b: qb = 1.0; break
qb = self._cdf(b,*args)
if (qb < q): b += 10
else: break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = min(-100*q,-10)
while 1:
if a <= self.a: qa = 0.0; break  # lower bracket reached; set qa, not qb
qa = self._cdf(a,*args)
if (qa > q): a -= 10
else: break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b == a+1:
#testcase: return wrong number at lower index
#python -c "from scipy.stats import zipf;print zipf.ppf(0.01,2)" wrong
#python -c "from scipy.stats import zipf;print zipf.ppf([0.01,0.61,0.77,0.83],2)"
#python -c "from scipy.stats import logser;print logser.ppf([0.1,0.66, 0.86,0.93],0.6)"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
a = c
qa = qc
elif (qc > q):
b = c
qb = qc
else:
return c
def reverse_dict(dict):
newdict = {}
sorted_keys = copy(dict.keys())
sorted_keys.sort()
for key in sorted_keys[::-1]:
newdict[dict[key]] = key
return newdict
def make_dict(keys, values):
d = {}
for key, value in zip(keys, values):
d[key] = value
return d
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class for constructing specific distribution classes
and instances for discrete random variables. It can also be used to construct
an arbitrary distribution defined by a list of support points and the
corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments
values : tuple of two array_like
(xk, pk) where xk are points (integers) with positive probability pk
with sum(pk) = 1
inc : integer
increment for the support of the distribution, default: 1
other values have not been tested
badvalue : object, optional
The value in (masked) arrays that indicates a value that should be
ignored.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the first two arguments for all
its methods.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
generic.rvs(<shape(s)>, loc=0, size=1)
random variates
generic.pmf(x, <shape(s)>, loc=0)
probability mass function
generic.logpmf(x, <shape(s)>, loc=0)
log of the probability mass function
generic.cdf(x, <shape(s)>, loc=0)
cumulative distribution function
generic.logcdf(x, <shape(s)>, loc=0)
log of the cumulative distribution function
generic.sf(x, <shape(s)>, loc=0)
survival function (1-cdf --- sometimes more accurate)
generic.logsf(x, <shape(s)>, loc=0, scale=1)
log of the survival function
generic.ppf(q, <shape(s)>, loc=0)
percent point function (inverse of cdf --- percentiles)
generic.isf(q, <shape(s)>, loc=0)
inverse survival function (inverse of sf)
generic.moment(n, <shape(s)>, loc=0)
non-central n-th moment of the distribution. May not work for array arguments.
generic.stats(<shape(s)>, loc=0, moments='mv')
mean('m', axis=0), variance('v'), skew('s'), and/or kurtosis('k')
generic.entropy(<shape(s)>, loc=0)
entropy of the RV
generic.fit(data, <shape(s)>, loc=0)
Parameter estimates for generic data
generic.expect(func=None, args=(), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
generic.median(<shape(s)>, loc=0)
Median of the distribution.
generic.mean(<shape(s)>, loc=0)
Mean of the distribution.
generic.std(<shape(s)>, loc=0)
Standard deviation of the distribution.
generic.var(<shape(s)>, loc=0)
Variance of the distribution.
generic.interval(alpha, <shape(s)>, loc=0)
Interval that with `alpha` percent probability contains a random
realization of this distribution.
generic(<shape(s)>, loc=0)
calling a distribution instance returns a frozen distribution
Notes
-----
Alternatively, the object may be called (as a function) to fix
the shape and location parameters returning a
"frozen" discrete RV object:
myrv = generic(<shape(s)>, loc=0)
- frozen RV object with the same methods but holding the given shape
and location fixed.
You can construct an arbitrary discrete rv where P{X=xk} = pk
by passing to the rv_discrete initialization method (through the
values=keyword) a tuple of sequences (xk, pk) which describes only those
values of X (xk) that occur with nonzero probability (pk).
To create a new discrete distribution, we would do the following::
class poisson_gen(rv_discrete):
#"Poisson distribution"
def _pmf(self, k, mu):
...
and create an instance
poisson = poisson_gen(name="poisson", shapes="mu", longname='A Poisson')
The docstring can be created from a template.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> numargs = generic.numargs
>>> [ <shape(s)> ] = ['Replace with reasonable value', ]*numargs
Display frozen pmf:
>>> rv = generic(<shape(s)>)
>>> x = np.arange(0, min(rv.dist.b, 3)+1)
>>> h = plt.plot(x, rv.pmf(x))
Check accuracy of cdf and ppf:
>>> prb = generic.cdf(x, <shape(s)>)
>>> h = plt.semilogy(np.abs(x-generic.ppf(prb, <shape(s)>))+1e-20)
Random number generation:
>>> R = generic.rvs(<shape(s)>, size=100)
Custom made discrete distribution:
>>> vals = [arange(7), (0.1, 0.2, 0.3, 0.1, 0.1, 0.1, 0.1)]
>>> custm = rv_discrete(name='custm', values=vals)
>>> h = plt.plot(vals[0], custm.pmf(vals[0]))
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8,values=None,inc=1,longname=None,
shapes=None, extradoc=None):
super(rv_generic,self).__init__()
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.a = a
self.b = b
self.invcdf_a = a # what's the difference to self.a, .b
self.invcdf_b = b
self.name = name
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = sgf(self._cdfsingle,otypes='d')
self.return_integers = 1
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
self.extradoc = extradoc
if values is not None:
self.xk, self.pk = values
self.return_integers = 0
indx = argsort(ravel(self.xk))
self.xk = take(ravel(self.xk),indx, 0)
self.pk = take(ravel(self.pk),indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.P = make_dict(self.xk, self.pk)
self.qvals = numpy.cumsum(self.pk,axis=0)
self.F = make_dict(self.xk, self.qvals)
self.Finv = reverse_dict(self.F)
self._ppf = instancemethod(sgf(_drv_ppf,otypes='d'),
self, rv_discrete)
self._pmf = instancemethod(sgf(_drv_pmf,otypes='d'),
self, rv_discrete)
self._cdf = instancemethod(sgf(_drv_cdf,otypes='d'),
self, rv_discrete)
self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)
self.generic_moment = instancemethod(_drv_moment,
self, rv_discrete)
self.moment_gen = instancemethod(_drv_moment_gen,
self, rv_discrete)
self.numargs=0
else:
cdf_signature = inspect.getargspec(self._cdf.im_func)
numargs1 = len(cdf_signature[0]) - 2
pmf_signature = inspect.getargspec(self._pmf.im_func)
numargs2 = len(pmf_signature[0]) - 2
self.numargs = max(numargs1, numargs2)
#nin correction needs to be after we know numargs
#correct nin for generic moment vectorization
self.vec_generic_moment = sgf(_drv2_moment, otypes='d')
self.vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(self.vec_generic_moment,
self, rv_discrete)
#correct nin for ppf vectorization
_vppf = sgf(_drv2_ppfsingle,otypes='d')
_vppf.nin = self.numargs + 2 # +1 is for self
self._vecppf = instancemethod(_vppf,
self, rv_discrete)
#now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
# generate docstring for subclass instances
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if self.__doc__ is None:
self._construct_default_doc(longname=longname, extradoc=extradoc)
else:
self._construct_doc()
## This only works for old-style classes...
# self.__class__.__doc__ = self.__doc__
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the rv_discrete template."""
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s discrete random variable.'%longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc()
def _construct_doc(self):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict_discrete.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(\
"\n%(shapes)s : array-like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
def _rvs(self, *args):
return self._ppf(mtrand.random_sample(self._size),*args)
def _nonzero(self, k, *args):
return floor(k)==k
def _argcheck(self, *args):
cond = 1
for arg in args:
cond &= (arg > 0)
return cond
def _pmf(self, k, *args):
return self._cdf(k,*args) - self._cdf(k-1,*args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdfsingle(self, k, *args):
m = arange(int(self.a),k+1)
return sum(self._pmf(m,*args),axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k,*args)
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x,*args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._vecppf(q, *args)
def _isf(self, q, *args):
return self._ppf(1-q,*args)
def _stats(self, *args):
return None, None, None, None
def _munp(self, n, *args):
return self.generic_moment(n, *args)
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
size : int or tuple of ints, optional
defining number of random variates (default=1)
Returns
-------
rvs : array-like
random variates of given `size`
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k,*args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
pmf : array-like
Probability mass function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._pmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k,*args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
logpmf : array-like
Log of the probability mass function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function at k of the given RV
Parameters
----------
k : array-like, int
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
cdf : array-like
Cumulative distribution function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2*(cond0==cond0), 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV
Parameters
----------
k : array-like, int
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
logcdf : array-like
Log of the cumulative distribution function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2*(cond0==cond0), 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self,k,*args,**kwds):
"""
Survival function (1-cdf) at k of the given RV
Parameters
----------
k : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
sf : array-like
Survival function evaluated at k
"""
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond),'d')
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2,1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self,k,*args,**kwds):
"""
Log of the survival function (1-cdf) at k of the given RV
Parameters
----------
k : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
sf : array-like
Survival function evaluated at k
"""
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2,0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self,q,*args,**kwds):
"""
Percent point function (inverse of cdf) at q of the given RV
Parameters
----------
q : array-like
lower tail probability
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale: array-like, optional
scale parameter (default=1)
Returns
-------
k : array-like
quantile corresponding to the lower tail probability, q.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
q,loc = map(arr,(q,loc))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.badvalue,typecode='d')
#output type 'd' to handle nan and inf
output = place(output,(q==0)*(cond==cond), self.a-1)
output = place(output,cond2,self.b)
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
output = place(output,cond,self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self,q,*args,**kwds):
"""
Inverse survival function (1-sf) at q of the given RV
Parameters
----------
q : array-like
upper tail probability
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
k : array-like
quantile corresponding to the upper tail probability, q.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
q,loc = map(arr,(q,loc))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
#old:
## output = valarray(shape(cond),value=self.b,typecode='d')
## #typecode 'd' to handle nin and inf
## output = place(output,(1-cond0)*(cond1==cond1), self.badvalue)
## output = place(output,cond2,self.a-1)
#same problem as with ppf
# copied from ppf and changed
output = valarray(shape(cond),value=self.badvalue,typecode='d')
#output type 'd' to handle nan and inf
output = place(output,(q==0)*(cond==cond), self.b)
output = place(output,cond2,self.a-1)
# call place only if at least 1 valid argument
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
output = place(output,cond,self._isf(*goodargs) + loc) #PB same as ticket 766
if output.ndim == 0:
return output[()]
return output
def stats(self, *args, **kwds):
"""
Some statistics of the given discrete RV
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
moments : string, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default='mv')
Returns
-------
stats : sequence
of requested moments.
"""
loc,moments=map(kwds.get,['loc','moments'])
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None: # loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and moments is None: # loc, scale, and moments
loc, moments = args[-2:]
args = args[:self.numargs]
if loc is None: loc = 0.0
if moments is None: moments = 'mv'
loc = arr(loc)
args = tuple(map(arr,args))
cond = self._argcheck(*args) & (loc==loc)
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments})
else:
mu, mu2, g1, g2 = self._stats(*args)
if g1 is None:
mu3 = None
else:
mu3 = g1*(mu2**1.5)
default = valarray(shape(cond), self.badvalue)
output = []
# Use only entries that are valid in calculation
goodargs = argsreduce(cond, *(args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
if 'm' in moments:
if mu is None:
mu = self._munp(1.0,*goodargs)
out0 = default.copy()
out0 = place(out0,cond,mu+loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
mu2 = mu2p - mu*mu
out0 = default.copy()
out0 = place(out0,cond,mu2)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
mu3 = mu3p - 3*mu*mu2 - mu**3
g1 = mu3 / mu2**1.5
out0 = default.copy()
out0 = place(out0,cond,g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
if mu3 is None:
mu3p = self._munp(3.0,*goodargs)
mu3 = mu3p - 3*mu*mu2 - mu**3
mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
out0 = place(out0,cond,g2)
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def moment(self, n, *args, **kwds): # Non-central moments in standard form.
"""
n'th non-central moment of the distribution
Parameters
----------
n: int, n>=1
order of moment
arg1, arg2, arg3,...: float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : float, optional
location parameter (default=0)
scale : float, optional
scale parameter (default=1)
"""
loc = kwds.get('loc', 0)
scale = kwds.get('scale', 1)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0): raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
dict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]}
else:
dict = {}
mu, mu2, g1, g2 = self._stats(*args,**dict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n,k,exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
def freeze(self, *args, **kwds):
return rv_frozen(self, *args, **kwds)
def _entropy(self, *args):
if hasattr(self,'pk'):
return entropy(self.pk)
else:
mu = int(self.stats(*args, **{'moments':'m'}))
val = self.pmf(mu,*args)
if (val==0.0): ent = 0.0
else: ent = -val*log(val)
k = 1
term = 1.0
while (abs(term) > eps):
val = self.pmf(mu+k,*args)
if val == 0.0: term = 0.0
else: term = -val * log(val)
val = self.pmf(mu-k,*args)
if val != 0.0: term -= val*log(val)
k += 1
ent += term
return ent
def entropy(self, *args, **kwds):
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
loc = arr(loc)
args = map(arr,args)
cond0 = self._argcheck(*args) & (loc==loc)
output = zeros(shape(cond0),'d')
output = place(output,(1-cond0),self.badvalue)
goodargs = argsreduce(cond0, *args)
output = place(output,cond0,self.vecentropy(*goodargs))
return output
def __call__(self, *args, **kwds):
return self.freeze(*args,**kwds)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None, conditional=False):
"""calculate expected value of a function with respect to the distribution
for discrete distribution
Parameters
----------
fn : function (default: identity mapping)
Function for which sum is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
optional keyword parameters
lb, ub : numbers
lower and upper bound for integration, default is set to the support
of the distribution, lb and ub are inclusive (lb<=k<=ub)
conditional : boolean (False)
If true then the expectation is corrected by the conditional
probability of the integration interval. The return value is the
expectation of the function, conditional on being in the given
interval (k such that lb<=k<=ub).
Returns
-------
expected value : float
Notes
-----
* function is not vectorized
* accuracy: uses self.moment_tol as stopping criterion
for heavy tailed distribution e.g. zipf(4), accuracy for
mean, variance in example is only 1e-5,
increasing precision (moment_tol) makes zipf very slow
* suppnmin=100 internal parameter for minimum number of points to evaluate
could be added as keyword parameter, to evaluate functions with
non-monotonic shapes, points include integers in (-suppnmin, suppnmin)
* uses maxcount=1000 limits the number of points that are evaluated
to break loop for infinite sums
(a maximum of suppnmin+1000 positive plus suppnmin+1000 negative integers
are evaluated)
"""
#moment_tol = 1e-12 # increase compared to self.moment_tol,
# too slow for only small gain in precision for zipf
#avoid endless loop with unbound integral, eg. var of zipf(2)
maxcount = 1000
suppnmin = 100 #minimum number of points to evaluate (+ and -)
if func is None:
def fun(x):
#loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
#loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint
# and there might be problems(?) with correct self.a, self.b at this stage
# maybe not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = (self.a)
else:
lb = lb - loc #convert bound for standardized distribution
if ub is None:
ub = (self.b)
else:
ub = ub - loc #convert bound for standardized distribution
if conditional:
if np.isposinf(ub)[()]:
#work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan
invfac = 1 - self.cdf(lb-1,*args)
else:
invfac = 1 - self.cdf(lb-1,*args) - self.sf(ub,*args)
else:
invfac = 1.0
tot = 0.0
low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
low = max(min(-suppnmin, low), lb)
upp = min(max(suppnmin, upp), ub)
supp = np.arange(low, upp+1, self.inc) #check limits
#print 'low, upp', low, upp
tot = np.sum(fun(supp))
diff = 1e100
pos = upp + self.inc
count = 0
#handle cases with infinite support
while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos += self.inc
count += 1
if self.a < 0: #handle case when self.a = -inf
diff = 1e100
pos = low - self.inc
while (pos >= lb) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos -= self.inc
count += 1
if count > maxcount:
# fixme: replace with proper warning
print 'sum did not converge'
return tot/invfac
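# Editor's note -- illustrative sketch only, not part of the original module. Assuming
# this file is importable as scipy.stats, the generic expect() method defined above can
# be exercised like this (results are approximate, limited by self.moment_tol):
#
#   >>> from scipy.stats import poisson
#   >>> poisson.expect(lambda x: x, args=(2.0,))       # mean of Poisson(2), ~2.0
#   >>> poisson.expect(lambda x: x * x, args=(2.0,))   # second moment, ~6.0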
# Binomial
class binom_gen(rv_discrete):
def _rvs(self, n, pr):
return mtrand.binomial(n,pr,self._size)
def _argcheck(self, n, pr):
self.b = n
return (n>=0) & (pr >= 0) & (pr <= 1)
def _logpmf(self, x, n, pr):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) +
gamln(n-k+1)))
return combiln + k*np.log(pr) + (n-k)*np.log(1-pr)
def _pmf(self, x, n, pr):
return exp(self._logpmf(x, n, pr))
def _cdf(self, x, n, pr):
k = floor(x)
vals = special.bdtr(k,n,pr)
return vals
def _sf(self, x, n, pr):
k = floor(x)
return special.bdtrc(k,n,pr)
def _ppf(self, q, n, pr):
vals = ceil(special.bdtrik(q,n,pr))
vals1 = vals-1
temp = special.bdtr(vals1,n,pr)
return where(temp >= q, vals1, vals)
def _stats(self, n, pr):
q = 1.0-pr
mu = n * pr
var = n * pr * q
g1 = (q-pr) / sqrt(n*pr*q)
g2 = (1.0-6*pr*q)/(n*pr*q)
return mu, var, g1, g2
def _entropy(self, n, pr):
k = r_[0:n+1]
vals = self._pmf(k,n,pr)
lvals = where(vals==0,0.0,log(vals))
return -sum(vals*lvals,axis=0)
binom = binom_gen(name='binom',shapes="n, pr",extradoc="""
Binomial distribution
Counts the number of successes in *n* independent
trials when the probability of success each time is *pr*.
binom.pmf(k,n,p) = choose(n,k)*p**k*(1-p)**(n-k)
for k in {0,1,...,n}
""")
# Bernoulli distribution
class bernoulli_gen(binom_gen):
def _rvs(self, pr):
return binom_gen._rvs(self, 1, pr)
def _argcheck(self, pr):
return (pr >=0 ) & (pr <= 1)
def _logpmf(self, x, pr):
return binom._logpmf(x, 1, pr)
def _pmf(self, x, pr):
return binom._pmf(x, 1, pr)
def _cdf(self, x, pr):
return binom._cdf(x, 1, pr)
def _sf(self, x, pr):
return binom._sf(x, 1, pr)
def _ppf(self, q, pr):
return binom._ppf(q, 1, pr)
def _stats(self, pr):
return binom._stats(1, pr)
def _entropy(self, pr):
return -pr*log(pr)-(1-pr)*log(1-pr)
bernoulli = bernoulli_gen(b=1,name='bernoulli',shapes="pr",extradoc="""
Bernoulli distribution
1 if binary experiment succeeds, 0 otherwise. Experiment
succeeds with probability *pr*.
bernoulli.pmf(k,p) = 1-p if k = 0
= p if k = 1
for k = 0,1
"""
)
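# Editor's note -- illustrative sketch only, not part of the original source.
# bernoulli is binom with n fixed to 1:
#
#   >>> bernoulli.pmf(1, 0.3)        # 0.3
#   >>> bernoulli.pmf(0, 0.3)        # 0.7
#   >>> bernoulli.rvs(0.3, size=5)   # five independent 0/1 draws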
# Negative binomial
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
Probability mass function, given by
``comb(k+n-1, n-1) * p**n * (1-p)**k`` for ``k >= 0``.
%(example)s
"""
def _rvs(self, n, pr):
return mtrand.negative_binomial(n, pr, self._size)
def _argcheck(self, n, pr):
return (n >= 0) & (pr >= 0) & (pr <= 1)
def _pmf(self, x, n, pr):
coeff = exp(gamln(n+x) - gamln(x+1) - gamln(n))
return coeff * power(pr,n) * power(1-pr,x)
def _logpmf(self, x, n, pr):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(pr) + x*log(1-pr)
def _cdf(self, x, n, pr):
k = floor(x)
return special.betainc(n, k+1, pr)
def _sf_skip(self, x, n, pr):
#skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k,n,pr)
def _ppf(self, q, n, pr):
vals = ceil(special.nbdtrik(q,n,pr))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1,n,pr)
return where(temp >= q, vals1, vals)
def _stats(self, n, pr):
Q = 1.0 / pr
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom', shapes="n, pr", extradoc="""
Negative binomial distribution
nbinom.pmf(k,n,p) = choose(k+n-1,n-1) * p**n * (1-p)**k
for k >= 0.
"""
)
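# Editor's note -- illustrative sketch only, not part of the original source. With the
# parameterisation above, nbinom.pmf(0, n, pr) reduces to pr**n, e.g.
#
#   >>> nbinom.pmf(0, 5, 0.5)    # 0.5**5 = 0.03125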
## Geometric distribution
class geom_gen(rv_discrete):
def _rvs(self, pr):
return mtrand.geometric(pr,size=self._size)
def _argcheck(self, pr):
return (pr<=1) & (pr >= 0)
def _pmf(self, k, pr):
return (1-pr)**(k-1) * pr
def _logpmf(self, k, pr):
return (k-1)*log(1-pr) + log(pr)
def _cdf(self, x, pr):
k = floor(x)
return (1.0-(1.0-pr)**k)
def _sf(self, x, pr):
k = floor(x)
return (1.0-pr)**k
def _ppf(self, q, pr):
vals = ceil(log(1.0-q)/log(1-pr))
temp = 1.0-(1.0-pr)**(vals-1)
return where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, pr):
mu = 1.0/pr
qr = 1.0-pr
var = qr / pr / pr
g1 = (2.0-pr) / sqrt(qr)
g2 = numpy.polyval([1,-6,6],pr)/(1.0-pr)
return mu, var, g1, g2
geom = geom_gen(a=1,name='geom', longname="A geometric",
shapes="pr", extradoc="""
Geometric distribution
geom.pmf(k,p) = (1-p)**(k-1)*p
for k >= 1
"""
)
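# Editor's note -- illustrative sketch only, not part of the original source. The
# geometric pmf decays as (1-p)**(k-1)*p:
#
#   >>> geom.pmf(1, 0.5)    # 0.5
#   >>> geom.pmf(3, 0.5)    # 0.125
#   >>> geom.cdf(3, 0.5)    # 1 - 0.5**3 = 0.875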
## Hypergeometric distribution
class hypergeom_gen(rv_discrete):
def _rvs(self, M, n, N):
return mtrand.hypergeometric(n,M-n,N,size=self._size)
def _argcheck(self, M, n, N):
cond = rv_discrete._argcheck(self,M,n,N)
cond &= (n <= M) & (N <= M)
self.a = N-(M-n)
self.b = min(n,N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
#same as the following but numerically more precise
#return comb(good,k) * comb(bad,N-k) / comb(tot,N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
tot, good = M, n
n = good*1.0
m = (tot-good)*1.0
N = N*1.0
tot = m+n
p = n/tot
mu = N*p
var = m*n*N*(tot-N)*1.0/(tot*tot*(tot-1))
g1 = (m - n)*(tot-2*N) / (tot-2.0)*sqrt((tot-1.0)/(m*n*N*(tot-N)))
m2, m3, m4, m5 = m**2, m**3, m**4, m**5
n2, n3, n4, n5 = n**2, n**3, n**4, n**5
g2 = m3 - m5 + n*(3*m2-6*m3+m4) + 3*m*n2 - 12*m2*n2 + 8*m3*n2 + n3 \
- 6*m*n3 + 8*m2*n3 + m*n4 - n5 - 6*m3*N + 6*m4*N + 18*m2*n*N \
- 6*m3*n*N + 18*m*n2*N - 24*m2*n2*N - 6*n3*N - 6*m*n3*N \
+ 6*n4*N + N*N*(6*m2 - 6*m3 - 24*m*n + 12*m2*n + 6*n2 + \
12*m*n2 - 6*n3)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = r_[N-(M-n):min(n,N)+1]
vals = self.pmf(k,M,n,N)
lvals = where(vals==0.0,0.0,log(vals))
return -sum(vals*lvals,axis=0)
hypergeom = hypergeom_gen(name='hypergeom',longname="A hypergeometric",
shapes="M, n, N", extradoc="""
Hypergeometric distribution
Models drawing objects from a bin.
M is total number of objects, n is total number of Type I objects.
RV counts number of Type I objects in N drawn without replacement from
population.
hypergeom.pmf(k, M, n, N) = choose(n,k)*choose(M-n,N-k)/choose(M,N)
for N - (M-n) <= k <= min(n,N)
"""
)
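# Editor's note -- illustrative sketch only, not part of the original source. Drawing
# N=10 items from a bin of M=20 that holds n=7 Type I items, the chance of exactly k=3
# Type I draws is choose(7,3)*choose(13,7)/choose(20,10):
#
#   >>> hypergeom.pmf(3, 20, 7, 10)    # ~0.325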
## Logarithmic (Log-Series), (Series) distribution
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
def _rvs(self, pr):
# looks wrong for pr>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return mtrand.logseries(pr,size=self._size)
def _argcheck(self, pr):
return (pr > 0) & (pr < 1)
def _pmf(self, k, pr):
return -pr**k * 1.0 / k / log(1-pr)
def _stats(self, pr):
r = log(1-pr)
mu = pr / (pr - 1.0) / r
mu2p = -pr / r / (pr-1.0)**2
var = mu2p - mu*mu
mu3p = -pr / r * (1.0+pr) / (1.0-pr)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / var**1.5
mu4p = -pr / r * (1.0/(pr-1)**2 - 6*pr/(pr-1)**3 + \
6*pr*pr / (pr-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1,name='logser', longname='A logarithmic',
shapes='pr', extradoc="""
Logarithmic (Log-Series, Series) distribution
logser.pmf(k,p) = - p**k / (k*log(1-p))
for k >= 1
"""
)
## Poisson distribution
class poisson_gen(rv_discrete):
def _rvs(self, mu):
return mtrand.poisson(mu, self._size)
def _pmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return exp(Pk)
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k,mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k,mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q,mu))
vals1 = vals-1
temp = special.pdtr(vals1,mu)
return where((temp >= q), vals1, vals)
def _stats(self, mu):
var = mu
g1 = 1.0/arr(sqrt(mu))
g2 = 1.0 / arr(mu)
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson',
shapes="mu", extradoc="""
Poisson distribution
poisson.pmf(k, mu) = exp(-mu) * mu**k / k!
for k >= 0
"""
)
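# Editor's note -- illustrative sketch only, not part of the original source. For the
# Poisson pmf defined above:
#
#   >>> poisson.pmf(0, 2.0)    # exp(-2) ~ 0.135
#   >>> poisson.stats(2.0)     # mean and variance are both 2.0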
## (Planck) Discrete Exponential
class planck_gen(rv_discrete):
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = inf
return 1
elif (lambda_ < 0):
self.a = -inf
self.b = 0
return 1
return 0 # lambda_ = 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck',longname='A discrete exponential ',
shapes="lamda",
extradoc="""
Planck (Discrete Exponential)
planck.pmf(k,b) = (1-exp(-b))*exp(-b*k)
for k*b >= 0
"""
)
class boltzmann_gen(rv_discrete):
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',longname='A truncated discrete exponential ',
shapes="lamda, N",
extradoc="""
Boltzmann (Truncated Discrete Exponential)
boltzmann.pmf(k,b,N) = (1-exp(-b))*exp(-b*k)/(1-exp(-b*N))
for k=0,..,N-1
"""
)
## Discrete Uniform
class randint_gen(rv_discrete):
def _argcheck(self, min, max):
self.a = min
self.b = max-1
return (max > min)
def _pmf(self, k, min, max):
fact = 1.0 / (max - min)
return fact
def _cdf(self, x, min, max):
k = floor(x)
return (k-min+1)*1.0/(max-min)
def _ppf(self, q, min, max):
vals = ceil(q*(max-min)+min)-1
vals1 = (vals-1).clip(min, max)
temp = self._cdf(vals1, min, max)
return where(temp >= q, vals1, vals)
def _stats(self, min, max):
m2, m1 = arr(max), arr(min)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d-1)*(d+1.0)/12.0
g1 = 0.0
g2 = -6.0/5.0*(d*d+1.0)/((d-1.0)*(d+1.0))
return mu, var, g1, g2
def _rvs(self, min, max=None):
"""An array of *size* random integers >= min and < max.
If max is None, then range is >=0 and < min
"""
return mtrand.randint(min, max, self._size)
def _entropy(self, min, max):
return log(max-min)
randint = randint_gen(name='randint',longname='A discrete uniform '\
'(random integer)', shapes="min, max",
extradoc="""
Discrete Uniform
Random integers >=min and <max.
randint.pmf(k,min, max) = 1/(max-min)
for min <= k < max.
"""
)
# Zipf distribution
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
def _rvs(self, a):
return mtrand.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / arr(special.zeta(a,1) * k**a)
return Pk
def _munp(self, n, a):
return special.zeta(a-n,1) / special.zeta(a,1)
def _stats(self, a):
sv = errp(0)
fac = arr(special.zeta(a,1))
mu = special.zeta(a-1.0,1)/fac
mu2p = special.zeta(a-2.0,1)/fac
var = mu2p - mu*mu
mu3p = special.zeta(a-3.0,1)/fac
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / arr(var**1.5)
mu4p = special.zeta(a-4.0,1)/fac
sv = errp(sv)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / arr(var**2) - 3.0
return mu, var, g1, g2
zipf = zipf_gen(a=1,name='zipf', longname='A Zipf',
shapes="a", extradoc="""
Zipf distribution
zipf.pmf(k,a) = 1/(zeta(a)*k**a)
for k >= 1
"""
)
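# Editor's note -- illustrative sketch only, not part of the original source. The Zipf
# pmf is a power law normalised by the Riemann zeta function,
# zipf.pmf(k, a) = 1/(zeta(a)*k**a):
#
#   >>> zipf.pmf(1, 3.0)    # 1/zeta(3) ~ 0.832
#   >>> zipf.pmf(2, 3.0)    # 1/(8*zeta(3)) ~ 0.104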
# Discrete Laplacian
class dlaplace_gen(rv_discrete):
def _pmf(self, k, a):
return tanh(a/2.0)*exp(-a*abs(k))
def _cdf(self, x, a):
k = floor(x)
ind = (k >= 0)
const = exp(a)+1
return where(ind, 1.0-exp(-a*k)/const, exp(a*(k+1))/const)
def _ppf(self, q, a):
const = 1.0/(1+exp(-a))
cons2 = 1+exp(a)
ind = q < const
vals = ceil(where(ind, log(q*cons2)/a-1, -log((1-q)*cons2)/a))
vals1 = (vals-1)
temp = self._cdf(vals1, a)
return where(temp >= q, vals1, vals)
def _stats_skip(self, a):
# variance mu2 does not agree with sample variance,
# nor with direct calculation using pmf
# remove for now because generic calculation works
# except it does not show nice zeros for mean and skew(?)
ea = exp(-a)
e2a = exp(-2*a)
e3a = exp(-3*a)
e4a = exp(-4*a)
mu2 = 2* (e2a + ea) / (1-ea)**3.0
mu4 = 2* (e4a + 11*e3a + 11*e2a + ea) / (1-ea)**5.0
return 0.0, mu2, 0.0, mu4 / mu2**2.0 - 3
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-inf,
name='dlaplace', longname='A discrete Laplacian',
shapes="a", extradoc="""
Discrete Laplacian distribution.
dlaplace.pmf(k,a) = tanh(a/2) * exp(-a*abs(k))
for a > 0.
"""
)
class skellam_gen(rv_discrete):
def _rvs(self, mu1, mu2):
n = self._size
return np.random.poisson(mu1, n)-np.random.poisson(mu2, n)
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0, ncx2.pdf(2*mu2, 2*(1-x), 2*mu1)*2,
ncx2.pdf(2*mu1, 2*(x+1), 2*mu2)*2)
#ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = np.floor(x)
px = np.where(x < 0, ncx2.cdf(2*mu2, -2*x, 2*mu1),
1-ncx2.cdf(2*mu1, 2*(x+1), 2*mu2))
return px
# enable later
## def _cf(self, w, mu1, mu2):
## # characteristic function
## poisscf = poisson._cf
## return poisscf(w, mu1) * poisscf(-w, mu2)
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / np.sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam',
shapes="mu1,mu2", extradoc="""
Skellam distribution
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, k1-k2 follows a Skellam distribution with
parameters mu1 = lam1 - rho*sqrt(lam1*lam2) and
mu2 = lam2 - rho*sqrt(lam1*lam2), where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then rho = 0.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
"""
)
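# Editor's note -- illustrative sketch only, not part of the original source. A Skellam
# variate is the difference of two Poisson counts, so its first two moments follow
# directly from mu1 and mu2:
#
#   >>> skellam.stats(3.0, 1.0)    # mean = 3 - 1 = 2, variance = 3 + 1 = 4
#   >>> skellam.pmf(0, 1.0, 1.0)   # probability the two counts are equal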
| gpl-3.0 |
rhjvedder/Plague_models | tools/run_manual.py | 1 | 7833 | import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
from tools.TempReader import TempReader
class Model:
def __init__(self, directory, *args):
# -- Params
self.dir = directory
self.years_list = args[0][-2]
self.months_list = args[0][-1]
self.rat_pop = args[0][1].value
self.beta_h = 0.225
self.temp_scale = args[0][5].value
self.beta_r = args[0][6].value
self.inh_res = args[0][11].value
self.data, self.temp_list = TempReader().cooked()
self.t = [x for x in range(0, len(self.years_list))]
self.i_h = np.zeros_like(self.t, dtype=float)
self.r_h = np.zeros_like(self.t, dtype=float)
self.s_r = np.zeros_like(self.t, dtype=float)
self.i_r = np.zeros_like(self.t, dtype=float)
self.res_r = np.zeros_like(self.t, dtype=float)
self.d_r = np.zeros_like(self.t, dtype=float)
self.i_f = np.zeros_like(self.t, dtype=float)
self.fph = np.zeros_like(self.t, dtype=float)
def graph(self):
confirmed_cases = [0, 0, 0, 0, 0, 0, 8, 12, 62, 16, 2, 14, 6, 5, 0, 0, 0, 0, 1, 5, 22, 39, 11, 8, 5, 6, 2, 1, 0, 0, 10, 38, 59,
74, 13, 6, 1, 1, 0, 0, 0, 0, 4, 17, 18, 29, 9, 8, 3, 3, 1, 0, 1, 0]
scaled_cases = [0, 0, 0, 0, 0, 0, 52.0, 78.0, 403.0, 104.0, 13.0, 91.0, 36.0, 30.0, 0.0, 0.0, 0.0, 0.0, 6.0, 30.0, 132.0, 234.0, 66.0, 48.0, 15.0, 18.0, 6.0, 3.0, 0.0, 0.0, 30.0, 114.0, 177.0, 222.0, 39.0, 18.0, 3.0, 3.0, 0.0, 0.0, 0.0, 0.0, 12.0, 51.0, 54.0, 87.0, 27.0, 24.0, 24.0, 24.0, 8.0, 0.0, 8.0, 0.0]
self.plot("infected_humans", "graph of infected humans with\n max posteriori values",
infected_humans=self.i_h, confirmed_cases=confirmed_cases, scaled_cases=scaled_cases)
self.plot("infected_rats", "graph of infected rats with\n max posteriori values",
susceptible_rats=self.s_r, infected_rats=self.i_r, resistant_rats=self.res_r)
def plot(self, filename, title, **kwargs):
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
fig, ax = plt.subplots()
# plot the data
for label, data in kwargs.items():
if len(data) == len(self.years_list):
ax.plot(self.years_list, data, label=" ".join(label.split("_")))
else:
ax.plot(self.months_list, data, label=" ".join(label.split("_")))
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
# set the axis limit
datemin = min(self.years_list)
datemax = max(self.years_list) + 1
ax.set_xlim(datemin, datemax)
# format the coords message box
def price(x):
return '$%1.2f' % x
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.format_ydata = price
ax.grid(True)
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
# some extra plot formating
ax.legend(loc='best')
plt.style.use('ggplot')
plt.rc('font', size=12)
plt.rc('lines', linewidth=2)
plt.rc('figure', autolayout=True)
plt.title(title)
plt.xlabel('time in months')
plt.ylabel('number of humans')
plt.savefig(filename + ".png")
def plague_model(self):
# - human
gamma_h = 0.2
p_recovery_h = .4
# - rat
gamma_r = 0.2
# .1
p_recovery_ur = .1
rep_rate_r = .4 * (1 - 0.234)
rep_rate_ur = .4
d_rate_ui = 1 / 365
# - flea
d_rate = 0.2
# 0.2
g_rate = .0084
c_cap = 6.
# human
n_h = 25000
self.i_h[0] = 0.
# rat
self.s_r[0] = self.rat_pop - 20.
self.res_r[0] = 0.
infected_rat_deaths = 0.0
# flea
n_d_rate = 0.005
searching = 3. / self.s_r[0]
self.fph[0] = 6.0
# shrews
i_rpd = 0.00167
for i, v in enumerate(self.years_list[1:], 1):
if 189 <= v.timetuple().tm_yday <= 222:
shrew_transference = i_rpd * self.s_r[i - 1]
else:
shrew_transference = 0
date_string = v.strftime("%Y-%m-%d")
temp = self.data[date_string][0] * self.temp_scale
temp_growth_factor = max(0, (temp - 15.0) / 10.0)
temp_spread_factor = (0.75 - 0.25 * np.tanh(((temp * 9. / 5.) + 32.) - 80.))
# + rec_r[i - 1]
n_r = self.s_r[i - 1] + self.i_r[i - 1] + self.res_r[i - 1]
# natural deaths
natural_death_unresistant = (self.s_r[i - 1] * d_rate_ui)
natural_death_resistant = (self.res_r[i - 1] * d_rate_ui)
natural_death_infected = (self.i_r[i - 1] * d_rate_ui)
# - Fleas
new_infectious = infected_rat_deaths * self.fph[i - 1]
# could be made temperature dependent
starvation_deaths = d_rate * self.i_f[i - 1]
# number of fleas that find a human
force_to_humans = min(self.i_f[i - 1], self.i_f[i - 1] * np.exp(float(-searching * n_r)))
# number of fleas that find a rat
force_to_rats = self.i_f[i - 1] - force_to_humans
force_to_rats = force_to_rats * temp_spread_factor
force_to_humans = force_to_humans * temp_spread_factor
self.fph[i] = self.fph[i - 1] + (temp_growth_factor * g_rate * self.fph[i - 1])\
- (n_d_rate * (1 + self.fph[i - 1] / c_cap) * self.fph[i - 1])
# should add dehydration
self.i_f[i] = max(0.0, self.i_f[i - 1] + new_infectious - starvation_deaths)
# - Rats
new_infected_rats = self.beta_r * self.s_r[i - 1] * force_to_rats / n_r
new_infected_rats = 0 if new_infected_rats < 0 else new_infected_rats
new_removed_rats = gamma_r * (self.i_r[i - 1] - natural_death_infected)
new_recovered_rats = p_recovery_ur * new_removed_rats
new_dead_rats = new_removed_rats - new_recovered_rats
infected_rat_deaths = new_dead_rats
# born rats
pressure = n_r / self.rat_pop
resistant_born_rats = rep_rate_r * self.res_r[i - 1] * (self.inh_res - pressure)
unresistant_born_rats = ((rep_rate_r * self.res_r[i - 1] * (1 - self.inh_res))
+ (rep_rate_ur * self.s_r[i - 1] * (1 - pressure)))
# time step values
self.s_r[i] = min(self.rat_pop, self.s_r[i - 1] + unresistant_born_rats - new_infected_rats
- natural_death_unresistant - shrew_transference)
self.i_r[i] = self.i_r[i - 1] + new_infected_rats - new_removed_rats - natural_death_infected\
+ shrew_transference
self.res_r[i] = self.res_r[i - 1] + new_recovered_rats + resistant_born_rats - natural_death_resistant
self.d_r[i] = new_dead_rats + natural_death_unresistant + natural_death_resistant + natural_death_infected
# - Humans
s_h = n_h - self.i_h[i - 1] - self.r_h[i - 1]
new_infected_humans = min(n_h, self.beta_h * s_h * force_to_humans / n_h) + 0.000000000001
new_removed_humans = gamma_h * self.i_h[i - 1]
new_recovered_humans = p_recovery_h * new_removed_humans
# time step values
self.i_h[i] = self.i_h[i - 1] + new_infected_humans - new_removed_humans
self.r_h[i] = self.r_h[i - 1] + new_recovered_humans
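# Editor's note -- illustration only, not part of the original script. In the update
# loop above, a free infectious flea fails to find a rat with probability
# exp(-searching * n_r) and bites a human instead; the remainder reinfect rats. A
# standalone numeric check of that split (numpy only, the values below are made up):
#
#   >>> import numpy as np
#   >>> i_f, searching, n_r = 100.0, 3.0 / 2480.0, 2480.0
#   >>> to_humans = i_f * np.exp(-searching * n_r)   # ~5 fleas reach humans
#   >>> to_rats = i_f - to_humans                    # ~95 stay in the rat population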
| gpl-3.0 |
jorik041/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
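# Editor's note -- illustrative alternative, not in the original example. The pointwise
# loop above can also be vectorised; with a current scikit-learn API the equivalent
# would be something like
#   Z = clf.decision_function(np.c_[X1.ravel(), X2.ravel()]).reshape(X1.shape)
# which issues a single decision_function call instead of one per grid point.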
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
fabeschan/midigeneration | analyze.py | 1 | 3255 | '''
analyze.py
This file sets up infrastructure for the similarity measure using a classifier
that compares two music segments and gives a score between 0 and 1
'''
from sklearn import datasets, neighbors, linear_model, svm
from sklearn.metrics import confusion_matrix
import numpy as np
import random
from similar_sections import ss
import sys
class train_set(object):
def __init__(self, data, target):
self.data = data
self.target = target
def generate():
gen = ss.generate_targets_subset()
random.shuffle(gen)
target = np.array([ v[2] for v in gen ])
data = np.array([ f[0].compare_with(f[1]) for f in gen ])
return train_set(data, target)
def train_classifier(sdata, classifier=None):
digits = sdata
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
# data
X_train = X_digits[:]
y_train = y_digits[:]
if not classifier:
#classifier = svm.NuSVC(nu=0.01, probability=True)
#classifier = linear_model.RidgeClassifierCV()
classifier = linear_model.LogisticRegression(C=3.0)
classifier_fit = classifier.fit(X_train, y_train)
return classifier_fit
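# Editor's note -- illustrative sketch only, not part of the original module. Typical
# use of the helpers above would be along these lines:
#
#   sdata = generate()                  # features/targets built from similar_sections
#   clf = train_classifier(sdata)       # LogisticRegression fit by default
#   clf.predict_proba(sdata.data[:1])   # similarity score for one segment pair
#
# predict_proba is available here only because the default classifier is logistic
# regression; with RidgeClassifierCV you would use decision_function instead.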
def test(sdata, classifier=None, verbose=True, verboseverbose=False):
digits = sdata
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
# data
X_train = X_digits[:.85 * n_samples]
y_train = y_digits[:.85 * n_samples]
# truths/target
X_test = X_digits[.85 * n_samples:]
y_test = y_digits[.85 * n_samples:]
if not classifier:
classifier = linear_model.RidgeClassifierCV()
classifier_fit = classifier.fit(X_train, y_train)
pred = classifier_fit.predict(X_test)
score = classifier_fit.score(X_test, y_test)
if verboseverbose:
# print the matrix of feature scores
big_matrix = np.array([ np.hstack((X_test[i], y_test[i])) for i in xrange(len(X_test)) ])
print ['Tr0Rhyt','Tr0TopL','Tr1Rhyt','Tr1TopL','Truth']
print big_matrix
if verbose:
print 'TRUTH:', y_test
print 'PREDN:', pred
print ('Classifier score: %f' % score)
return score, pred, y_test
def evaluate_n(n, sdata, classifier):
avg_score = 0.0
pred_overall, y_test_overall = np.array([]), np.array([])
for i in xrange(n):
score, pred, y_test = test(sdata, classifier, verbose=False if n > 1 else True)
avg_score += score / n
pred_overall = np.hstack((pred_overall, pred))
y_test_overall = np.hstack((y_test_overall, y_test))
sys.stdout.write("\r(Progress: %d/%d)" % (i, n))
sys.stdout.flush()
else:
sys.stdout.write("\r")
sys.stdout.flush()
print "---- Num of Repetitions:", n
print "---- Average Score:", avg_score
np.set_printoptions(linewidth=999999)
print confusion_matrix(y_test_overall, pred_overall)
if __name__ == '__main__':
# four classifiers to choose from
svm = svm.NuSVC(nu=0.02)
ridge = linear_model.RidgeClassifierCV()
knn = neighbors.KNeighborsClassifier()
lr = linear_model.LogisticRegression(C=10.0)
n = 40
if len(sys.argv) == 2:
n = int(sys.argv[1])
evaluate_n(n, generate(), lr)
| mit |
ch3ll0v3k/scikit-learn | sklearn/utils/tests/test_testing.py | 144 | 4121 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LDA()
tree = DecisionTreeClassifier()
# LDA doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
466152112/scikit-learn | sklearn/feature_extraction/text.py | 24 | 50103 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that
can be of type string or bytes, which are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
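# Editor's note -- illustrative sketch only, not part of the original module.
# HashingVectorizer is stateless, so transform can be called without fitting:
#
#   >>> hv = HashingVectorizer(n_features=2 ** 10)
#   >>> X = hv.transform(["the quick brown fox", "jumped over the lazy dog"])
#   >>> X.shape    # (2, 1024), returned as a scipy.sparse matrix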
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that
can be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
            raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
                raise ValueError(
                    "max_df corresponds to fewer documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
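# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original module. It
# shows the fit_transform()/vocabulary_ behaviour documented for
# CountVectorizer above; the two-document corpus is made up.
def _demo_count_vectorizer():  # pragma: no cover
    """Build a token-count matrix and inspect the learned vocabulary."""
    corpus = ["the cat sat", "the cat sat on the mat"]
    cv = CountVectorizer()
    X = cv.fit_transform(corpus)
    # vocabulary_ maps each term to a column index (terms sorted by name),
    # e.g. {'cat': 0, 'mat': 1, 'on': 2, 'sat': 3, 'the': 4};
    # X is a sparse (2, 5) matrix of raw token counts.
    return cv.vocabulary_, X.toarray()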
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
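# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original module. It
# spells out the smoothed idf computed in TfidfTransformer.fit() above,
#     idf(t) = log((1 + n_samples) / (1 + df(t))) + 1,
# on a tiny hand-made count matrix and compares it with the fitted idf_.
def _demo_tfidf_transformer():  # pragma: no cover
    counts = np.array([[3, 0, 1],
                       [2, 0, 0],
                       [3, 0, 0],
                       [4, 0, 0]])
    tr = TfidfTransformer(smooth_idf=True, norm=None)
    tfidf = tr.fit_transform(counts)
    # For the last column: df = 1 and n_samples = 4, so
    manual_idf = np.log((1. + 4.) / (1. + 1.)) + 1.
    # tr.idf_[2] equals manual_idf, and with norm=None the output is simply
    # the counts scaled column-wise by the idf vector.
    return tr.idf_, manual_idf, tfidf.toarray()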
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items (strings
        or bytes) that are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
        Tokenize the documents and count the occurrences of tokens and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
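# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original module. It
# shows the fit_transform()/transform() round trip documented for
# TfidfVectorizer above; the tiny corpus and query are made up.
def _demo_tfidf_vectorizer():  # pragma: no cover
    corpus = ["the cat sat on the mat", "the dog chased the cat"]
    vec = TfidfVectorizer(norm='l2')
    X_train = vec.fit_transform(corpus)    # (2, n_terms) tf-idf matrix
    X_new = vec.transform(["a lazy dog"])  # reuses the fitted idf vector
    return vec.get_feature_names(), X_train.shape, X_new.shape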
| bsd-3-clause |
balazssimon/ml-playground | udemy/lazyprogrammer/logistic-regression-python/logistic_donut.py | 1 | 2148 | # logistic regression classifier for the donut problem.
#
# the notes for this class can be found at:
# https://deeplearningcourses.com/c/data-science-logistic-regression-in-python
# https://www.udemy.com/data-science-logistic-regression-in-python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import matplotlib.pyplot as plt
N = 1000
D = 2
R_inner = 5
R_outer = 10
# distance from origin is radius + random normal
# angle theta is uniformly distributed between (0, 2pi)
R1 = np.random.randn(N//2) + R_inner
theta = 2*np.pi*np.random.random(N//2)
X_inner = np.concatenate([[R1 * np.cos(theta)], [R1 * np.sin(theta)]]).T
R2 = np.random.randn(N//2) + R_outer
theta = 2*np.pi*np.random.random(N//2)
X_outer = np.concatenate([[R2 * np.cos(theta)], [R2 * np.sin(theta)]]).T
X = np.concatenate([ X_inner, X_outer ])
T = np.array([0]*(N//2) + [1]*(N//2)) # labels: first N//2 points are 0, last N//2 are 1
plt.scatter(X[:,0], X[:,1], c=T)
plt.show()
# add a column of ones
# ones = np.array([[1]*N]).T # old
ones = np.ones((N, 1))
# add a column of r = sqrt(x^2 + y^2)
r = np.sqrt( (X * X).sum(axis=1) ).reshape(-1, 1)
Xb = np.concatenate((ones, r, X), axis=1)
# randomly initialize the weights
w = np.random.randn(D + 2)
# calculate the model output
z = Xb.dot(w)
def sigmoid(z):
return 1/(1 + np.exp(-z))
Y = sigmoid(z)
# calculate the cross-entropy error
def cross_entropy(T, Y):
return -(T*np.log(Y) + (1-T)*np.log(1-Y)).sum()
# let's do gradient descent 5000 times
learning_rate = 0.0001
error = []
for i in range(5000):
e = cross_entropy(T, Y)
error.append(e)
if i % 500 == 0:
print(e)
    # gradient descent weight update with regularization
# w += learning_rate * ( np.dot((T - Y).T, Xb) - 0.01*w ) # old
w += learning_rate * ( Xb.T.dot(T - Y) - 0.1*w )
# recalculate Y
Y = sigmoid(Xb.dot(w))
plt.plot(error)
plt.title("Cross-entropy per iteration")
plt.show()
print("Final w:", w)
print("Final classification rate:", 1 - np.abs(T - np.round(Y)).sum() / N)
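# ---------------------------------------------------------------------------
# Editor's note: hedged addition, not in the original script. It shows how the
# learned weights can be applied to a brand-new point: the same feature map
# [1, r, x, y] used above has to be rebuilt, with r = sqrt(x^2 + y^2).
def classify_point(w, x1, x2):
    """Return P(class=1) for a single (x1, x2) point under the fitted model."""
    r = np.sqrt(x1*x1 + x2*x2)
    features = np.array([1.0, r, x1, x2])
    return sigmoid(features.dot(w))
# A point on the inner ring (radius ~5) should typically score near 0 and a
# point on the outer ring (radius ~10) near 1:
print("P(outer) at radius 5:", classify_point(w, 5.0, 0.0))
print("P(outer) at radius 10:", classify_point(w, 10.0, 0.0))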
| apache-2.0 |
arjoly/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
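# ---------------------------------------------------------------------------
# Editor's note: hedged addition, not part of the original example. Because
# pipeline parameters are addressed as <step>__<param>, the values chosen by
# the grid search can be read back the same way after fitting:
print("Best parameters:", estimator.best_params_)  # e.g. pca__n_components, logistic__C
print("Best cross-validation score:", estimator.best_score_)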
| bsd-3-clause |
yejingfu/samples | tensorflow/latency_oa_analyzer.py | 1 | 9552 | #!/usr/bin/env python3
import os
import sys
import getopt
import numpy as np
import matplotlib.pyplot as plt
import re
from benchmark_common import CommonTool
from benchmark_common import TimestampHelper
'''
Internal usage only
We will probably improve the code (such as renaming t0, t1, etc.) if we want to make it public.
The OA module print the series of timestamp values as below:
[timestamp]t0:1544927483.05,t1:1544927513.82,t2:1544927513.86,t3:1544927513.86,t4:1544927513.89,t5:1544927513.9
where:
t0: The Camera embeded timestamp when the frame is sent out from camera
t1: The timestamp printed BEFORE VideoCapture.read()
t2: The timestamp printed AFTER VideoCapture.read()
t3: The timestamp printed AFTER the sub thread(AI) received the frame data from queue.
It is also the timestamp BEFORE calling the AI perception GetObjectInfo()
t4: The timestamp printed AFTER the AI perception GetObjectInfo()
t5: The timestamp printed BEFORE sending out the OA data to fusion
Reference to the source code: object_analytics.py
The basic data flow is as below:
Camera -> (t0) -> (t1) -> VideoCapture.read() -> (t2) -> frameQueue -> (t3) -> AI -> (t4) -> ObjectAnalyze -> (t5) -> [fusion]
NOTE:
The timestamps are in seconds counted from 1970-01-01 00:00 (Unix epoch)
The frames are down-sampled: a log record is printed every 15 frames
'''
REG_TS = r't0:(.+),t1:(.+),t2:(.+),t3:(.+),t4:(.+),t5:(.*)'
def readTimestampFromOALog(filepath, stamps):
log = open(filepath)
lines = log.readlines()
log.close()
for i in range(len(lines)):
line = lines[i].rstrip('\n').strip()
ret = re.findall(REG_TS, line)
if len(ret) > 0 and len(ret[0]) == 6:
stamps[0].append(float(ret[0][0]) * 1000)
stamps[1].append(float(ret[0][1]) * 1000)
stamps[2].append(float(ret[0][2]) * 1000)
stamps[3].append(float(ret[0][3]) * 1000)
stamps[4].append(float(ret[0][4]) * 1000)
stamps[5].append(float(ret[0][5]) * 1000)
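# ---------------------------------------------------------------------------
# Editor's note: hedged addition, not in the original script. It demonstrates
# how REG_TS pulls the six timestamps out of one log record of the form shown
# in the module docstring above.
def _demo_parse_log_line():
    line = ("[timestamp]t0:1544927483.05,t1:1544927513.82,t2:1544927513.86,"
            "t3:1544927513.86,t4:1544927513.89,t5:1544927513.9")
    ret = re.findall(REG_TS, line)
    # ret == [('1544927483.05', '1544927513.82', ..., '1544927513.9')]
    # readTimestampFromOALog() above converts each group to float seconds
    # and scales it to milliseconds, as done here:
    return [float(v) * 1000 for v in ret[0]]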
def analyzeOALatency(rootFolder, filenames, plotOpts):
labels = filenames
label2names = {}
for name in os.listdir(rootFolder):
for n in filenames:
if name.startswith(n):
if n not in label2names:
label2names[n] = {}
m = label2names[n]
if len(name) == len(n):
m[-1] = name
else:
ext = name[len(n) + 1:]
if re.match(r'^\d*$', ext):
m[int(name[len(n) + 1:])] = name
#print(label2names)
label2timestamps = {}
for label in label2names:
print("Analyze the log: " + label)
m = label2names[label]
        keys = sorted(m.keys())
filepaths = []
for k in range(len(keys)):
filepaths.append(os.path.join(rootFolder, m[keys[len(keys) - k - 1]]))
if len(filepaths) == 0:
print("No log file in the folder: " + label)
continue
#print(filepaths)
if label not in label2timestamps:
label2timestamps[label] = [[] for i in range(6)]
stamps = label2timestamps[label]
for filepath in filepaths:
readTimestampFromOALog(filepath, stamps)
# [decode, AI, publish, total, queue]
label2delays = {}
helper = TimestampHelper(-10000, 2000)
for label in label2timestamps:
stamps = label2timestamps[label]
if label not in label2delays:
label2delays[label] = [[], [], [], [], []]
delays = label2delays[label]
for i in range(len(stamps[0])):
helper.appendInterval(delays[0], stamps[0][i], stamps[2][i], False) # decode
helper.appendInterval(delays[1], stamps[3][i], stamps[4][i], False) # AI
helper.appendInterval(delays[2], stamps[4][i], stamps[5][i], False) # publish
helper.appendInterval(delays[3], stamps[0][i], stamps[5][i], False) # total
helper.appendInterval(delays[4], stamps[2][i], stamps[3][i], False) # queue
# average and quantiles
label2avg = {}
label2quantiles = {}
for label in label2delays:
delays = label2delays[label]
if label not in label2avg:
label2avg[label] = []
label2avg[label].append(CommonTool.calcAverage(delays[0]))
label2avg[label].append(CommonTool.calcAverage(delays[1]))
label2avg[label].append(CommonTool.calcAverage(delays[2]))
label2avg[label].append(CommonTool.calcAverage(delays[3]))
label2avg[label].append(CommonTool.calcAverage(delays[4]))
if label not in label2quantiles:
label2quantiles[label] = []
label2quantiles[label].append(CommonTool.calcQuantile(delays[0]))
label2quantiles[label].append(CommonTool.calcQuantile(delays[1]))
label2quantiles[label].append(CommonTool.calcQuantile(delays[2]))
label2quantiles[label].append(CommonTool.calcQuantile(delays[3]))
label2quantiles[label].append(CommonTool.calcQuantile(delays[4]))
# print avg and quantile data
print("=====average: decode, AI, publish, total, queue=======")
for label in label2avg:
avg = label2avg[label]
print("[TRACE_LATENCY_OA_AVG_" + label + "]:" + str(avg))
print("=====quantile(0, 0.25, 0.5, 0.75, 1): decode, AI, publish, total, queue==")
for label in label2quantiles:
quantile = label2quantiles[label]
print("[TRACE_LATENCY_OA_QUANTILES_" + label + "]:" + str(quantile))
# frame interval (based on camera timestamp)
label2interval = {}
for label in label2timestamps:
stamps = label2timestamps[label]
if label not in label2interval:
label2interval[label] = []
intervals = label2interval[label]
for i in range(len(stamps[0]) - 1):
tmp = stamps[0][i+1] - stamps[0][i]
if tmp < 0:
print("[TRACE_FRAME_INTERVAL_" + label + "]:" + str(i) + ":" + str(tmp))
helper.appendInterval(intervals, stamps[0][i], stamps[0][i+1])
# plot
legends = []
idx = []
plotInterval = False
if "all" in plotOpts:
legends = ["Decode", "AI", "Total(BeforeFusion)", "Queue", "FrameInterval"]
idx = [0, 1, 3, 4]
plotInterval = True
else:
if "decode" in plotOpts:
idx.append(0)
legends.append("Decode")
if "ai" in plotOpts:
idx.append(1)
legends.append("AI")
if "total" in plotOpts:
idx.append(3)
legends.append("Total(BeforeFusion)")
if "queue" in plotOpts:
idx.append(4)
legends.append("Queue")
if "interval" in plotOpts:
legends.append("FrameInterval")
plotInterval = True
if len(idx) > 0 or plotInterval == True:
plt.figure()
sz = len(labels)
if sz == 1:
delays = label2delays[labels[0]]
plt.title(labels[0])
for i in idx:
plt.plot(delays[i])
if plotInterval == True:
plt.plot(label2interval[labels[0]])
plt.legend(legends)
else:
for i in range(sz):
label = labels[i]
sub = int(str(sz) + "1" + str(i + 1))
# print("sub", sub)
delays = label2delays[label]
plt.subplot(sub)
plt.title(label)
for j in idx:
plt.plot(delays[j])
if plotInterval == True:
plt.plot(label2interval[label])
if i == 0:
plt.legend(legends)
plt.show()
def help():
print('''Usage:
$python latency_oa_analyzer.py [-h] [-p] <folder-path> [camera-log-name]
options:
p Plot the latency data, its value can be:
all plot all curves
decode plot the decode curve
ai plot the ai curve
total plot the total curve
queue plot the queue curve
interval plot the frame timestamp interval curve
All the plot options are connected with "+", for example: ai+decode
path The path to the folder where the "camera.log.xxx" is located
camera-log-name The specific camera log file name,
All the log files with same name but following numbers will be parsed, e.g. camera-log-name.log.1, camera-log-name.log.2, etc.
example:
$python latency_oa_analyzer.py -p ai ~/path/to/logs far_camera-2.log near_camera-3.log fisheye_camera-4.log
(Make sure the camera.log.xxx files are located in the folder!)
''')
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:], "hp:")
except getopt.GetoptError:
help()
sys.exit(2)
if len(args) == 0:
help()
sys.exit(2)
rootFolder = args[0]
filenames = args[1:]
plotOpts = []
for opt, arg in opts:
if opt == "-p":
plotOpts = arg.split("+")
#print("plotOpts", plotOpts)
elif opt == "-h":
help()
sys.exit(2)
if rootFolder == None or filenames == None or len(filenames) == 0:
print("Empty folder or file names")
help()
sys.exit(2)
if os.path.exists(rootFolder) == False:
print("The folder does not exist:" + rootFolder)
help()
sys.exit(2)
analyzeOALatency(rootFolder, filenames, plotOpts)
print("Latency OA analyzer done!")
| mit |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Interface_Mesh_Types/Interface_5/SoftContact_NonLinHardSoftShear/Interface_Test_Normal_Plot.py | 30 | 2779 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
import matplotlib;
import math;
from matplotlib.ticker import MaxNLocator
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Normal_Stress.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(normal_strain*100,normal_stress/1000,'-r',label='Analytical Solution', Linewidth=4, markersize=20)
plt.xlabel(r"Interface Type #")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
plt.hold(True)
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Interface_Surface_Adding_axial_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
normal_strain = -finput["/Model/Elements/Element_Outputs"][6,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain*100,normal_stress/1000,'-k',label='Numerical Solution', Linewidth=4, markersize=20)
plt.xlabel(r"Normal Strain [%]")
plt.ylabel(r"Normal Stress $\sigma_n [kPa]$")
#############################################################
# # # axes = plt.gca()
# # # axes.set_xlim([-7,7])
# # # axes.set_ylim([-1,1])
# outfigname = "Interface_Test_Normal_Stress.pdf";
# plt.axis([0, 5.5, 90, 101])
# legend = plt.legend()
# legend.get_frame().set_linewidth(0.0)
# legend.get_frame().set_facecolor('none')
plt.legend()
plt.savefig(outfigname, bbox_inches='tight')
# plt.show() | cc0-1.0 |
RPGOne/Skynet | imbalanced-learn-master/examples/over-sampling/plot_smote_svm.py | 3 | 1830 | """
=========
SMOTE SVM
=========
An illustration of the SMOTE SVM method.
"""
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Define some color for the plotting
almost_black = '#262626'
palette = sns.color_palette()
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTE
# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
n_informative=3, n_redundant=1, flip_y=0,
n_features=20, n_clusters_per_class=1,
n_samples=5000, random_state=10)
# Instantiate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Fit and transform x to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)
# Apply SMOTE SVM
sm = SMOTE(kind='svm')
X_resampled, y_resampled = sm.fit_sample(X, y)
X_res_vis = pca.transform(X_resampled)
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5,
edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1", alpha=0.5,
edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
ax1.set_title('Original set')
ax2.scatter(X_res_vis[y_resampled == 0, 0], X_res_vis[y_resampled == 0, 1],
label="Class #0", alpha=.5, edgecolor=almost_black,
facecolor=palette[0], linewidth=0.15)
ax2.scatter(X_res_vis[y_resampled == 1, 0], X_res_vis[y_resampled == 1, 1],
label="Class #1", alpha=.5, edgecolor=almost_black,
facecolor=palette[2], linewidth=0.15)
ax2.set_title('SMOTE svm')
plt.show()
| bsd-3-clause |
wkretzsch/bcftools | misc/plot-roh.py | 1 | 13378 | #!/usr/bin/python
import glob, gzip, csv, sys, os, copy, re
csv.register_dialect('tab', delimiter='\t', quoting=csv.QUOTE_NONE)
def usage(msg=None):
if msg==None:
print 'Usage: plot.py [OPTIONS] <dir>'
print 'Options:'
print ' -H, --highlight +group1,-group2 Highlight calls shared within group1 but not present in group2'
print ' -i, --interactive Run interactively'
print ' -l, --min-length <num> Filter input regions shorter than this [0]'
print ' -n, --min-markers <num> Filter input regions with fewer marker than this [0]'
print ' -o, --outfile <file> Output file name [plot.png]'
print ' -q, --min-qual <num> Filter input regions with quality smaller than this [0]'
print ' -r, --region [^]<chr|chr:beg-end> Plot this chromosome/region only'
print ' -s, --samples <file> List of samples to show, rename or group: "name[\\tnew_name[\\tgroup]]"'
print ' -h, --help This usage text'
print 'Matplotlib options:'
print ' +adj, --adjust <str> Set plot adjust [bottom=0.18,left=0.07,right=0.98]'
print ' +dpi, --dpi <num> Set bitmap DPI [150]'
print ' +sxt, --show-xticks Show x-ticks (genomic coordinate)'
print ' +xlb, --xlabel <str> Set x-label'
print ' +xli, --xlimit <num> Extend x-range by this fraction [0.05]'
else:
print msg
sys.exit(1)
dir = None
regs = None
min_length = 0
min_markers = 0
min_qual = 0
interactive = False
sample_file = None
highlight = None
outfile = None
adjust = 'bottom=0.18,left=0.07,right=0.98'
dpi = 150
xlim = 0.05
show_xticks = False
xlabel = None
if len(sys.argv) < 2: usage()
args = sys.argv[1:]
while len(args):
if args[0]=='-r' or args[0]=='--region':
args = args[1:]
regs = args[0]
elif args[0]=='-i' or args[0]=='--interactive':
interactive = True
elif args[0]=='-l' or args[0]=='--min-length':
args = args[1:]
min_length = float(args[0])
elif args[0]=='-n' or args[0]=='--min-markers':
args = args[1:]
min_markers = float(args[0])
elif args[0]=='-o' or args[0]=='--outfile':
args = args[1:]
outfile = args[0]
elif args[0]=='-q' or args[0]=='--min-qual':
args = args[1:]
min_qual = float(args[0])
elif args[0]=='-H' or args[0]=='--highlight':
args = args[1:]
highlight = args[0]
elif args[0]=='-s' or args[0]=='--samples':
args = args[1:]
sample_file = args[0]
elif args[0]=='-?' or args[0]=='-h' or args[0]=='--help':
usage()
elif args[0]=='+adj' or args[0]=='--adjust':
args = args[1:]
adjust = args[0]
elif args[0]=='+dpi' or args[0]=='--dpi':
args = args[1:]
dpi = float(args[0])
elif args[0]=='+xlb' or args[0]=='--xlabel':
args = args[1:]
xlabel = args[0]
elif args[0]=='+sxt' or args[0]=='--show-xticks':
show_xticks = True
elif args[0]=='+xli' or args[0]=='--xlimit':
args = args[1:]
xlim = float(args[0])
else:
dir = args[0]
args = args[1:]
if interactive and outfile!=None: usage("Use -i, --interactive or -o, --outfile, but not both")
if not interactive and outfile==None: outfile = 'plot.png'
def wrap_hash(**args): return args
adjust = eval("wrap_hash("+adjust+")")
import matplotlib as mpl
for gui in ['TKAgg','GTKAgg','Qt4Agg','WXAgg','MacOSX']:
try:
mpl.use(gui,warn=False, force=True)
import matplotlib.pyplot as plt
import matplotlib.patches as patches
break
except:
continue
cols = [ '#337ab7', '#5cb85c', '#5bc0de', '#f0ad4e', '#d9534f', 'grey', 'black' ]
mpl.rcParams['axes.color_cycle'] = cols
globstr = os.path.join(dir, '*.txt.gz')
fnames = glob.glob(globstr)
if len(fnames)==0: usage("No data files found in \""+dir+"\"")
def parse_regions(str):
if str==None: return None
regs = { 'inc':[], 'exc':[] }
list = str.split(',')
key = 'inc'
if list[0][0]=='^':
key = 'exc'
list[0] = list[0][1:]
for reg in list:
x = reg.split(':')
chr = x[0]
beg = 0
end = (1<<32)-1
if len(x)>1:
(beg,end) = x[1].split('-')
beg = float(beg)
end = float(end)
regs[key].append({'chr':chr,'beg':beg,'end':end})
return regs
def region_overlap(regs,chr,beg,end):
if regs==None: return (beg,end)
if len(regs['exc'])>0:
for reg in regs['exc']:
if chr==reg['chr']: return None
return (beg,end)
if len(regs['inc'])==0: return (beg,end)
for reg in regs['inc']:
if chr!=reg['chr']: continue
if beg>reg['end']: continue
if end<reg['beg']: continue
if beg<reg['beg']: beg = reg['beg']
if end>reg['end']: end = reg['end']
return (beg,end)
return None
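# ---------------------------------------------------------------------------
# Editor's note: hedged addition, not part of the original bcftools script. It
# illustrates the region syntax accepted by -r/--region and how the two
# helpers above interpret it; the coordinates are made up.
def _demo_region_filtering():
    regs = parse_regions('1:1000-2000,2')
    # regs == {'inc': [{'chr':'1','beg':1000.0,'end':2000.0},
    #                  {'chr':'2','beg':0,'end':(1<<32)-1}], 'exc': []}
    inside = region_overlap(regs, '1', 1500, 1800)  # kept as (1500, 1800)
    clipped = region_overlap(regs, '1', 500, 1500)  # clipped to (1000, 1500)
    outside = region_overlap(regs, '3', 100, 200)   # None: chromosome not selected
    return inside, clipped, outside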
def parse_outfile(fname):
files = re.split(r',',fname)
bname = re.search(r'^(.+)\.[^.]+$', files[0]).group(1)
for i in range(len(files)-1):
files[i+1] = bname+"."+files[i+1]
return files
def next_region(rgs):
min = None
for smpl in rgs:
if len(rgs[smpl])==0: continue
reg = rgs[smpl][0]
if min==None:
min = [0,0]
min[0] = reg[0]
min[1] = reg[1]
if min[0] > reg[0]: min[0] = reg[0]
if min==None: return None
for smpl in rgs:
if len(rgs[smpl])==0: continue
reg = rgs[smpl][0]
if min[1] > reg[1]: min[1] = reg[1]
if min[1] > reg[0] - 1 and min[0] != reg[0]: min[1] = reg[0] - 1
return min;
def merge_regions(rg):
rgs = copy.deepcopy(rg)
out = {}
while True:
min = next_region(rgs)
if min==None: break
beg = min[0]
end = min[1]
smpls = []
for smpl in rgs:
if len(rgs[smpl])==0: continue
reg = rgs[smpl][0]
if reg[0] > end: continue
if reg[1] > end:
rgs[smpl][0][0] = end + 1
else:
rgs[smpl] = rgs[smpl][1:]
if smpl not in out: out[smpl] = []
smpls.append(smpl)
if len(smpls)>1:
for smpl in smpls: out[smpl].append([beg,end])
return out
def prune_regions(groups,regions):
regs = {'+':{},'-':{}}
for smpl in regions:
grp = groups[smpl]
for reg in regions[smpl]:
key = str(reg[0])+"-"+str(reg[1]) # reg=[beg,end] -> "beg-end"
if key not in regs[grp]: regs[grp][key] = 0
regs[grp][key] += 1
nexp = 0
for smpl in groups:
if groups[smpl]=='+': nexp += 1
for smpl in regions:
rm = []
for reg in regions[smpl]:
key = str(reg[0])+"-"+str(reg[1])
if key in regs['-']: rm.append(reg)
elif key not in regs['+'] or regs['+'][key]!=nexp: rm.append(reg)
for reg in rm:
if reg in regions[smpl]:
regions[smpl].remove(reg)
return regions
def parse_samples(fname,highlight):
if fname==None: return (None,None,{})
samples = {}
groups = {}
grp2sgn = {}
smpl2y = {}
# parse "+name" to create a map "name":"+"
if highlight!=None:
for grp in re.split(r',', highlight):
if grp[0]!='+' and grp[0]!='-': usage("Expected + or - before the group name: "+grp)
grp2sgn[grp[1:]] = grp[0]
# read samples, renaming them
with open(fname) as f:
for line in f:
row = re.split(r'\s+', line.rstrip('\n'))
smpl = row[0]
if len(row)==1: samples[smpl] = smpl
else:
samples[smpl] = row[1]
if len(row)==3:
grp = row[2]
if grp in grp2sgn:
grp = grp2sgn[grp]
else:
grp = '+'
groups[smpl] = grp
y = len(smpl2y)
smpl2y[smpl] = y
if highlight==None: groups = None
return (samples,groups,smpl2y)
regs = parse_regions(regs)
(samples,groups,smpl2y) = parse_samples(sample_file,highlight)
dat_gt = {}
dat_rg = {}
chrs = []
for fname in fnames:
f = gzip.open(fname, 'rb')
reader = csv.reader(f, 'tab')
for row in reader:
if row[0]=='GT':
chr = row[1]
pos = int(row[2])
reg = region_overlap(regs,chr,pos,pos)
if reg==None: continue
for i in range(3,len(row),2):
smpl = row[i]
if samples!=None and smpl not in samples: continue
gt = row[i+1]
x = gt.split('/')
if x[0]=='.': continue # missing genotype ./.
dsg = 2
if x[0]!=x[1]: dsg = 1
elif x[0]=='0': continue # skip HomRef 0/0 genotypes
if chr not in dat_gt:
dat_gt[chr] = {}
chrs.append(chr)
if smpl not in dat_gt[chr]:
dat_gt[chr][smpl] = []
if smpl not in smpl2y:
y = len(smpl2y)
smpl2y[smpl] = y
dat_gt[chr][smpl].append([pos,dsg])
elif row[0]=='RG':
smpl = row[1]
if samples!=None and smpl not in samples: continue
chr = row[2]
beg = int(row[3])
end = int(row[4])
length= int(row[5])
nmark = int(row[6])
qual = float(row[7])
if length < min_length: continue
if nmark < min_markers : continue
if qual < min_qual : continue
reg = region_overlap(regs,chr,beg,end)
if chr not in dat_rg: dat_rg[chr] = {}
if smpl not in dat_rg[chr]: dat_rg[chr][smpl] = []
if reg!=None:
if beg<reg[0]: beg = reg[0]
if end>reg[1]: end = reg[1]
dat_rg[chr][smpl].append([beg,end])
if samples==None:
samples = {}
for smpl in smpl2y: samples[smpl] = smpl
# list the samples in the same order as encountered in the file, from top to bottom
for smpl in smpl2y:
smpl2y[smpl] = len(smpl2y) - smpl2y[smpl] - 1
off_list = []
off_hash = {}
off = 0
off_sep = 0
dat_rg1 = {}
for chr in chrs:
if chr in dat_rg:
rg1 = merge_regions(dat_rg[chr])
if groups!=None:
rg1 = prune_regions(groups,rg1)
if len(rg1)!=0: dat_rg1[chr] = rg1
off_hash[chr] = off
max_pos = 0
for smpl in dat_gt[chr]:
if max_pos < dat_gt[chr][smpl][-1][0]: max_pos = dat_gt[chr][smpl][-1][0]
if off_sep==0: off_sep = max_pos*0.1
off += max_pos + off_sep
off_list.append(off)
height = len(smpl2y)
if len(smpl2y)>5: heigth = 5
wh = 20,height
def bignum(num):
s = str(num); out = ''; slen = len(s)
for i in range(slen):
out += s[i]
if i+1<slen and (slen-i-1)%3==0: out += ','
return out
def format_coord(x, y):
chr = None
off = 0
for i in range(len(off_list)):
chr = chrs[i]
if off_list[i] > x: break
off = off_list[i]
return 'chr%s:%s'%(chr,bignum(int(x - off)))
fig, ax1 = plt.subplots(1, 1, figsize=wh, num=dir)
ax1.yaxis.set_ticks_position('none')
ax1.format_coord = format_coord
xtick_lbl = []
xtick_pos = []
max_x = 0
for chr in dat_gt:
off = off_hash[chr]
icol = 0
max = 0
for smpl in dat_gt[chr]:
y = smpl2y[smpl]
if chr in dat_rg and smpl in dat_rg[chr]:
for rg in dat_rg[chr][smpl]:
rect = patches.Rectangle((rg[0]+off,3*y+0.5), rg[1]-rg[0]+1, 2, color='#dddddd')
ax1.add_patch(rect)
if chr in dat_rg1 and smpl in dat_rg1[chr]:
for rg in dat_rg1[chr][smpl]:
rect = patches.Rectangle((rg[0]+off,3*y+0.5), rg[1]-rg[0]+1, 2, color='#d9534f')
ax1.add_patch(rect)
ax1.plot([x[0]+off for x in dat_gt[chr][smpl]],[x[1]+3*y for x in dat_gt[chr][smpl]],'.',color=cols[icol])
if max_x < dat_gt[chr][smpl][-1][0]+off: max_x = dat_gt[chr][smpl][-1][0]+off
if max < dat_gt[chr][smpl][-1][0]: max = dat_gt[chr][smpl][-1][0]
icol += 1
        if icol >= len(cols): icol = 0
xtick_lbl.append(chr)
xtick_pos.append(off)
ytick_lbl = []
ytick_pos = []
for chr in dat_gt:
for smpl in dat_gt[chr]:
ytick_lbl.append(samples[smpl])
ytick_pos.append(3*smpl2y[smpl]+1)
break
if xlim!=0:
ax1.set_xlim(0,max_x+xlim*max_x)
lbl_pos = 3*(len(smpl2y)-1)
ax1.annotate(' HomAlt ',xy=(max_x,lbl_pos-1),xycoords='data',va='center')
ax1.annotate(' Het',xy=(max_x,lbl_pos-2),xycoords='data',va='center')
if not show_xticks:
ax1.set_xticks(xtick_pos)
ax1.set_xticklabels(xtick_lbl)
if xlabel!=None:
ax1.set_xlabel(xlabel)
ax1.set_yticks(ytick_pos)
ax1.set_yticklabels(ytick_lbl)
ax1.set_ylim(0,3*len(smpl2y)+0.5)
plt.subplots_adjust(**adjust)
if interactive:
plt.show()
else:
files = parse_outfile(outfile)
for file in (parse_outfile(outfile)):
plt.savefig(file,dpi=dpi)
plt.close()
| gpl-3.0 |
ch3ll0v3k/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 162 | 9771 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/utils/tests/test_validation.py | 3 | 22522 | """Tests for input validation functions"""
import warnings
import os
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from sklearn.utils.testing import assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import SkipTest
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
has_fit_parameter,
check_is_fitted,
check_consistent_length,
assert_all_finite,
check_memory
)
import sklearn
from sklearn.exceptions import NotFittedError
from sklearn.exceptions import DataConversionWarning
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
X2 = as_float_array(X, copy=False)
assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
assert_equal(X2.dtype, np.float64)
# Test int dtypes <= 32bit
tested_dtypes = [np.bool,
np.int8, np.int16, np.int32,
np.uint8, np.uint16, np.uint32]
for dtype in tested_dtypes:
X = X.astype(dtype)
X2 = as_float_array(X)
assert_equal(X2.dtype, np.float32)
# Test object dtype
X = X.astype(object)
X2 = as_float_array(X, copy=True)
assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=(10, 10), dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M.ravel())
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
@ignore_warnings
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d=False
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# ensure_2d=True
assert_raise_message(ValueError, 'Expected 2D array, got 1D array instead',
check_array, [0, 1, 2], ensure_2d=True)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
    check_array(X_nan, force_all_finite=False)  # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_on_mock_dataframe():
arr = np.array([[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]])
mock_df = MockDataFrame(arr)
checked_arr = check_array(mock_df)
assert_equal(checked_arr.dtype,
arr.dtype)
checked_arr = check_array(mock_df, dtype=np.float32)
assert_equal(checked_arr.dtype, np.dtype(np.float32))
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_accept_sparse_type_exception():
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
invalid_type = SVR()
msg = ("A sparse matrix was passed, but dense data is required. "
"Use X.toarray() to convert to a dense numpy array.")
assert_raise_message(TypeError, msg,
check_array, X_csr, accept_sparse=False)
assert_raise_message(TypeError, msg,
check_array, X_csr, accept_sparse=None)
msg = ("Parameter 'accept_sparse' should be a string, "
"boolean or list of strings. You provided 'accept_sparse={}'.")
assert_raise_message(ValueError, msg.format(invalid_type),
check_array, X_csr, accept_sparse=invalid_type)
msg = ("When providing 'accept_sparse' as a tuple or list, "
"it must contain at least one string value.")
assert_raise_message(ValueError, msg.format([]),
check_array, X_csr, accept_sparse=[])
assert_raise_message(ValueError, msg.format(()),
check_array, X_csr, accept_sparse=())
assert_raise_message(TypeError, "SVR",
check_array, X_csr, accept_sparse=[invalid_type])
# Test deprecation of 'None'
assert_warns(DeprecationWarning, check_array, X, accept_sparse=None)
def test_check_array_accept_sparse_no_exception():
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
check_array(X_csr, accept_sparse=True)
check_array(X_csr, accept_sparse='csr')
check_array(X_csr, accept_sparse=['csr'])
check_array(X_csr, accept_sparse=('csr',))
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [[]])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage as trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check is ValueError raised when non estimator instance passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regex(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regex(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regex(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regex(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
def test_check_dataframe_fit_attribute():
# check pandas dataframe with 'fit' column does not raise error
# https://github.com/scikit-learn/scikit-learn/issues/8415
try:
import pandas as pd
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X, columns=['a', 'b', 'fit'])
check_consistent_length(X_df)
except ImportError:
raise SkipTest("Pandas not found")
def test_suppress_validation():
X = np.array([0, np.inf])
assert_raises(ValueError, assert_all_finite, X)
sklearn.set_config(assume_finite=True)
assert_all_finite(X)
sklearn.set_config(assume_finite=False)
assert_raises(ValueError, assert_all_finite, X)
class DummyMemory(object):
def cache(self, func):
return func
class WrongDummyMemory(object):
pass
def test_check_memory():
memory = check_memory("cache_directory")
assert_equal(memory.cachedir, os.path.join('cache_directory', 'joblib'))
memory = check_memory(None)
assert_equal(memory.cachedir, None)
dummy = DummyMemory()
memory = check_memory(dummy)
assert memory is dummy
assert_raises_regex(ValueError, "'memory' should be None, a string or"
" have the same interface as "
"sklearn.externals.joblib.Memory."
" Got memory='1' instead.", check_memory, 1)
dummy = WrongDummyMemory()
assert_raises_regex(ValueError, "'memory' should be None, a string or"
" have the same interface as "
"sklearn.externals.joblib.Memory. Got memory='{}' "
"instead.".format(dummy), check_memory, dummy)
| mit |
rexshihaoren/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running
an SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data to be in a curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to form a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
    # Compute the cross-validation score (n_jobs=1 here; set n_jobs=-1 to use all CPUs)
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
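# At this point score_means/score_stds hold one mean and one standard deviation
# per tested percentile; they are plotted as an error-bar curve below.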
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
jseabold/statsmodels | statsmodels/examples/ex_kernel_regression3.py | 5 | 2361 | # -*- coding: utf-8 -*-
"""script to try out Censored kernel regression
Created on Wed Jan 02 13:43:44 2013
Author: Josef Perktold
"""
import numpy as np
import statsmodels.nonparametric.api as nparam
if __name__ == '__main__':
np.random.seed(500)
nobs = [250, 1000][0]
sig_fac = 1
x = np.random.uniform(-2, 2, size=nobs)
x.sort()
x2 = x**2 + 0.02 * np.random.normal(size=nobs)
y_true = np.sin(x*5)/x + 2*x - 3 * x2
y = y_true + sig_fac * (np.sqrt(np.abs(3+x))) * np.random.normal(size=nobs)
cens_side = ['left', 'right', 'random'][2]
if cens_side == 'left':
c_val = 0.5
y_cens = np.clip(y, c_val, 100)
elif cens_side == 'right':
c_val = 3.5
y_cens = np.clip(y, -100, c_val)
elif cens_side == 'random':
c_val = 3.5 + 3 * np.random.randn(nobs)
y_cens = np.minimum(y, c_val)
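    # y_cens is the observed response: y censored from below (left), from
    # above (right), or capped at an observation-specific random threshold.
    # The censoring values are handed to KernelCensoredReg below so the fit
    # can account for them.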
model = nparam.KernelCensoredReg(endog=[y_cens],
#exog=[np.column_stack((x, x**2))], reg_type='lc',
exog=[x, x2], reg_type='ll',
var_type='cc', bw='aic', #'cv_ls', #[0.23, 434697.22], #'cv_ls',
censor_val=c_val[:,None]
#defaults=nparam.EstimatorSettings(efficient=True)
)
sm_bw = model.bw
sm_mean, sm_mfx = model.fit()
# model1 = nparam.KernelReg(endog=[y],
# exog=[x], reg_type='lc',
# var_type='c', bw='cv_ls')
# mean1, mfx1 = model1.fit()
model2 = nparam.KernelReg(endog=[y_cens],
exog=[x, x2], reg_type='ll',
var_type='cc', bw='aic')#, 'cv_ls'
mean2, mfx2 = model2.fit()
print(model.bw)
#print model1.bw
print(model2.bw)
ix = np.argsort(y_cens)
ix_rev = np.zeros(nobs, int)
ix_rev[ix] = np.arange(nobs)
ix_rev = model.sortix_rev
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(x, y, 'o', alpha=0.5)
ax.plot(x, y_cens, 'o', alpha=0.5)
ax.plot(x, y_true, lw=2, label='DGP mean')
ax.plot(x, sm_mean[ix_rev], lw=2, label='model 0 mean')
ax.plot(x, mean2, lw=2, label='model 2 mean')
ax.legend()
plt.show()
| bsd-3-clause |
AnimeshKoratana/DeepSpeech | bin/import_librivox.py | 1 | 8242 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function
# Make sure we can import stuff from util/
# This script needs to be run from the root of the DeepSpeech repository
import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import codecs
import fnmatch
import pandas
import progressbar
import subprocess
import tarfile
import unicodedata
from sox import Transformer
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.platform import gfile
def _download_and_preprocess_data(data_dir):
# Conditionally download data to data_dir
print("Downloading Librivox data set (55GB) into {} if not already present...".format(data_dir))
with progressbar.ProgressBar(max_value=7, widget=progressbar.AdaptiveETA) as bar:
TRAIN_CLEAN_100_URL = "http://www.openslr.org/resources/12/train-clean-100.tar.gz"
TRAIN_CLEAN_360_URL = "http://www.openslr.org/resources/12/train-clean-360.tar.gz"
TRAIN_OTHER_500_URL = "http://www.openslr.org/resources/12/train-other-500.tar.gz"
DEV_CLEAN_URL = "http://www.openslr.org/resources/12/dev-clean.tar.gz"
DEV_OTHER_URL = "http://www.openslr.org/resources/12/dev-other.tar.gz"
TEST_CLEAN_URL = "http://www.openslr.org/resources/12/test-clean.tar.gz"
TEST_OTHER_URL = "http://www.openslr.org/resources/12/test-other.tar.gz"
def filename_of(x): return os.path.split(x)[1]
train_clean_100 = base.maybe_download(filename_of(TRAIN_CLEAN_100_URL), data_dir, TRAIN_CLEAN_100_URL)
bar.update(0)
train_clean_360 = base.maybe_download(filename_of(TRAIN_CLEAN_360_URL), data_dir, TRAIN_CLEAN_360_URL)
bar.update(1)
train_other_500 = base.maybe_download(filename_of(TRAIN_OTHER_500_URL), data_dir, TRAIN_OTHER_500_URL)
bar.update(2)
dev_clean = base.maybe_download(filename_of(DEV_CLEAN_URL), data_dir, DEV_CLEAN_URL)
bar.update(3)
dev_other = base.maybe_download(filename_of(DEV_OTHER_URL), data_dir, DEV_OTHER_URL)
bar.update(4)
test_clean = base.maybe_download(filename_of(TEST_CLEAN_URL), data_dir, TEST_CLEAN_URL)
bar.update(5)
test_other = base.maybe_download(filename_of(TEST_OTHER_URL), data_dir, TEST_OTHER_URL)
bar.update(6)
# Conditionally extract LibriSpeech data
# We extract each archive into data_dir, but test for existence in
# data_dir/LibriSpeech because the archives share that root.
print("Extracting librivox data if not already extracted...")
with progressbar.ProgressBar(max_value=7, widget=progressbar.AdaptiveETA) as bar:
LIBRIVOX_DIR = "LibriSpeech"
work_dir = os.path.join(data_dir, LIBRIVOX_DIR)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "train-clean-100"), train_clean_100)
bar.update(0)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "train-clean-360"), train_clean_360)
bar.update(1)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "train-other-500"), train_other_500)
bar.update(2)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "dev-clean"), dev_clean)
bar.update(3)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "dev-other"), dev_other)
bar.update(4)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "test-clean"), test_clean)
bar.update(5)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "test-other"), test_other)
bar.update(6)
# Convert FLAC data to wav, from:
# data_dir/LibriSpeech/split/1/2/1-2-3.flac
# to:
# data_dir/LibriSpeech/split-wav/1-2-3.wav
#
# And split LibriSpeech transcriptions, from:
# data_dir/LibriSpeech/split/1/2/1-2.trans.txt
# to:
# data_dir/LibriSpeech/split-wav/1-2-0.txt
# data_dir/LibriSpeech/split-wav/1-2-1.txt
# data_dir/LibriSpeech/split-wav/1-2-2.txt
# ...
print("Converting FLAC to WAV and splitting transcriptions...")
with progressbar.ProgressBar(max_value=7, widget=progressbar.AdaptiveETA) as bar:
train_100 = _convert_audio_and_split_sentences(work_dir, "train-clean-100", "train-clean-100-wav")
bar.update(0)
train_360 = _convert_audio_and_split_sentences(work_dir, "train-clean-360", "train-clean-360-wav")
bar.update(1)
train_500 = _convert_audio_and_split_sentences(work_dir, "train-other-500", "train-other-500-wav")
bar.update(2)
dev_clean = _convert_audio_and_split_sentences(work_dir, "dev-clean", "dev-clean-wav")
bar.update(3)
dev_other = _convert_audio_and_split_sentences(work_dir, "dev-other", "dev-other-wav")
bar.update(4)
test_clean = _convert_audio_and_split_sentences(work_dir, "test-clean", "test-clean-wav")
bar.update(5)
test_other = _convert_audio_and_split_sentences(work_dir, "test-other", "test-other-wav")
bar.update(6)
# Write sets to disk as CSV files
train_100.to_csv(os.path.join(data_dir, "librivox-train-clean-100.csv"), index=False)
train_360.to_csv(os.path.join(data_dir, "librivox-train-clean-360.csv"), index=False)
train_500.to_csv(os.path.join(data_dir, "librivox-train-other-500.csv"), index=False)
dev_clean.to_csv(os.path.join(data_dir, "librivox-dev-clean.csv"), index=False)
dev_other.to_csv(os.path.join(data_dir, "librivox-dev-other.csv"), index=False)
test_clean.to_csv(os.path.join(data_dir, "librivox-test-clean.csv"), index=False)
test_other.to_csv(os.path.join(data_dir, "librivox-test-other.csv"), index=False)
def _maybe_extract(data_dir, extracted_data, archive):
# If data_dir/extracted_data does not exist, extract archive in data_dir
if not gfile.Exists(os.path.join(data_dir, extracted_data)):
tar = tarfile.open(archive)
tar.extractall(data_dir)
tar.close()
def _convert_audio_and_split_sentences(extracted_dir, data_set, dest_dir):
source_dir = os.path.join(extracted_dir, data_set)
target_dir = os.path.join(extracted_dir, dest_dir)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# Loop over transcription files and split each one
#
# The format for each file 1-2.trans.txt is:
# 1-2-0 transcription of 1-2-0.flac
# 1-2-1 transcription of 1-2-1.flac
# ...
#
# Each file is then split into several files:
# 1-2-0.txt (contains transcription of 1-2-0.flac)
# 1-2-1.txt (contains transcription of 1-2-1.flac)
# ...
#
# We also convert the corresponding FLACs to WAV in the same pass
files = []
for root, dirnames, filenames in os.walk(source_dir):
for filename in fnmatch.filter(filenames, '*.trans.txt'):
trans_filename = os.path.join(root, filename)
with codecs.open(trans_filename, "r", "utf-8") as fin:
for line in fin:
# Parse each segment line
first_space = line.find(" ")
seqid, transcript = line[:first_space], line[first_space+1:]
# We need to do the encode-decode dance here because encode
# returns a bytes() object on Python 3, and text_to_char_array
# expects a string.
transcript = unicodedata.normalize("NFKD", transcript) \
.encode("ascii", "ignore") \
.decode("ascii", "ignore")
transcript = transcript.lower().strip()
# Convert corresponding FLAC to a WAV
flac_file = os.path.join(root, seqid + ".flac")
wav_file = os.path.join(target_dir, seqid + ".wav")
if not os.path.exists(wav_file):
Transformer().build(flac_file, wav_file)
wav_filesize = os.path.getsize(wav_file)
files.append((os.path.abspath(wav_file), wav_filesize, transcript))
return pandas.DataFrame(data=files, columns=["wav_filename", "wav_filesize", "transcript"])
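# The resulting CSV rows have the form (hypothetical path and size, shown
# purely for illustration):
#
#   /data/LibriSpeech/dev-clean-wav/84-121123-0000.wav,103218,go do you hear
#
# i.e. absolute WAV path, WAV file size in bytes, lower-cased transcript.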
if __name__ == "__main__":
_download_and_preprocess_data(sys.argv[1])
| mpl-2.0 |
michelts/aloe_django | tests/integration/django/dill/leaves/features/steps.py | 3 | 1684 | #
import io
import json
import sys
from django.core.management import call_command
from leaves.models import (
Harvester,
Panda,
)
from aloe import after, step
from aloe.tools import guess_types
from aloe_django.steps.models import (
test_existence,
tests_existence,
write_models,
writes_models,
)
from nose.tools import assert_equals
max_rego = 0
@writes_models(Harvester)
def write_with_rego(data, field=None):
for hash_ in data:
hash_['rego'] = hash_['make'][:3].upper() + "001"
write_models(Harvester, data, field=field)
@tests_existence(Harvester)
def check_with_rego(queryset, data):
try:
data['rego'] = data['rego'].upper()
except KeyError:
pass
return test_existence(queryset, data)
@step(r'The database dump is as follows')
def database_dump(step):
if sys.version_info >= (3, 0):
output = io.StringIO()
else:
output = io.BytesIO()
call_command('dumpdata', stdout=output, indent=2)
output = output.getvalue()
assert_equals(json.loads(output), json.loads(step.multiline))
@step(r'I have populated the database')
def database_populated(step):
pass
@step(r'I count the harvesters')
def count_harvesters(step):
print("Harvester count: %d" % Harvester.objects.count())
@writes_models(Panda)
def write_pandas(data, field):
# It is not necessary to call hashes_data/guess_types, but it might be
# present in old code using the library. Test that it is a no-op
# in that case.
data = guess_types(data)
for hash_ in data:
if 'name' in hash_:
hash_['name'] += ' Panda'
return write_models(Panda, data, field)
| gpl-3.0 |
jor-/scipy | scipy/ndimage/interpolation.py | 4 | 27090 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import itertools
import numpy
import warnings
from . import _ni_support
from . import _nd_image
from ._ni_docstrings import docdict
from scipy._lib import doccer
# Change the default 'reflect' to 'constant' via modifying a copy of docdict
docdict_copy = docdict.copy()
del docdict
docdict_copy['mode'] = docdict_copy['mode'].replace("Default is 'reflect'",
"Default is 'constant'")
docfiller = doccer.filldoc(docdict_copy)
__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
@docfiller
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64,
mode='mirror'):
"""
Calculate a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Parameters
----------
%(input)s
order : int, optional
The order of the spline, default is 3.
axis : int, optional
The axis along which the spline filter is applied. Default is the last
axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array. Default is ``numpy.float64``.
%(mode)s
Returns
-------
spline_filter1d : ndarray
The filtered input.
Notes
-----
All functions in `ndimage.interpolation` do spline interpolation of
the input image. If using b-splines of `order > 1`, the input image
values have to be converted to b-spline coefficients first, which is
done by applying this one-dimensional filter sequentially along all
axes of the input. All functions that require b-spline coefficients
will automatically filter their inputs, a behavior controllable with
the `prefilter` keyword argument. For functions that accept a `mode`
parameter, the result will only be correct if it matches the `mode`
used when filtering.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
mode = _ni_support._extend_mode_to_code(mode)
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output, mode)
return output
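# Minimal usage sketch for spline_filter1d (assumes a short 1-D signal):
#
#     import numpy as np
#     from scipy import ndimage
#     coeffs = ndimage.spline_filter1d(np.array([0., 1., 4., 9., 16.]), order=3)
#
# `coeffs` holds the cubic b-spline coefficients along the (last) axis; passing
# them to the interpolation routines with prefilter=False avoids filtering the
# same data twice.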
def spline_filter(input, order=3, output=numpy.float64, mode='mirror'):
"""
Multi-dimensional spline filter.
For more details, see `spline_filter1d`.
See Also
--------
spline_filter1d
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output=output, mode=mode)
input = output
else:
output[...] = input[...]
return output
@docfiller
def geometric_transform(input, mapping, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True,
extra_arguments=(), extra_keywords={}):
"""
Apply an arbitrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
Parameters
----------
%(input)s
mapping : {callable, scipy.LowLevelCallable}
A callable object that accepts a tuple of length equal to the output
array rank, and returns the corresponding input coordinates as a tuple
of length equal to the input array rank.
output_shape : tuple of ints, optional
Shape tuple.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
extra_arguments : tuple, optional
Extra arguments passed to `mapping`.
extra_keywords : dict, optional
Extra keywords passed to `mapping`.
Returns
-------
output : ndarray
The filtered input.
See Also
--------
map_coordinates, affine_transform, spline_filter1d
Notes
-----
This function also accepts low-level callback functions with one
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int mapping(npy_intp *output_coordinates, double *input_coordinates,
int output_rank, int input_rank, void *user_data)
int mapping(intptr_t *output_coordinates, double *input_coordinates,
int output_rank, int input_rank, void *user_data)
The calling function iterates over the elements of the output array,
calling the callback function at each element. The coordinates of the
current output element are passed through ``output_coordinates``. The
callback function must return the coordinates at which the input must
be interpolated in ``input_coordinates``. The rank of the input and
output arrays are given by ``input_rank`` and ``output_rank``
respectively. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
Examples
--------
>>> import numpy as np
>>> from scipy.ndimage import geometric_transform
>>> a = np.arange(12.).reshape((4, 3))
>>> def shift_func(output_coords):
... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
...
>>> geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.362, 2.738],
[ 0. , 4.812, 6.187],
[ 0. , 8.263, 9.637]])
>>> b = [1, 2, 3, 4, 5]
>>> def shift_func(output_coords):
... return (output_coords[0] - 3,)
...
>>> geometric_transform(b, shift_func, mode='constant')
array([0, 0, 0, 1, 2])
>>> geometric_transform(b, shift_func, mode='nearest')
array([1, 1, 1, 1, 2])
>>> geometric_transform(b, shift_func, mode='reflect')
array([3, 2, 1, 1, 2])
>>> geometric_transform(b, shift_func, mode='wrap')
array([2, 3, 4, 1, 2])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input, shape=output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None, output,
order, mode, cval, extra_arguments,
extra_keywords)
return output
@docfiller
def map_coordinates(input, coordinates, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
%(input)s
coordinates : array_like
The coordinates at which `input` is evaluated.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
map_coordinates : ndarray
The result of transforming the input. The shape of the output is
derived from that of `coordinates` by dropping the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> from scipy import ndimage
>>> a = np.arange(12.).reshape((4, 3))
>>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
array([ 2., 7.])
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input,
shape=output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return output
@docfiller
def affine_transform(input, matrix, offset=0.0, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Apply an affine transformation.
Given an output image pixel index vector ``o``, the pixel value
is determined from the input image at position
``np.dot(matrix, o) + offset``.
This does 'pull' (or 'backward') resampling, transforming the output space
to the input to locate data. Affine transformations are often described in
the 'push' (or 'forward') direction, transforming input to output. If you
have a matrix for the 'push' transformation, use its inverse
(:func:`numpy.linalg.inv`) in this function.
Parameters
----------
%(input)s
matrix : ndarray
The inverse coordinate transformation matrix, mapping output
coordinates to input coordinates. If ``ndim`` is the number of
dimensions of ``input``, the given matrix must have one of the
following shapes:
- ``(ndim, ndim)``: the linear transformation matrix for each
output coordinate.
- ``(ndim,)``: assume that the 2D transformation matrix is
diagonal, with the diagonal specified by the given value. A more
efficient algorithm is then used that exploits the separability
of the problem.
- ``(ndim + 1, ndim + 1)``: assume that the transformation is
specified using homogeneous coordinates [1]_. In this case, any
value passed to ``offset`` is ignored.
- ``(ndim, ndim + 1)``: as above, but the bottom row of a
homogeneous transformation matrix is always ``[0, 0, ..., 1]``,
and may be omitted.
offset : float or sequence, optional
The offset into the array where the transform is applied. If a float,
`offset` is the same for each axis. If a sequence, `offset` should
contain one value for each axis.
output_shape : tuple of ints, optional
Shape tuple.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
affine_transform : ndarray
The transformed input.
Notes
-----
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode.
.. versionchanged:: 0.18.0
Previously, the exact interpretation of the affine transformation
depended on whether the matrix was supplied as a one-dimensional or
two-dimensional array. If a one-dimensional array was supplied
to the matrix parameter, the output pixel value at index ``o``
was determined from the input image at position
``matrix * (o + offset)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input,
shape=output_shape)
matrix = numpy.asarray(matrix, dtype=numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and
(matrix.shape[0] in [input.ndim, input.ndim + 1])):
if matrix.shape[0] == input.ndim + 1:
exptd = [0] * input.ndim + [1]
if not numpy.all(matrix[input.ndim] == exptd):
msg = ('Expected homogeneous transformation matrix with '
'shape %s for image shape %s, but bottom row was '
'not equal to %s' % (matrix.shape, input.shape, exptd))
raise ValueError(msg)
# assume input is homogeneous coordinate transformation matrix
offset = matrix[:input.ndim, input.ndim]
matrix = matrix[:input.ndim, :input.ndim]
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype=numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
warnings.warn(
"The behaviour of affine_transform with a one-dimensional "
"array supplied for the matrix parameter has changed in "
"scipy 0.18.0."
)
_nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return output
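# Minimal usage sketch for affine_transform (assumes a 2-D float image `img`).
# The matrix maps output coordinates to input coordinates ("pull" resampling),
# so a forward/"push" rotation matrix is inverted before being passed in; the
# rotation is about the (0, 0) corner unless an offset recentres it:
#
#     import numpy as np
#     theta = np.deg2rad(30.0)
#     push = np.array([[np.cos(theta), -np.sin(theta)],
#                      [np.sin(theta),  np.cos(theta)]])
#     rotated = affine_transform(img, np.linalg.inv(push), order=1)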
@docfiller
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Parameters
----------
%(input)s
shift : float or sequence
The shift along the axes. If a float, `shift` is the same for each
axis. If a sequence, `shift` should contain one value for each axis.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
shift : ndarray
The shifted input.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype=numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return output
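# Minimal usage sketch for shift (assumes a small 2-D array `a`):
#
#     a = numpy.arange(12.).reshape((4, 3))
#     shifted = shift(a, (1, 0), order=1)
#
# Content moves one row toward larger indices along axis 0; the vacated border
# row is filled with `cval` (0.0 under the default mode='constant').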
@docfiller
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Parameters
----------
%(input)s
zoom : float or sequence
The zoom factor along the axes. If a float, `zoom` is the same for each
axis. If a sequence, `zoom` should contain one value for each axis.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
zoom : ndarray
The zoomed input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.zoom(ascent, 3.0)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
>>> print(ascent.shape)
(512, 512)
>>> print(result.shape)
(1536, 1536)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple(
[int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
zoom_div = numpy.array(output_shape, float) - 1
# Zooming to infinite values is unpredictable, so just choose
# zoom factor 1 instead
zoom = numpy.divide(numpy.array(input.shape) - 1, zoom_div,
out=numpy.ones_like(input.shape, dtype=numpy.float64),
where=zoom_div != 0)
output = _ni_support._get_output(output, input,
shape=output_shape)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return output
@docfiller
def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Rotate an array.
The array is rotated in the plane defined by the two axes given by the
`axes` parameter using spline interpolation of the requested order.
Parameters
----------
%(input)s
angle : float
The rotation angle in degrees.
axes : tuple of 2 ints, optional
The two axes that define the plane of rotation. Default is the first
two axes.
reshape : bool, optional
If `reshape` is true, the output shape is adapted so that the input
array is contained completely in the output. Default is True.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
rotate : ndarray
The rotated input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(figsize=(10, 3))
>>> ax1, ax2, ax3 = fig.subplots(1, 3)
>>> img = misc.ascent()
>>> img_45 = ndimage.rotate(img, 45, reshape=False)
>>> full_img_45 = ndimage.rotate(img, 45, reshape=True)
>>> ax1.imshow(img, cmap='gray')
>>> ax1.set_axis_off()
>>> ax2.imshow(img_45, cmap='gray')
>>> ax2.set_axis_off()
>>> ax3.imshow(full_img_45, cmap='gray')
>>> ax3.set_axis_off()
>>> fig.set_tight_layout(True)
>>> plt.show()
>>> print(img.shape)
(512, 512)
>>> print(img_45.shape)
(512, 512)
>>> print(full_img_45.shape)
(724, 724)
"""
input_arr = numpy.asarray(input)
ndim = input_arr.ndim
if ndim < 2:
raise ValueError('input array should be at least two-dimensional')
axes = list(axes)
if len(axes) != 2:
raise ValueError('axes should contain exactly two values')
if not all([float(ax).is_integer() for ax in axes]):
raise ValueError('axes should contain only integer values')
if axes[0] < 0:
axes[0] += ndim
if axes[1] < 0:
axes[1] += ndim
if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim:
raise ValueError('invalid rotation plane specified')
axes.sort()
angle_rad = numpy.deg2rad(angle)
c, s = numpy.cos(angle_rad), numpy.sin(angle_rad)
rot_matrix = numpy.array([[c, s],
[-s, c]])
img_shape = numpy.asarray(input_arr.shape)
in_plane_shape = img_shape[axes]
if reshape:
# Compute transformed input bounds
iy, ix = in_plane_shape
out_bounds = rot_matrix @ [[0, 0, iy, iy],
[0, ix, 0, ix]]
# Compute the shape of the transformed input plane
out_plane_shape = (out_bounds.ptp(axis=1) + 0.5).astype(int)
else:
out_plane_shape = img_shape[axes]
out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
in_center = (in_plane_shape - 1) / 2
offset = in_center - out_center
output_shape = img_shape
output_shape[axes] = out_plane_shape
output_shape = tuple(output_shape)
output = _ni_support._get_output(output, input_arr, shape=output_shape)
if ndim <= 2:
affine_transform(input_arr, rot_matrix, offset, output_shape, output,
order, mode, cval, prefilter)
else:
# If ndim > 2, the rotation is applied over all the planes
# parallel to axes
planes_coord = itertools.product(
*[[slice(None)] if ax in axes else range(img_shape[ax])
for ax in range(ndim)])
out_plane_shape = tuple(out_plane_shape)
for coordinates in planes_coord:
ia = input_arr[coordinates]
oa = output[coordinates]
affine_transform(ia, rot_matrix, offset, out_plane_shape,
oa, order, mode, cval, prefilter)
return output
| bsd-3-clause |
liyu1990/sklearn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
    # select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
        # labeling 5 points: remove them from the unlabeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
HeraclesHX/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
JoergFritz/genRouteDb | calculateRouteFeatures.py | 1 | 4297 | # calculate Popularity, Circularity and Offroad scores for newly added routes
import pysal
import MySQLdb as mdb
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from haversine import haversine
# connect to database with running routes
con=mdb.connect(host="mysql.server",user="JoergFritz", \
db="JoergFritz$runRoutesTest",passwd="you-wish")
cur = con.cursor(mdb.cursors.DictCursor)
# get data from database
cur.execute("SELECT MapMyRunId,StartLat,QuarterLat,HalfLat,ThreeQuarterLat,StartLng,QuarterLng,HalfLng,ThreeQuarterLng,Distance from Tracks")
rowsTracks = cur.fetchall()
numPoints = 4*len(rowsTracks)
lat = np.zeros(numPoints)
lng = np.zeros(numPoints)
cur.execute("SELECT City,Lat,Lng from Cities")
rowsCities = cur.fetchall()
# first loop for calculation of kernel density for popularity
n = 0
for row in rowsTracks:
mapMyRunId = row['MapMyRunId']
startLat = row['StartLat']
lat[n] = startLat
startLng = row['StartLng']
lng[n] = startLng
n = n+1
quarterLat = row['QuarterLat']
lat[n] = quarterLat
quarterLng = row['QuarterLng']
lng[n] = quarterLng
n = n+1
halfLat = row['HalfLat']
lat[n] = halfLat
halfLng = row['HalfLng']
lng[n] = halfLng
n = n+1
threeQuarterLat = row['ThreeQuarterLat']
lat[n] = threeQuarterLat
threeQuarterLng = row['ThreeQuarterLng']
lng[n] = threeQuarterLng
n = n+1
if (n % 1000) == 0:
print n
#latMin = lat.min()
#latMax = lat.max()
#lngMin = lng.min()
#lngMax = lng.max()
#X, Y = np.mgrid[latMin:latMax:100j, lngMin:lngMax:100j]
#positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([lat, lng])
kernel = stats.gaussian_kde(values)
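# The fitted density is evaluated at each route's start/quarter/half/
# three-quarter points in the second loop below; their average becomes the
# route's Popularity score.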
# initialize variables
pointsLat=np.zeros(4)
pointsLng=np.zeros(4)
dist = np.zeros(len(rowsCities))
n=0
for row in rowsTracks:
mapMyRunId = row['MapMyRunId']
startLat = row['StartLat']
startLng = row['StartLng']
quarterLat = row['QuarterLat']
quarterLng = row['QuarterLng']
halfLat = row['HalfLat']
halfLng = row['HalfLng']
threeQuarterLat = row['ThreeQuarterLat']
threeQuarterLng = row['ThreeQuarterLng']
distance = row['Distance']
# calculate popularity
startDensity = float(kernel([startLat,startLng]))
quarterDensity = float(kernel([quarterLat,quarterLng]))
halfDensity = float(kernel([halfLat,halfLng]))
threeQuarterDensity = float(kernel([threeQuarterLat,threeQuarterLng]))
avgDensity = (startDensity+quarterDensity+halfDensity+threeQuarterDensity)/4.0
cur.execute("UPDATE Tracks SET Popularity=%s WHERE MapMyRunId=%s",(avgDensity,mapMyRunId))
# calculate circularity
pointsLat=[startLat,quarterLat,halfLat,threeQuarterLat]
pointsLng=[startLng,quarterLng,halfLng,threeQuarterLng]
# edit here!
expectedDistance = 0.225*distance # distance between points if they were on circle
expectedDiameter = 2*0.159*distance
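    # The constants above come from circle geometry: a loop of length L laid
    # out as a circle has radius L/(2*pi) ~= 0.159*L, hence diameter
    # ~= 0.318*L, and the straight-line distance between consecutive quarter
    # points (90 degrees apart) is r*sqrt(2) ~= 0.225*L.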
sizeLat = 1000*haversine((max(pointsLat),np.mean(pointsLng)),(min(pointsLat),np.mean(pointsLng)))
sizeLng = 1000*haversine((np.mean(pointsLat),max(pointsLng)),(np.mean(pointsLat),min(pointsLng)))
distQuarter = 1000*haversine((startLat,startLng),(quarterLat,quarterLng))
distHalf = 1000*haversine((quarterLat,quarterLng),(halfLat,halfLng))
distThreeQuarter = 1000*haversine((halfLat,halfLng),(threeQuarterLat,threeQuarterLng))
distStart = 1000*haversine((threeQuarterLat,threeQuarterLng),(startLat,startLng))
    routeCircularity = (1.0 - abs(sizeLat-expectedDiameter)/expectedDiameter - abs(sizeLng-expectedDiameter)/expectedDiameter
                        - abs(distQuarter-expectedDistance)/expectedDistance - abs(distHalf-expectedDistance)/expectedDistance
                        - abs(distThreeQuarter-expectedDistance)/expectedDistance - abs(distStart-expectedDistance)/expectedDistance)
cur.execute("UPDATE Tracks SET Circularity=%s WHERE MapMyRunId=%s",(routeCircularity,mapMyRunId))
# calculate simplified off-road metric
avgLat=(startLat+quarterLat+halfLat+threeQuarterLat)/4
avgLng=(startLng+quarterLng+halfLng+threeQuarterLng)/4
for i in range(len(rowsCities)):
cityLat = rowsCities[i]['Lat']
cityLng = rowsCities[i]['Lng']
dist[i] = haversine((cityLat,cityLng),(avgLat,avgLng))
    sortedDistances = np.sort(dist)[:3]
    offroad = sum(sortedDistances)  # sum of distances to the three nearest cities
cur.execute("UPDATE Tracks SET Offroad=%s WHERE MapMyRunId=%s",(offroad,mapMyRunId))
n = n+1
if (n % 1000) == 0:
print n
con.commit()
cur.close()
con.close()
| mit |
fergalbyrne/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_pdf.py | 69 | 71773 | # -*- coding: iso-8859-1 -*-
"""
A PDF matplotlib backend (not yet complete)
Author: Jouni K Seppänen <jks@iki.fi>
"""
from __future__ import division
import os
import re
import sys
import time
import warnings
import zlib
import numpy as npy
from cStringIO import StringIO
from datetime import datetime
from math import ceil, cos, floor, pi, sin
try:
set
except NameError:
from sets import Set as set
import matplotlib
from matplotlib import __version__, rcParams, get_data_path
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.cbook import Bunch, is_string_like, reverse_dict, \
get_realpath_and_stat, is_writable_file_like, maxdict
from matplotlib.mlab import quad2cubic
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.afm import AFM
import matplotlib.type1font as type1font
import matplotlib.dviread as dviread
from matplotlib.ft2font import FT2Font, FIXED_WIDTH, ITALIC, LOAD_NO_SCALE, \
LOAD_NO_HINTING, KERNING_UNFITTED
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D, Bbox, BboxBase
from matplotlib.path import Path
from matplotlib import ttconv
# Overview
#
# The low-level knowledge about pdf syntax lies mainly in the pdfRepr
# function and the classes Reference, Name, Operator, and Stream. The
# PdfFile class knows about the overall structure of pdf documents.
# It provides a "write" method for writing arbitrary strings in the
# file, and an "output" method that passes objects through the pdfRepr
# function before writing them in the file. The output method is
# called by the RendererPdf class, which contains the various draw_foo
# methods. RendererPdf contains a GraphicsContextPdf instance, and
# each draw_foo calls self.check_gc before outputting commands. This
# method checks whether the pdf graphics state needs to be modified
# and outputs the necessary commands. GraphicsContextPdf represents
# the graphics state, and its "delta" method returns the commands that
# modify the state.
# Add "pdf.use14corefonts: True" in your configuration file to use only
# the 14 PDF core fonts. These fonts do not need to be embedded; every
# PDF viewing application is required to have them. This results in very
# light PDF files you can use directly in LaTeX or ConTeXt documents
# generated with pdfTeX, without any conversion.
# These fonts are: Helvetica, Helvetica-Bold, Helvetica-Oblique,
# Helvetica-BoldOblique, Courier, Courier-Bold, Courier-Oblique,
# Courier-BoldOblique, Times-Roman, Times-Bold, Times-Italic,
# Times-BoldItalic, Symbol, ZapfDingbats.
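# For example, the same switch can also be flipped from a script instead of
# the configuration file (a usage sketch):
#
#     from matplotlib import rcParams
#     rcParams['pdf.use14corefonts'] = True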
#
# Some tricky points:
#
# 1. The clip path can only be widened by popping from the state
# stack. Thus the state must be pushed onto the stack before narrowing
# the clip path. This is taken care of by GraphicsContextPdf.
#
# 2. Sometimes it is necessary to refer to something (e.g. font,
# image, or extended graphics state, which contains the alpha value)
# in the page stream by a name that needs to be defined outside the
# stream. PdfFile provides the methods fontName, imageObject, and
# alphaState for this purpose. The implementations of these methods
# should perhaps be generalized.
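# (For instance, narrowing the clip emits a "q ... re W n" sequence and the
# matching widen is a plain "Q"; see GraphicsContextPdf.clip_cmd and pop
# below.)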
# TODOs:
#
# * the alpha channel of images
# * image compression could be improved (PDF supports png-like compression)
# * encoding of fonts, including mathtext fonts and unicode support
# * Type 1 font support (i.e., "pdf.use_afm")
# * TTF support has lots of small TODOs, e.g. how do you know if a font
# is serif/sans-serif, or symbolic/non-symbolic?
# * draw_markers, draw_line_collection, etc.
# * use_tex
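# As a quick illustration of the serialization layer defined below (an
# informal sketch, not executed code):
#
#     pdfRepr(3.5)                        -> '3.5'
#     pdfRepr(None)                       -> 'null'
#     pdfRepr('a (b)')                    -> '(a \(b\))'
#     pdfRepr({'Type': Name('Catalog')})  -> '<< /Type /Catalog >>'
#     pdfRepr([1, 2, Name('F1')])         -> '[ 1 2 /F1 ]'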
def fill(strings, linelen=75):
"""Make one string from sequence of strings, with whitespace
in between. The whitespace is chosen to form lines of at most
linelen characters, if possible."""
currpos = 0
lasti = 0
result = []
for i, s in enumerate(strings):
length = len(s)
if currpos + length < linelen:
currpos += length + 1
else:
result.append(' '.join(strings[lasti:i]))
lasti = i
currpos = length
result.append(' '.join(strings[lasti:]))
return '\n'.join(result)
_string_escape_regex = re.compile(r'([\\()])')
def pdfRepr(obj):
"""Map Python objects to PDF syntax."""
# Some objects defined later have their own pdfRepr method.
if hasattr(obj, 'pdfRepr'):
return obj.pdfRepr()
# Floats. PDF does not have exponential notation (1.0e-10) so we
# need to use %f with some precision. Perhaps the precision
# should adapt to the magnitude of the number?
elif isinstance(obj, float):
if not npy.isfinite(obj):
raise ValueError, "Can only output finite numbers in PDF"
r = "%.10f" % obj
return r.rstrip('0').rstrip('.')
    # Booleans. These must be checked before integers, since
    # isinstance(True, int) is true; otherwise True/False would be
    # written as 1/0.
    elif isinstance(obj, bool):
        return ['false', 'true'][obj]
    # Integers are written as such.
    elif isinstance(obj, (int, long)):
        return "%d" % obj
# Strings are written in parentheses, with backslashes and parens
# escaped. Actually balanced parens are allowed, but it is
# simpler to escape them all. TODO: cut long strings into lines;
# I believe there is some maximum line length in PDF.
elif is_string_like(obj):
return '(' + _string_escape_regex.sub(r'\\\1', obj) + ')'
# Dictionaries. The keys must be PDF names, so if we find strings
# there, we make Name objects from them. The values may be
# anything, so the caller must ensure that PDF names are
# represented as Name objects.
elif isinstance(obj, dict):
r = ["<<"]
r.extend(["%s %s" % (Name(key).pdfRepr(), pdfRepr(val))
for key, val in obj.items()])
r.append(">>")
return fill(r)
# Lists.
elif isinstance(obj, (list, tuple)):
r = ["["]
r.extend([pdfRepr(val) for val in obj])
r.append("]")
return fill(r)
    # The null keyword.
    elif obj is None:
        return 'null'
# A date.
elif isinstance(obj, datetime):
r = obj.strftime('D:%Y%m%d%H%M%S')
if time.daylight: z = time.altzone
else: z = time.timezone
if z == 0: r += 'Z'
elif z < 0: r += "+%02d'%02d'" % ((-z)//3600, (-z)%3600)
else: r += "-%02d'%02d'" % (z//3600, z%3600)
return pdfRepr(r)
# A bounding box
elif isinstance(obj, BboxBase):
return fill([pdfRepr(val) for val in obj.bounds])
else:
raise TypeError, \
"Don't know a PDF representation for %s objects." \
% type(obj)
class Reference:
"""PDF reference object.
Use PdfFile.reserveObject() to create References.
"""
def __init__(self, id):
self.id = id
def __repr__(self):
return "<Reference %d>" % self.id
def pdfRepr(self):
return "%d 0 R" % self.id
def write(self, contents, file):
write = file.write
write("%d 0 obj\n" % self.id)
write(pdfRepr(contents))
write("\nendobj\n")
class Name:
"""PDF name object."""
_regex = re.compile(r'[^!-~]')
def __init__(self, name):
if isinstance(name, Name):
self.name = name.name
else:
self.name = self._regex.sub(Name.hexify, name)
def __repr__(self):
return "<Name %s>" % self.name
def hexify(match):
return '#%02x' % ord(match.group())
hexify = staticmethod(hexify)
def pdfRepr(self):
return '/' + self.name
class Operator:
"""PDF operator object."""
def __init__(self, op):
self.op = op
def __repr__(self):
return '<Operator %s>' % self.op
def pdfRepr(self):
return self.op
# PDF operators (not an exhaustive list)
_pdfops = dict(close_fill_stroke='b', fill_stroke='B', fill='f',
closepath='h', close_stroke='s', stroke='S', endpath='n',
begin_text='BT', end_text='ET',
curveto='c', rectangle='re', lineto='l', moveto='m',
concat_matrix='cm',
use_xobject='Do',
setgray_stroke='G', setgray_nonstroke='g',
setrgb_stroke='RG', setrgb_nonstroke='rg',
setcolorspace_stroke='CS', setcolorspace_nonstroke='cs',
setcolor_stroke='SCN', setcolor_nonstroke='scn',
setdash='d', setlinejoin='j', setlinecap='J', setgstate='gs',
gsave='q', grestore='Q',
textpos='Td', selectfont='Tf', textmatrix='Tm',
show='Tj', showkern='TJ',
setlinewidth='w', clip='W')
Op = Bunch(**dict([(name, Operator(value))
for name, value in _pdfops.items()]))
class Stream:
"""PDF stream object.
This has no pdfRepr method. Instead, call begin(), then output the
contents of the stream by calling write(), and finally call end().
"""
def __init__(self, id, len, file, extra=None):
"""id: object id of stream; len: an unused Reference object for the
length of the stream, or None (to use a memory buffer); file:
a PdfFile; extra: a dictionary of extra key-value pairs to
include in the stream header """
self.id = id # object id
self.len = len # id of length object
self.pdfFile = file
self.file = file.fh # file to which the stream is written
self.compressobj = None # compression object
if extra is None: self.extra = dict()
else: self.extra = extra
self.pdfFile.recordXref(self.id)
if rcParams['pdf.compression']:
self.compressobj = zlib.compressobj(rcParams['pdf.compression'])
if self.len is None:
self.file = StringIO()
else:
self._writeHeader()
self.pos = self.file.tell()
def _writeHeader(self):
write = self.file.write
write("%d 0 obj\n" % self.id)
dict = self.extra
dict['Length'] = self.len
if rcParams['pdf.compression']:
dict['Filter'] = Name('FlateDecode')
write(pdfRepr(dict))
write("\nstream\n")
def end(self):
"""Finalize stream."""
self._flush()
if self.len is None:
contents = self.file.getvalue()
self.len = len(contents)
self.file = self.pdfFile.fh
self._writeHeader()
self.file.write(contents)
self.file.write("\nendstream\nendobj\n")
else:
length = self.file.tell() - self.pos
self.file.write("\nendstream\nendobj\n")
self.pdfFile.writeObject(self.len, length)
def write(self, data):
"""Write some data on the stream."""
if self.compressobj is None:
self.file.write(data)
else:
compressed = self.compressobj.compress(data)
self.file.write(compressed)
def _flush(self):
"""Flush the compression object."""
if self.compressobj is not None:
compressed = self.compressobj.flush()
self.file.write(compressed)
self.compressobj = None
class PdfFile:
"""PDF file with one page."""
def __init__(self, width, height, dpi, filename):
self.width, self.height = width, height
self.dpi = dpi
if rcParams['path.simplify']:
self.simplify = (width * dpi, height * dpi)
else:
self.simplify = None
self.nextObject = 1 # next free object id
self.xrefTable = [ [0, 65535, 'the zero object'] ]
self.passed_in_file_object = False
if is_string_like(filename):
fh = file(filename, 'wb')
elif is_writable_file_like(filename):
fh = filename
self.passed_in_file_object = True
else:
raise ValueError("filename must be a path or a file-like object")
self.fh = fh
self.currentstream = None # stream object to write to, if any
fh.write("%PDF-1.4\n") # 1.4 is the first version to have alpha
# Output some eight-bit chars as a comment so various utilities
# recognize the file as binary by looking at the first few
# lines (see note in section 3.4.1 of the PDF reference).
fh.write("%\254\334 \253\272\n")
self.rootObject = self.reserveObject('root')
self.infoObject = self.reserveObject('info')
pagesObject = self.reserveObject('pages')
thePageObject = self.reserveObject('page 0')
contentObject = self.reserveObject('contents of page 0')
self.fontObject = self.reserveObject('fonts')
self.alphaStateObject = self.reserveObject('extended graphics states')
self.hatchObject = self.reserveObject('tiling patterns')
self.XObjectObject = self.reserveObject('external objects')
resourceObject = self.reserveObject('resources')
root = { 'Type': Name('Catalog'),
'Pages': pagesObject }
self.writeObject(self.rootObject, root)
info = { 'Creator': 'matplotlib ' + __version__ \
+ ', http://matplotlib.sf.net',
'Producer': 'matplotlib pdf backend',
'CreationDate': datetime.today() }
# Possible TODO: Title, Author, Subject, Keywords
self.writeObject(self.infoObject, info)
pages = { 'Type': Name('Pages'),
'Kids': [ thePageObject ],
'Count': 1 }
self.writeObject(pagesObject, pages)
thePage = { 'Type': Name('Page'),
'Parent': pagesObject,
'Resources': resourceObject,
'MediaBox': [ 0, 0, dpi*width, dpi*height ],
'Contents': contentObject }
self.writeObject(thePageObject, thePage)
# self.fontNames maps filenames to internal font names
self.fontNames = {}
self.nextFont = 1 # next free internal font name
self.fontInfo = {} # information on fonts: metrics, encoding
self.alphaStates = {} # maps alpha values to graphics state objects
self.nextAlphaState = 1
self.hatchPatterns = {}
self.nextHatch = 1
self.images = {}
self.nextImage = 1
self.markers = {}
self.multi_byte_charprocs = {}
# The PDF spec recommends to include every procset
procsets = [ Name(x)
for x in "PDF Text ImageB ImageC ImageI".split() ]
# Write resource dictionary.
# Possibly TODO: more general ExtGState (graphics state dictionaries)
# ColorSpace Pattern Shading Properties
resources = { 'Font': self.fontObject,
'XObject': self.XObjectObject,
'ExtGState': self.alphaStateObject,
'Pattern': self.hatchObject,
'ProcSet': procsets }
self.writeObject(resourceObject, resources)
# Start the content stream of the page
self.beginStream(contentObject.id,
self.reserveObject('length of content stream'))
def close(self):
# End the content stream and write out the various deferred
# objects
self.endStream()
self.writeFonts()
self.writeObject(self.alphaStateObject,
dict([(val[0], val[1])
for val in self.alphaStates.values()]))
self.writeHatches()
xobjects = dict(self.images.values())
for tup in self.markers.values():
xobjects[tup[0]] = tup[1]
for name, value in self.multi_byte_charprocs.items():
xobjects[name] = value
self.writeObject(self.XObjectObject, xobjects)
self.writeImages()
self.writeMarkers()
self.writeXref()
self.writeTrailer()
if self.passed_in_file_object:
self.fh.flush()
else:
self.fh.close()
def write(self, data):
if self.currentstream is None:
self.fh.write(data)
else:
self.currentstream.write(data)
def output(self, *data):
self.write(fill(map(pdfRepr, data)))
self.write('\n')
def beginStream(self, id, len, extra=None):
assert self.currentstream is None
self.currentstream = Stream(id, len, self, extra)
def endStream(self):
self.currentstream.end()
self.currentstream = None
def fontName(self, fontprop):
"""
Select a font based on fontprop and return a name suitable for
Op.selectfont. If fontprop is a string, it will be interpreted
as the filename of the font.
"""
if is_string_like(fontprop):
filename = fontprop
elif rcParams['pdf.use14corefonts']:
filename = findfont(fontprop, fontext='afm')
else:
filename = findfont(fontprop)
Fx = self.fontNames.get(filename)
if Fx is None:
Fx = Name('F%d' % self.nextFont)
self.fontNames[filename] = Fx
self.nextFont += 1
return Fx
def writeFonts(self):
fonts = {}
for filename, Fx in self.fontNames.items():
if filename.endswith('.afm'):
fontdictObject = self._write_afm_font(filename)
elif filename.endswith('.pfb') or filename.endswith('.pfa'):
# a Type 1 font; limited support for now
fontdictObject = self.embedType1(filename, self.fontInfo[Fx])
else:
realpath, stat_key = get_realpath_and_stat(filename)
chars = self.used_characters.get(stat_key)
if chars is not None and len(chars[1]):
fontdictObject = self.embedTTF(realpath, chars[1])
fonts[Fx] = fontdictObject
#print >>sys.stderr, filename
self.writeObject(self.fontObject, fonts)
def _write_afm_font(self, filename):
fh = file(filename)
font = AFM(fh)
fh.close()
fontname = font.get_fontname()
fontdict = { 'Type': Name('Font'),
'Subtype': Name('Type1'),
'BaseFont': Name(fontname),
'Encoding': Name('WinAnsiEncoding') }
fontdictObject = self.reserveObject('font dictionary')
self.writeObject(fontdictObject, fontdict)
return fontdictObject
def embedType1(self, filename, fontinfo):
# TODO: font effects such as SlantFont
fh = open(filename, 'rb')
matplotlib.verbose.report(
'Embedding Type 1 font ' + filename, 'debug')
try:
fontdata = fh.read()
finally:
fh.close()
font = FT2Font(filename)
widthsObject, fontdescObject, fontdictObject, fontfileObject = \
[ self.reserveObject(n) for n in
('font widths', 'font descriptor',
'font dictionary', 'font file') ]
firstchar = 0
lastchar = len(fontinfo.widths) - 1
fontdict = {
'Type': Name('Font'),
'Subtype': Name('Type1'),
'BaseFont': Name(font.postscript_name),
'FirstChar': 0,
'LastChar': lastchar,
'Widths': widthsObject,
'FontDescriptor': fontdescObject,
}
if fontinfo.encodingfile is not None:
enc = dviread.Encoding(fontinfo.encodingfile)
differencesArray = [ Name(ch) for ch in enc ]
differencesArray = [ 0 ] + differencesArray
fontdict.update({
'Encoding': { 'Type': Name('Encoding'),
'Differences': differencesArray },
})
_, _, fullname, familyname, weight, italic_angle, fixed_pitch, \
ul_position, ul_thickness = font.get_ps_font_info()
flags = 0
if fixed_pitch: flags |= 1 << 0 # fixed width
if 0: flags |= 1 << 1 # TODO: serif
if 1: flags |= 1 << 2 # TODO: symbolic (most TeX fonts are)
else: flags |= 1 << 5 # non-symbolic
if italic_angle: flags |= 1 << 6 # italic
if 0: flags |= 1 << 16 # TODO: all caps
if 0: flags |= 1 << 17 # TODO: small caps
if 0: flags |= 1 << 18 # TODO: force bold
descriptor = {
'Type': Name('FontDescriptor'),
'FontName': Name(font.postscript_name),
'Flags': flags,
'FontBBox': font.bbox,
'ItalicAngle': italic_angle,
'Ascent': font.ascender,
'Descent': font.descender,
'CapHeight': 1000, # TODO: find this out
'XHeight': 500, # TODO: this one too
'FontFile': fontfileObject,
'FontFamily': familyname,
'StemV': 50, # TODO
# (see also revision 3874; but not all TeX distros have AFM files!)
#'FontWeight': a number where 400 = Regular, 700 = Bold
}
self.writeObject(fontdictObject, fontdict)
self.writeObject(widthsObject, fontinfo.widths)
self.writeObject(fontdescObject, descriptor)
t1font = type1font.Type1Font(filename)
self.beginStream(fontfileObject.id, None,
{ 'Length1': len(t1font.parts[0]),
'Length2': len(t1font.parts[1]),
'Length3': 0 })
self.currentstream.write(t1font.parts[0])
self.currentstream.write(t1font.parts[1])
self.endStream()
return fontdictObject
def _get_xobject_symbol_name(self, filename, symbol_name):
return "%s-%s" % (
os.path.splitext(os.path.basename(filename))[0],
symbol_name)
_identityToUnicodeCMap = """/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (Adobe)
/Ordering (UCS)
/Supplement 0
>> def
/CMapName /Adobe-Identity-UCS def
/CMapType 2 def
1 begincodespacerange
<0000> <ffff>
endcodespacerange
%d beginbfrange
%s
endbfrange
endcmap
CMapName currentdict /CMap defineresource pop
end
end"""
def embedTTF(self, filename, characters):
"""Embed the TTF font from the named file into the document."""
font = FT2Font(str(filename))
fonttype = rcParams['pdf.fonttype']
def cvt(length, upe=font.units_per_EM, nearest=True):
"Convert font coordinates to PDF glyph coordinates"
value = length / upe * 1000
if nearest: return round(value)
# Perhaps best to round away from zero for bounding
# boxes and the like
if value < 0: return floor(value)
else: return ceil(value)
def embedTTFType3(font, characters, descriptor):
"""The Type 3-specific part of embedding a Truetype font"""
widthsObject = self.reserveObject('font widths')
fontdescObject = self.reserveObject('font descriptor')
fontdictObject = self.reserveObject('font dictionary')
charprocsObject = self.reserveObject('character procs')
differencesArray = []
firstchar, lastchar = 0, 255
bbox = [cvt(x, nearest=False) for x in font.bbox]
fontdict = {
'Type' : Name('Font'),
'BaseFont' : ps_name,
'FirstChar' : firstchar,
'LastChar' : lastchar,
'FontDescriptor' : fontdescObject,
'Subtype' : Name('Type3'),
'Name' : descriptor['FontName'],
'FontBBox' : bbox,
'FontMatrix' : [ .001, 0, 0, .001, 0, 0 ],
'CharProcs' : charprocsObject,
'Encoding' : {
'Type' : Name('Encoding'),
'Differences' : differencesArray},
'Widths' : widthsObject
}
# Make the "Widths" array
from encodings import cp1252
# The "decoding_map" was changed to a "decoding_table" as of Python 2.5.
if hasattr(cp1252, 'decoding_map'):
def decode_char(charcode):
return cp1252.decoding_map[charcode] or 0
else:
def decode_char(charcode):
return ord(cp1252.decoding_table[charcode])
def get_char_width(charcode):
unicode = decode_char(charcode)
width = font.load_char(unicode, flags=LOAD_NO_SCALE|LOAD_NO_HINTING).horiAdvance
return cvt(width)
widths = [ get_char_width(charcode) for charcode in range(firstchar, lastchar+1) ]
descriptor['MaxWidth'] = max(widths)
# Make the "Differences" array, sort the ccodes < 255 from
# the multi-byte ccodes, and build the whole set of glyph ids
# that we need from this font.
cmap = font.get_charmap()
glyph_ids = []
differences = []
multi_byte_chars = set()
for c in characters:
ccode = c
gind = cmap.get(ccode) or 0
glyph_ids.append(gind)
glyph_name = font.get_glyph_name(gind)
if ccode <= 255:
differences.append((ccode, glyph_name))
else:
multi_byte_chars.add(glyph_name)
differences.sort()
last_c = -2
for c, name in differences:
if c != last_c + 1:
differencesArray.append(c)
differencesArray.append(Name(name))
last_c = c
# Make the charprocs array (using ttconv to generate the
# actual outlines)
rawcharprocs = ttconv.get_pdf_charprocs(filename, glyph_ids)
charprocs = {}
charprocsRef = {}
for charname, stream in rawcharprocs.items():
charprocDict = { 'Length': len(stream) }
# The 2-byte characters are used as XObjects, so they
# need extra info in their dictionary
if charname in multi_byte_chars:
charprocDict['Type'] = Name('XObject')
charprocDict['Subtype'] = Name('Form')
charprocDict['BBox'] = bbox
# Each glyph includes bounding box information,
# but xpdf and ghostscript can't handle it in a
# Form XObject (they segfault!!!), so we remove it
# from the stream here. It's not needed anyway,
# since the Form XObject includes it in its BBox
# value.
stream = stream[stream.find("d1") + 2:]
charprocObject = self.reserveObject('charProc')
self.beginStream(charprocObject.id, None, charprocDict)
self.currentstream.write(stream)
self.endStream()
# Send the glyphs with ccode > 255 to the XObject dictionary,
# and the others to the font itself
if charname in multi_byte_chars:
name = self._get_xobject_symbol_name(filename, charname)
self.multi_byte_charprocs[name] = charprocObject
else:
charprocs[charname] = charprocObject
# Write everything out
self.writeObject(fontdictObject, fontdict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(widthsObject, widths)
self.writeObject(charprocsObject, charprocs)
return fontdictObject
def embedTTFType42(font, characters, descriptor):
"""The Type 42-specific part of embedding a Truetype font"""
fontdescObject = self.reserveObject('font descriptor')
cidFontDictObject = self.reserveObject('CID font dictionary')
type0FontDictObject = self.reserveObject('Type 0 font dictionary')
cidToGidMapObject = self.reserveObject('CIDToGIDMap stream')
fontfileObject = self.reserveObject('font file stream')
wObject = self.reserveObject('Type 0 widths')
toUnicodeMapObject = self.reserveObject('ToUnicode map')
cidFontDict = {
'Type' : Name('Font'),
'Subtype' : Name('CIDFontType2'),
'BaseFont' : ps_name,
'CIDSystemInfo' : {
'Registry' : 'Adobe',
'Ordering' : 'Identity',
'Supplement' : 0 },
'FontDescriptor' : fontdescObject,
'W' : wObject,
'CIDToGIDMap' : cidToGidMapObject
}
type0FontDict = {
'Type' : Name('Font'),
'Subtype' : Name('Type0'),
'BaseFont' : ps_name,
'Encoding' : Name('Identity-H'),
'DescendantFonts' : [cidFontDictObject],
'ToUnicode' : toUnicodeMapObject
}
# Make fontfile stream
descriptor['FontFile2'] = fontfileObject
length1Object = self.reserveObject('decoded length of a font')
self.beginStream(
fontfileObject.id,
self.reserveObject('length of font stream'),
{'Length1': length1Object})
fontfile = open(filename, 'rb')
length1 = 0
while True:
data = fontfile.read(4096)
if not data: break
length1 += len(data)
self.currentstream.write(data)
fontfile.close()
self.endStream()
self.writeObject(length1Object, length1)
# Make the 'W' (Widths) array, CidToGidMap and ToUnicode CMap
# at the same time
cid_to_gid_map = [u'\u0000'] * 65536
cmap = font.get_charmap()
unicode_mapping = []
widths = []
max_ccode = 0
for c in characters:
ccode = c
gind = cmap.get(ccode) or 0
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
widths.append((ccode, glyph.horiAdvance / 6))
if ccode < 65536:
cid_to_gid_map[ccode] = unichr(gind)
max_ccode = max(ccode, max_ccode)
widths.sort()
cid_to_gid_map = cid_to_gid_map[:max_ccode + 1]
last_ccode = -2
w = []
max_width = 0
unicode_groups = []
for ccode, width in widths:
if ccode != last_ccode + 1:
w.append(ccode)
w.append([width])
unicode_groups.append([ccode, ccode])
else:
w[-1].append(width)
unicode_groups[-1][1] = ccode
max_width = max(max_width, width)
last_ccode = ccode
unicode_bfrange = []
for start, end in unicode_groups:
unicode_bfrange.append(
"<%04x> <%04x> [%s]" %
(start, end,
" ".join(["<%04x>" % x for x in range(start, end+1)])))
unicode_cmap = (self._identityToUnicodeCMap %
(len(unicode_groups),
"\n".join(unicode_bfrange)))
# CIDToGIDMap stream
cid_to_gid_map = "".join(cid_to_gid_map).encode("utf-16be")
self.beginStream(cidToGidMapObject.id,
None,
{'Length': len(cid_to_gid_map)})
self.currentstream.write(cid_to_gid_map)
self.endStream()
# ToUnicode CMap
self.beginStream(toUnicodeMapObject.id,
None,
                             {'Length': len(unicode_cmap)})
self.currentstream.write(unicode_cmap)
self.endStream()
descriptor['MaxWidth'] = max_width
# Write everything out
self.writeObject(cidFontDictObject, cidFontDict)
self.writeObject(type0FontDictObject, type0FontDict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(wObject, w)
return type0FontDictObject
# Beginning of main embedTTF function...
# You are lost in a maze of TrueType tables, all different...
ps_name = Name(font.get_sfnt()[(1,0,0,6)])
pclt = font.get_sfnt_table('pclt') \
or { 'capHeight': 0, 'xHeight': 0 }
post = font.get_sfnt_table('post') \
or { 'italicAngle': (0,0) }
ff = font.face_flags
sf = font.style_flags
flags = 0
symbolic = False #ps_name.name in ('Cmsy10', 'Cmmi10', 'Cmex10')
if ff & FIXED_WIDTH: flags |= 1 << 0
if 0: flags |= 1 << 1 # TODO: serif
if symbolic: flags |= 1 << 2
else: flags |= 1 << 5
if sf & ITALIC: flags |= 1 << 6
if 0: flags |= 1 << 16 # TODO: all caps
if 0: flags |= 1 << 17 # TODO: small caps
if 0: flags |= 1 << 18 # TODO: force bold
descriptor = {
'Type' : Name('FontDescriptor'),
'FontName' : ps_name,
'Flags' : flags,
'FontBBox' : [ cvt(x, nearest=False) for x in font.bbox ],
'Ascent' : cvt(font.ascender, nearest=False),
'Descent' : cvt(font.descender, nearest=False),
'CapHeight' : cvt(pclt['capHeight'], nearest=False),
'XHeight' : cvt(pclt['xHeight']),
'ItalicAngle' : post['italicAngle'][1], # ???
'StemV' : 0 # ???
}
# The font subsetting to a Type 3 font does not work for
# OpenType (.otf) that embed a Postscript CFF font, so avoid that --
# save as a (non-subsetted) Type 42 font instead.
if is_opentype_cff_font(filename):
fonttype = 42
warnings.warn(("'%s' can not be subsetted into a Type 3 font. " +
"The entire font will be embedded in the output.") %
os.path.basename(filename))
if fonttype == 3:
return embedTTFType3(font, characters, descriptor)
elif fonttype == 42:
return embedTTFType42(font, characters, descriptor)
def alphaState(self, alpha):
"""Return name of an ExtGState that sets alpha to the given value"""
state = self.alphaStates.get(alpha, None)
if state is not None:
return state[0]
name = Name('A%d' % self.nextAlphaState)
self.nextAlphaState += 1
self.alphaStates[alpha] = \
(name, { 'Type': Name('ExtGState'),
'CA': alpha, 'ca': alpha })
return name
def hatchPattern(self, lst):
pattern = self.hatchPatterns.get(lst, None)
if pattern is not None:
return pattern[0]
name = Name('H%d' % self.nextHatch)
self.nextHatch += 1
self.hatchPatterns[lst] = name
return name
def writeHatches(self):
hatchDict = dict()
sidelen = 144.0
density = 24.0
for lst, name in self.hatchPatterns.items():
ob = self.reserveObject('hatch pattern')
hatchDict[name] = ob
res = { 'Procsets':
[ Name(x) for x in "PDF Text ImageB ImageC ImageI".split() ] }
self.beginStream(
ob.id, None,
{ 'Type': Name('Pattern'),
'PatternType': 1, 'PaintType': 1, 'TilingType': 1,
'BBox': [0, 0, sidelen, sidelen],
'XStep': sidelen, 'YStep': sidelen,
'Resources': res })
# lst is a tuple of stroke color, fill color,
# number of - lines, number of / lines,
# number of | lines, number of \ lines
rgb = lst[0]
self.output(rgb[0], rgb[1], rgb[2], Op.setrgb_stroke)
if lst[1] is not None:
rgb = lst[1]
self.output(rgb[0], rgb[1], rgb[2], Op.setrgb_nonstroke,
0, 0, sidelen, sidelen, Op.rectangle,
Op.fill)
if lst[2]: # -
for j in npy.arange(0.0, sidelen, density/lst[2]):
self.output(0, j, Op.moveto,
sidelen, j, Op.lineto)
if lst[3]: # /
for j in npy.arange(0.0, sidelen, density/lst[3]):
self.output(0, j, Op.moveto,
sidelen-j, sidelen, Op.lineto,
sidelen-j, 0, Op.moveto,
sidelen, j, Op.lineto)
if lst[4]: # |
for j in npy.arange(0.0, sidelen, density/lst[4]):
self.output(j, 0, Op.moveto,
j, sidelen, Op.lineto)
if lst[5]: # \
for j in npy.arange(sidelen, 0.0, -density/lst[5]):
self.output(sidelen, j, Op.moveto,
j, sidelen, Op.lineto,
j, 0, Op.moveto,
0, j, Op.lineto)
self.output(Op.stroke)
self.endStream()
self.writeObject(self.hatchObject, hatchDict)
def imageObject(self, image):
"""Return name of an image XObject representing the given image."""
pair = self.images.get(image, None)
if pair is not None:
return pair[0]
name = Name('I%d' % self.nextImage)
ob = self.reserveObject('image %d' % self.nextImage)
self.nextImage += 1
self.images[image] = (name, ob)
return name
## These two from backend_ps.py
## TODO: alpha (SMask, p. 518 of pdf spec)
def _rgb(self, im):
h,w,s = im.as_rgba_str()
rgba = npy.fromstring(s, npy.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[:,:,:3]
a = rgba[:,:,3:]
return h, w, rgb.tostring(), a.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
rgbat = im.as_rgba_str()
rgba = npy.fromstring(rgbat[2], npy.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba_f = rgba.astype(npy.float32)
r = rgba_f[:,:,0]
g = rgba_f[:,:,1]
b = rgba_f[:,:,2]
gray = (r*rc + g*gc + b*bc).astype(npy.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def writeImages(self):
for img, pair in self.images.items():
img.flipud_out()
if img.is_grayscale:
height, width, data = self._gray(img)
self.beginStream(
pair[1].id,
self.reserveObject('length of image stream'),
{'Type': Name('XObject'), 'Subtype': Name('Image'),
'Width': width, 'Height': height,
'ColorSpace': Name('DeviceGray'), 'BitsPerComponent': 8 })
self.currentstream.write(data) # TODO: predictors (i.e., output png)
self.endStream()
else:
height, width, data, adata = self._rgb(img)
smaskObject = self.reserveObject("smask")
stream = self.beginStream(
smaskObject.id,
self.reserveObject('length of smask stream'),
{'Type': Name('XObject'), 'Subtype': Name('Image'),
'Width': width, 'Height': height,
'ColorSpace': Name('DeviceGray'), 'BitsPerComponent': 8 })
self.currentstream.write(adata) # TODO: predictors (i.e., output png)
self.endStream()
self.beginStream(
pair[1].id,
self.reserveObject('length of image stream'),
{'Type': Name('XObject'), 'Subtype': Name('Image'),
'Width': width, 'Height': height,
'ColorSpace': Name('DeviceRGB'), 'BitsPerComponent': 8,
'SMask': smaskObject})
self.currentstream.write(data) # TODO: predictors (i.e., output png)
self.endStream()
img.flipud_out()
def markerObject(self, path, trans, fillp, lw):
"""Return name of a marker XObject representing the given path."""
key = (path, trans, fillp is not None, lw)
result = self.markers.get(key)
if result is None:
name = Name('M%d' % len(self.markers))
ob = self.reserveObject('marker %d' % len(self.markers))
self.markers[key] = (name, ob, path, trans, fillp, lw)
else:
name = result[0]
return name
def writeMarkers(self):
for tup in self.markers.values():
name, object, path, trans, fillp, lw = tup
bbox = path.get_extents(trans)
bbox = bbox.padded(lw * 0.5)
self.beginStream(
object.id, None,
{'Type': Name('XObject'), 'Subtype': Name('Form'),
'BBox': list(bbox.extents) })
self.writePath(path, trans)
if fillp:
self.output(Op.fill_stroke)
else:
self.output(Op.stroke)
self.endStream()
#@staticmethod
def pathOperations(path, transform, simplify=None):
tpath = transform.transform_path(path)
cmds = []
last_points = None
for points, code in tpath.iter_segments(simplify):
if code == Path.MOVETO:
cmds.extend(points)
cmds.append(Op.moveto)
elif code == Path.LINETO:
cmds.extend(points)
cmds.append(Op.lineto)
elif code == Path.CURVE3:
points = quad2cubic(*(list(last_points[-2:]) + list(points)))
cmds.extend(points[2:])
cmds.append(Op.curveto)
elif code == Path.CURVE4:
cmds.extend(points)
cmds.append(Op.curveto)
elif code == Path.CLOSEPOLY:
cmds.append(Op.closepath)
last_points = points
return cmds
pathOperations = staticmethod(pathOperations)
def writePath(self, path, transform):
cmds = self.pathOperations(
path, transform, self.simplify)
self.output(*cmds)
def reserveObject(self, name=''):
"""Reserve an ID for an indirect object.
The name is used for debugging in case we forget to print out
the object with writeObject.
"""
id = self.nextObject
self.nextObject += 1
self.xrefTable.append([None, 0, name])
return Reference(id)
def recordXref(self, id):
self.xrefTable[id][0] = self.fh.tell()
def writeObject(self, object, contents):
self.recordXref(object.id)
object.write(contents, self)
def writeXref(self):
"""Write out the xref table."""
self.startxref = self.fh.tell()
self.write("xref\n0 %d\n" % self.nextObject)
i = 0
borken = False
for offset, generation, name in self.xrefTable:
if offset is None:
print >>sys.stderr, \
'No offset for object %d (%s)' % (i, name)
borken = True
else:
self.write("%010d %05d n \n" % (offset, generation))
i += 1
if borken:
raise AssertionError, 'Indirect object does not exist'
def writeTrailer(self):
"""Write out the PDF trailer."""
self.write("trailer\n")
self.write(pdfRepr(
{'Size': self.nextObject,
'Root': self.rootObject,
'Info': self.infoObject }))
# Could add 'ID'
self.write("\nstartxref\n%d\n%%%%EOF\n" % self.startxref)
class RendererPdf(RendererBase):
truetype_font_cache = maxdict(50)
afm_font_cache = maxdict(50)
def __init__(self, file, dpi, image_dpi):
RendererBase.__init__(self)
self.file = file
self.gc = self.new_gc()
self.file.used_characters = self.used_characters = {}
self.mathtext_parser = MathTextParser("Pdf")
self.dpi = dpi
self.image_dpi = image_dpi
self.tex_font_map = None
def finalize(self):
self.file.output(*self.gc.finalize())
def check_gc(self, gc, fillcolor=None):
orig_fill = gc._fillcolor
gc._fillcolor = fillcolor
delta = self.gc.delta(gc)
if delta: self.file.output(*delta)
# Restore gc to avoid unwanted side effects
gc._fillcolor = orig_fill
def tex_font_mapping(self, texfont):
if self.tex_font_map is None:
self.tex_font_map = \
dviread.PsfontsMap(dviread.find_tex_file('pdftex.map'))
return self.tex_font_map[texfont]
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
if isinstance(font, (str, unicode)):
fname = font
else:
fname = font.fname
realpath, stat_key = get_realpath_and_stat(fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in other.items():
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def get_image_magnification(self):
return self.image_dpi/72.0
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
# MGDTODO: Support clippath here
gc = self.new_gc()
if bbox is not None:
gc.set_clip_rectangle(bbox)
self.check_gc(gc)
h, w = im.get_size_out()
h, w = 72.0*h/self.image_dpi, 72.0*w/self.image_dpi
imob = self.file.imageObject(im)
self.file.output(Op.gsave, w, 0, 0, h, x, y, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
def draw_path(self, gc, path, transform, rgbFace=None):
self.check_gc(gc, rgbFace)
stream = self.file.writePath(path, transform)
self.file.output(self.gc.paint())
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
self.check_gc(gc, rgbFace)
fillp = rgbFace is not None
output = self.file.output
marker = self.file.markerObject(
marker_path, marker_trans, fillp, self.gc._linewidth)
tpath = trans.transform_path(path)
output(Op.gsave)
lastx, lasty = 0, 0
for vertices, code in tpath.iter_segments():
if len(vertices):
x, y = vertices[-2:]
dx, dy = x - lastx, y - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix,
marker, Op.use_xobject)
lastx, lasty = x, y
output(Op.grestore)
def _setup_textpos(self, x, y, descent, angle, oldx=0, oldy=0, olddescent=0, oldangle=0):
if angle == oldangle == 0:
self.file.output(x - oldx, (y + descent) - (oldy + olddescent), Op.textpos)
else:
angle = angle / 180.0 * pi
self.file.output( cos(angle), sin(angle),
-sin(angle), cos(angle),
x, y, Op.textmatrix)
self.file.output(0, descent, Op.textpos)
def draw_mathtext(self, gc, x, y, s, prop, angle):
# TODO: fix positioning and encoding
width, height, descent, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
self.merge_used_characters(used_characters)
# When using Type 3 fonts, we can't use character codes higher
# than 255, so we use the "Do" command to render those
# instead.
global_fonttype = rcParams['pdf.fonttype']
# Set up a global transformation matrix for the whole math expression
a = angle / 180.0 * pi
self.file.output(Op.gsave)
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
prev_font = None, None
oldx, oldy = 0, 0
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 42 or num <= 255:
self._setup_textpos(ox, oy, 0, 0, oldx, oldy)
oldx, oldy = ox, oy
if (fontname, fontsize) != prev_font:
fontsize *= self.dpi/72.0
self.file.output(self.file.fontName(fontname), fontsize,
Op.selectfont)
prev_font = fontname, fontsize
self.file.output(self.encode_string(unichr(num), fonttype), Op.show)
self.file.output(Op.end_text)
# If using Type 3 fonts, render all of the multi-byte characters
# as XObjects using the 'Do' command.
if global_fonttype == 3:
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
fontsize *= self.dpi/72.0
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 3 and num > 255:
self.file.fontName(fontname)
self.file.output(Op.gsave,
0.001 * fontsize, 0,
0, 0.001 * fontsize,
ox, oy, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
fontname, symbol_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Draw any horizontal lines in the math layout
for ox, oy, width, height in rects:
self.file.output(Op.gsave, ox, oy, width, height,
Op.rectangle, Op.fill, Op.grestore)
# Pop off the global transformation
self.file.output(Op.grestore)
def draw_tex(self, gc, x, y, s, prop, angle):
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
dvifile = texmanager.make_dvi(s, fontsize)
dvi = dviread.Dvi(dvifile, self.dpi)
page = iter(dvi).next()
dvi.close()
# Gather font information and do some setup for combining
# characters into strings.
oldfont, seq = None, []
for x1, y1, dvifont, glyph, width in page.text:
if dvifont != oldfont:
psfont = self.tex_font_mapping(dvifont.texname)
pdfname = self.file.fontName(psfont.filename)
if self.file.fontInfo.get(pdfname, None) is None:
self.file.fontInfo[pdfname] = Bunch(
encodingfile=psfont.encoding,
widths=dvifont.widths,
dvifont=dvifont)
seq += [['font', pdfname, dvifont.size]]
oldfont = dvifont
seq += [['text', x1, y1, [chr(glyph)], x1+width]]
# Find consecutive text strings with constant x coordinate and
# combine into a sequence of strings and kerns, or just one
# string (if any kerns would be less than 0.1 points).
i, curx = 0, 0
while i < len(seq)-1:
elt, next = seq[i:i+2]
if elt[0] == next[0] == 'text' and elt[2] == next[2]:
offset = elt[4] - next[1]
if abs(offset) < 0.1:
elt[3][-1] += next[3][0]
elt[4] += next[4]-next[1]
else:
elt[3] += [offset*1000.0/dvifont.size, next[3][0]]
elt[4] = next[4]
del seq[i+1]
continue
i += 1
# Create a transform to map the dvi contents to the canvas.
mytrans = Affine2D().rotate_deg(angle).translate(x, y)
# Output the text.
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
curx, cury, oldx, oldy = 0, 0, 0, 0
for elt in seq:
if elt[0] == 'font':
self.file.output(elt[1], elt[2], Op.selectfont)
elif elt[0] == 'text':
curx, cury = mytrans.transform((elt[1], elt[2]))
self._setup_textpos(curx, cury, 0, angle, oldx, oldy)
oldx, oldy = curx, cury
if len(elt[3]) == 1:
self.file.output(elt[3][0], Op.show)
else:
self.file.output(elt[3], Op.showkern)
else:
assert False
self.file.output(Op.end_text)
# Then output the boxes (e.g. variable-length lines of square
# roots).
boxgc = self.new_gc()
boxgc.copy_properties(gc)
boxgc.set_linewidth(0)
pathops = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
for x1, y1, h, w in page.boxes:
path = Path([[x1, y1], [x1+w, y1], [x1+w, y1+h], [x1, y1+h],
[0,0]], pathops)
self.draw_path(boxgc, path, mytrans, gc._rgb)
def encode_string(self, s, fonttype):
if fonttype == 3:
return s.encode('cp1252', 'replace')
return s.encode('utf-16be', 'replace')
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
# TODO: combine consecutive texts into one BT/ET delimited section
# This function is rather complex, since there is no way to
# access characters of a Type 3 font with codes > 255. (Type
# 3 fonts can not have a CIDMap). Therefore, we break the
# string into chunks, where each chunk contains exclusively
# 1-byte or exclusively 2-byte characters, and output each
# chunk a separate command. 1-byte characters use the regular
# text show command (Tj), whereas 2-byte characters use the
# use XObject command (Do). If using Type 42 fonts, all of
# this complication is avoided, but of course, those fonts can
# not be subsetted.
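        # For example (illustrative): with a Type 3 font, a string such as
        # u'\u03a9 = 42' is split into a 2-byte chunk [u'\u03a9'] drawn via
        # Do and a 1-byte chunk [u' ', u'=', ...] drawn via Tj.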
self.check_gc(gc, gc._rgb)
if ismath: return self.draw_mathtext(gc, x, y, s, prop, angle)
fontsize = prop.get_size_in_points() * self.dpi/72.0
if rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h = font.get_str_bbox(s)
descent = -b * fontsize / 1000
fonttype = 42
else:
font = self._get_font_ttf(prop)
self.track_characters(font, s)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
descent = font.get_descent() / 64.0
fonttype = rcParams['pdf.fonttype']
# We can't subset all OpenType fonts, so switch to Type 42
# in that case.
if is_opentype_cff_font(font.fname):
fonttype = 42
def check_simple_method(s):
"""Determine if we should use the simple or woven method
to output this text, and chunks the string into 1-byte and
2-byte sections if necessary."""
use_simple_method = True
chunks = []
if not rcParams['pdf.use14corefonts']:
if fonttype == 3 and not isinstance(s, str) and len(s) != 0:
# Break the string into chunks where each chunk is either
# a string of chars <= 255, or a single character > 255.
s = unicode(s)
for c in s:
if ord(c) <= 255:
char_type = 1
else:
char_type = 2
if len(chunks) and chunks[-1][0] == char_type:
chunks[-1][1].append(c)
else:
chunks.append((char_type, [c]))
use_simple_method = (len(chunks) == 1
and chunks[-1][0] == 1)
return use_simple_method, chunks
def draw_text_simple():
"""Outputs text using the simple method."""
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
self._setup_textpos(x, y, descent, angle)
self.file.output(self.encode_string(s, fonttype), Op.show, Op.end_text)
def draw_text_woven(chunks):
"""Outputs text using the woven method, alternating
between chunks of 1-byte characters and 2-byte characters.
Only used for Type 3 fonts."""
chunks = [(a, ''.join(b)) for a, b in chunks]
cmap = font.get_charmap()
# Do the rotation and global translation as a single matrix
# concatenation up front
self.file.output(Op.gsave)
a = angle / 180.0 * pi
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
# Output all the 1-byte characters in a BT/ET group, then
# output all the 2-byte characters.
for mode in (1, 2):
newx = oldx = 0
olddescent = 0
# Output a 1-byte character chunk
if mode == 1:
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
for chunk_type, chunk in chunks:
if mode == 1 and chunk_type == 1:
self._setup_textpos(newx, 0, descent, 0, oldx, 0, olddescent, 0)
self.file.output(self.encode_string(chunk, fonttype), Op.show)
oldx = newx
olddescent = descent
lastgind = None
for c in chunk:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is not None:
if mode == 2 and chunk_type == 2:
glyph_name = font.get_glyph_name(gind)
self.file.output(Op.gsave)
self.file.output(0.001 * fontsize, 0,
0, 0.001 * fontsize,
newx, 0, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
font.fname, glyph_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Move the pointer based on the character width
# and kerning
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(
lastgind, gind, KERNING_UNFITTED)
else:
kern = 0
lastgind = gind
newx += kern/64.0 + glyph.linearHoriAdvance/65536.0
if mode == 1:
self.file.output(Op.end_text)
self.file.output(Op.grestore)
use_simple_method, chunks = check_simple_method(s)
if use_simple_method:
return draw_text_simple()
else:
return draw_text_woven(chunks)
def get_text_width_height_descent(self, s, prop, ismath):
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
dvifile = texmanager.make_dvi(s, fontsize)
dvi = dviread.Dvi(dvifile, self.dpi)
page = iter(dvi).next()
dvi.close()
# A total height (including the descent) needs to be returned.
return page.width, page.height+page.descent, page.descent
if ismath:
w, h, d, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
elif rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h, d = font.get_str_bbox_and_descent(s)
scale = prop.get_size_in_points()
w *= scale
h *= scale
d *= scale
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
scale = (1.0 / 64.0)
w *= scale
h *= scale
d = font.get_descent()
d *= scale
return w, h, d
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afm_font_cache.get(key)
if font is None:
filename = findfont(prop, fontext='afm')
font = self.afm_font_cache.get(filename)
if font is None:
fh = file(filename)
font = AFM(fh)
self.afm_font_cache[filename] = font
fh.close()
self.afm_font_cache[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.truetype_font_cache.get(key)
if font is None:
filename = findfont(prop)
font = self.truetype_font_cache.get(filename)
if font is None:
font = FT2Font(str(filename))
self.truetype_font_cache[filename] = font
self.truetype_font_cache[key] = font
font.clear()
font.set_size(prop.get_size_in_points(), self.dpi)
return font
def flipy(self):
return False
def get_canvas_width_height(self):
return self.file.width / self.dpi, self.file.height / self.dpi
def new_gc(self):
return GraphicsContextPdf(self.file)
class GraphicsContextPdf(GraphicsContextBase):
def __init__(self, file):
GraphicsContextBase.__init__(self)
self._fillcolor = (0.0, 0.0, 0.0)
self.file = file
self.parent = None
def __repr__(self):
d = dict(self.__dict__)
del d['file']
del d['parent']
return `d`
def _strokep(self):
return (self._linewidth > 0 and self._alpha > 0 and
(len(self._rgb) <= 3 or self._rgb[3] != 0.0))
def _fillp(self):
return ((self._fillcolor is not None or self._hatch) and
(len(self._fillcolor) <= 3 or self._fillcolor[3] != 0.0))
def close_and_paint(self):
if self._strokep():
if self._fillp():
return Op.close_fill_stroke
else:
return Op.close_stroke
else:
if self._fillp():
return Op.fill
else:
return Op.endpath
def paint(self):
if self._strokep():
if self._fillp():
return Op.fill_stroke
else:
return Op.stroke
else:
if self._fillp():
return Op.fill
else:
return Op.endpath
capstyles = { 'butt': 0, 'round': 1, 'projecting': 2 }
joinstyles = { 'miter': 0, 'round': 1, 'bevel': 2 }
def capstyle_cmd(self, style):
return [self.capstyles[style], Op.setlinecap]
def joinstyle_cmd(self, style):
return [self.joinstyles[style], Op.setlinejoin]
def linewidth_cmd(self, width):
return [width, Op.setlinewidth]
def dash_cmd(self, dashes):
offset, dash = dashes
if dash is None:
dash = []
offset = 0
return [list(dash), offset, Op.setdash]
def alpha_cmd(self, alpha):
name = self.file.alphaState(alpha)
return [name, Op.setgstate]
def hatch_cmd(self, hatch):
if not hatch:
if self._fillcolor is not None:
return self.fillcolor_cmd(self._fillcolor)
else:
return [Name('DeviceRGB'), Op.setcolorspace_nonstroke]
else:
hatch = hatch.lower()
lst = ( self._rgb,
self._fillcolor,
hatch.count('-') + hatch.count('+'),
hatch.count('/') + hatch.count('x'),
hatch.count('|') + hatch.count('+'),
hatch.count('\\') + hatch.count('x') )
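            # lst bundles the stroke/fill colours with counts describing the
            # hatch directions; it is the pattern description handed to
            # hatchPattern below.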
name = self.file.hatchPattern(lst)
return [Name('Pattern'), Op.setcolorspace_nonstroke,
name, Op.setcolor_nonstroke]
def rgb_cmd(self, rgb):
if rcParams['pdf.inheritcolor']:
return []
if rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_stroke]
else:
return list(rgb[:3]) + [Op.setrgb_stroke]
def fillcolor_cmd(self, rgb):
if rgb is None or rcParams['pdf.inheritcolor']:
return []
elif rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_nonstroke]
else:
return list(rgb[:3]) + [Op.setrgb_nonstroke]
def push(self):
parent = GraphicsContextPdf(self.file)
parent.copy_properties(self)
parent.parent = self.parent
self.parent = parent
return [Op.gsave]
def pop(self):
assert self.parent is not None
self.copy_properties(self.parent)
self.parent = self.parent.parent
return [Op.grestore]
def clip_cmd(self, cliprect, clippath):
"""Set clip rectangle. Calls self.pop() and self.push()."""
cmds = []
# Pop graphics state until we hit the right one or the stack is empty
while (self._cliprect, self._clippath) != (cliprect, clippath) \
and self.parent is not None:
cmds.extend(self.pop())
# Unless we hit the right one, set the clip polygon
if (self._cliprect, self._clippath) != (cliprect, clippath):
cmds.extend(self.push())
if self._cliprect != cliprect:
cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath])
if self._clippath != clippath:
cmds.extend(
PdfFile.pathOperations(
*clippath.get_transformed_path_and_affine()) +
[Op.clip, Op.endpath])
return cmds
commands = (
(('_cliprect', '_clippath'), clip_cmd), # must come first since may pop
(('_alpha',), alpha_cmd),
(('_capstyle',), capstyle_cmd),
(('_fillcolor',), fillcolor_cmd),
(('_joinstyle',), joinstyle_cmd),
(('_linewidth',), linewidth_cmd),
(('_dashes',), dash_cmd),
(('_rgb',), rgb_cmd),
(('_hatch',), hatch_cmd), # must come after fillcolor and rgb
)
# TODO: _linestyle
def delta(self, other):
"""
Copy properties of other into self and return PDF commands
needed to transform self into other.
"""
cmds = []
for params, cmd in self.commands:
different = False
for p in params:
ours = getattr(self, p)
theirs = getattr(other, p)
try:
different = bool(ours != theirs)
except ValueError:
ours = npy.asarray(ours)
theirs = npy.asarray(theirs)
different = ours.shape != theirs.shape or npy.any(ours != theirs)
if different:
break
if different:
theirs = [getattr(other, p) for p in params]
cmds.extend(cmd(self, *theirs))
for p in params:
setattr(self, p, getattr(other, p))
return cmds
def copy_properties(self, other):
"""
Copy properties of other into self.
"""
GraphicsContextBase.copy_properties(self, other)
self._fillcolor = other._fillcolor
def finalize(self):
"""
Make sure every pushed graphics state is popped.
"""
cmds = []
while self.parent is not None:
cmds.extend(self.pop())
return cmds
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasPdf(thisFig)
manager = FigureManagerPdf(canvas, num)
return manager
class FigureCanvasPdf(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def draw(self):
pass
filetypes = {'pdf': 'Portable Document Format'}
def get_default_filetype(self):
return 'pdf'
def print_pdf(self, filename, **kwargs):
ppi = 72 # Postscript points in an inch
image_dpi = kwargs.get('dpi', 72) # dpi to use for images
self.figure.set_dpi(ppi)
width, height = self.figure.get_size_inches()
file = PdfFile(width, height, ppi, filename)
renderer = MixedModeRenderer(
width, height, ppi, RendererPdf(file, ppi, image_dpi))
self.figure.draw(renderer)
renderer.finalize()
file.close()
class FigureManagerPdf(FigureManagerBase):
pass
FigureManager = FigureManagerPdf
| agpl-3.0 |
nelango/ViralityAnalysis | model/lib/sklearn/grid_search.py | 6 | 38160 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
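# Illustrative call (assumes X, y, an estimator clf, train/test index arrays
# and a scorer built with check_scoring are already available):
#     score, params, n_test = fit_grid_point(
#         X, y, clf, {'C': 1.0}, train, test, scorer, verbose=0)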
def _check_param_grid(param_grid):
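    # Validate that every parameter in the grid maps to a non-empty,
    # one-dimensional list, tuple or ndarray of candidate values.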
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance, so we tell the
    # Python interpreter that this subclass uses static __slots__ instead of
    # dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # Out is a list of quadruplets: score, n_test_samples, scoring_time,
        # parameters
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
.. versionchanged:: 0.17
Upgraded to joblib 0.9.3.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
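    Examples
    --------
    A minimal usage sketch; the estimator and the sampled distributions below
    are only illustrative choices:
    >>> from sklearn import svm, grid_search, datasets
    >>> from scipy.stats.distributions import expon
    >>> iris = datasets.load_iris()
    >>> param_distributions = {'C': expon(scale=10),
    ...                        'kernel': ['linear', 'rbf']}
    >>> clf = grid_search.RandomizedSearchCV(svm.SVC(), param_distributions,
    ...                                      n_iter=4, random_state=0)
    >>> clf = clf.fit(iris.data, iris.target)
    >>> sorted(clf.best_params_.keys())
    ['C', 'kernel']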
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
| mit |
Jnesselr/s3g | firmware_testing/ReplicatorStateTests.py | 2 | 8366 |
"""
A suite of tests to be run on a replicator with the s3g python module. The tests fall into a few categories, such as EEPROM-content validation and heater-cycling checks.
"""
import os, sys
lib_path = os.path.abspath('../')
sys.path.append(lib_path)
lib_path = os.path.abspath('../s3g/')
sys.path.append(lib_path)
try:
import unittest2 as unittest
except ImportError:
import unittest
import optparse
import serial
import io
import struct
import array
import time
import s3g
import random
import csv
import matplotlib.pyplot as plt
from coding import *
axis_length_offsets = {
'x_axis':[0x0, '<I'],
'y_axis':[0x04, '<I'],
'z_axis':[0x08, '<I'],
'a_axis':[0x012, '<I'],
'b_axis':[0x016, '<I']
}
eeprom_acceleration_offsets = {
'active':[0x00,'<B'],
'default_rate':[0x02,'<h'],
'x_axis_rate':[0x04, '<h'],
'y_axis_rate':[0x06, '<h'],
'z_axis_rate':[0x08, '<h'],
'a_axis_rate':[0x0A, '<h'],
'b_axis_rate':[0x0C, '<h'],
'x_axis_jerk':[0x0E, '<BB'],
'y_axis_jerk':[0x10, '<BB'],
'z_axis_jerk':[0x12, '<BB'],
'a_axis_jerk':[0x14, '<BB'],
'b_axis_jerk':[0x16, '<BB'],
'minimum_speed':[0x18, '<h'],
'defaults_flag':[0x1A, '<B']
}
eeprom_map =[
{'name':'acceleration_settings', 'offset':0x016E, 'variables':eeprom_acceleration_offsets},
{'name':'axis_lengths', 'offset':0x018C, 'variables':axis_length_offsets},
{'name':'first_boot_flag', 'offset':0x0156, 'variables':{'first_boot':[0, '>B']}}
]
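# Each eeprom_map entry pairs a base EEPROM offset with a table of
# (relative offset, struct format string) entries, so a field's absolute
# address is entry['offset'] + entry['variables'][name][0] and its width is
# struct.calcsize(format_string).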
class ReplicatorStateTests(unittest.TestCase):
def setUp(self):
self.s3g = s3g.s3g()
self.s3g.file = serial.Serial(options.serialPort, '115200', timeout=1)
self.s3g.writer = s3g.StreamWriter(self.s3g.file)
self.s3g.SetExtendedPosition([0, 0, 0, 0, 0])
self.s3g.AbortImmediately()
time.sleep(2)
def tearDown(self):
self.s3g.file.close()
def ReadEEpromVariable(self, map_dict, variable):
"""
read a variable stored in eeprom
        @param map_dict: an eeprom_map entry (dict with 'name', 'offset' and 'variables')
@param variable: dictionary value for 'variable' sub set in eeprom_map dict
"""
offset = map_dict['offset'] + map_dict['variables'][variable][0]
data_type = map_dict['variables'][variable][1]
data = UnpackResponse(data_type, self.s3g.ReadFromEEPROM(offset, struct.calcsize(data_type)))
        print [variable, data]
        return data
def CheckVariableRange(self, data, map_dict, variable):
"""
read a variable stored in eeprom
@param name: dictionary value for eeprom_map 'name'
@param variable: dictionary value for 'variable' sub set in eeprom_map dict
"""
valid_range = map_dict['variables'][variable][2]
self.assertTrue(data in valid_range)
def EEpromCheckForValidEntries(self):
"""
This test checks eeprom values
Additionaly eeprom checks may be added in the future
"""
for field in eeprom_map:
for var in field['variables']:
data = self.ReadEEpromVariable(field, var)
"""
# acceleration on/off
data = UnpackResponse('B', self.s3g.ReadFromEEPROM(acceleration_map_start + eeprom_acceleration_offsets['active'], 1))
print data[0]
self.assertTrue( data[0] in [0,1])
# default acceleration rate
data = UnpackResponse('h', self.s3g.ReadFromEEPROM(acceleration_map_start + eeprom_acceleration_offsets['default_rate'], 2))
print data[0]
self.assertTrue(data[0] in range(0,5000))
# default axis acceleration rates
for i in range(0,10, 2):
data = UnpackResponse('h', self.s3g.ReadFromEEPROM(acceleration_map_start+eeprom_acceleration_offsets['axis_rate'] +i, 2))
print data[0]
self.assertTrue(data[0] in range(0,5000))
# default axis jerk rates
for i in range(0,8,2):
data = self.s3g.ReadFromEEPROM(acceleration_map_start + eeprom_acceleration_offsets['axis_jerk']+ i, 2)
byte_data = UnpackResponse('BB', data);
float_data = (float(byte_data[0]) + float(byte_data[1]) / 256.0)
print float_data
self.assertTrue(float_data > 0.0 and float_data < 40.0)
# default minimum speed
data = UnpackResponse('h', self.s3g.ReadFromEEPROM(acceleration_map_start+eeprom_acceleration_offsets['minimum_speed'], 2))
print data[0]
self.assertTrue(data[0] in range(0,40))
# acceleration defaults initialized flag
data = UnpackResponse('B', self.s3g.ReadFromEEPROM(acceleration_map_start+eeprom_acceleration_offsets['defaults_flag'], 1))
print data[0]
self.assertTrue(data[0] in [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80])
"""
def EEpromTestResetToFactory(self):
self.s3g.ResetToFactory()
self.EEpromCheckForValidEntries()
def EEpromTestFullReset(self):
for i in range(0, eeprom_map):
self.s3g.WriteToEEPROM(i, [0xFF])
self.s3g.Reset()
self.EEpromCheckForValidEntries()
def EEpromWriteInvalidValues(self):
for i in range (acceleration_map_start + 10, eeprom_map):
self.s3g.WriteToEEPROM(i, [random.randint(0,255)])
self.EEpromCheckForValidEntries()
def HeatingErrorTest(self):
tool_temps = []
heat_cycle = 0
csv_writer = csv.writer(open(options.filename, 'wb'), delimiter = ',')
print "\n"
tool_num = 2
if options.toolCount == "single":
tool_num = 1
while(heat_cycle < 50):
for tool_index in range(0,tool_num):
print "heat_cycle: %d" % (heat_cycle)
#randomize whether tool or platform is heated first
tool_first = random.randint(0,1)
                if tool_first == 0:
                    self.s3g.SetToolheadTemperature(tool_index, 225)
                    self.s3g.SetPlatformTemperature(tool_index, 110)
                else:
                    self.s3g.SetPlatformTemperature(tool_index, 110)
                    self.s3g.SetToolheadTemperature(tool_index, 225)
# move axes to simulate start.gcode
self.s3g.FindAxesMaximums(['x', 'y'], 300, 60)
self.s3g.FindAxesMinimums(['z'], 200, 60)
self.s3g.RecallHomePositions(['x', 'y', 'z', 'a', 'b'])
AnchorLocation = [-110.5*94.1397, -74*94.1397, 150*400, 0, 0]
self.s3g.QueueExtendedPoint(AnchorLocation, 200)
start_time = time.time()
finished = False
while finished is False:
tool_temps.append(self.s3g.GetToolheadTemperature(tool_index))
csv_writer.writerow([time.time(), tool_temps[-1]])
tool_status = self.s3g.GetToolStatus(tool_index)
for error, status in tool_status.iteritems() :
if status is True:
finished = True
                            if error != "ExtruderReady":
print tool_status
print "tool head %d fail" % (tool_index)
                                if tool_first == 0:
print "tool heated before platform"
else:
print "tool heated after platform"
print "elapsed time: %d" % (time.time() - start_time)
print "heat cycles: %d" % (heat_cycle)
plt.plot(tool_temps)
plt.show()
self.assertFalse(status)
time.sleep(0.3)
tool_temps.append(self.s3g.GetToolheadTemperature(tool_index))
csv_writer.writerow([time.time(), tool_temps[-1]])
print "time: %d temp: %d count: %d " % (time.time() - start_time, tool_temps[-1], len(tool_temps))
self.s3g.SetToolheadTemperature(tool_index, 0)
self.s3g.SetPlatformTemperature(tool_index, 0)
# give the tool a random amount of time to cool
cool_time = (float(random.randint(1,16))/2) * 60
start_time = time.time()
print "cool time: %f minutes" % (cool_time/60)
while time.time() - start_time < cool_time:
tool_temps.append(self.s3g.GetToolheadTemperature(tool_index))
csv_writer.writerow([time.time(), tool_temps[-1]])
time.sleep(0.03)
heat_cycle += 1
plt.plot(tool_temps)
plt.show()
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("-p", "--port", dest="serialPort", default="/dev/ttyACM0")
parser.add_option("-f", "--file", dest="filename", default="temp_data_back.csv")
parser.add_option("-t", "--tool_count", dest="toolCount", default="dual")
(options, args) = parser.parse_args()
del sys.argv[1:]
tests = unittest.TestSuite()
tests.addTest(ReplicatorStateTests('EEpromCheckForValidEntries'))
unittest.TextTestRunner(verbosity=2).run(tests)
| agpl-3.0 |
lohraspco/InterdependentNetworks | netClique.py | 1 | 2488 |
'''
Created on Jul 15, 2017
'''
import networkx as nx
import matplotlib.pylab as plt
import itertools as it
import random
def draw_circle_around_clique(clique,coords):
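    # Enclose the clique in a circle: find the two clique vertices that are
    # farthest apart, use their midpoint as the centre and half of their
    # distance (padded by a factor of 1.3) as the radius.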
dist=0
temp_dist=0
center=[0 for i in range(2)]
color=next(colors)
for a in clique:
for b in clique:
temp_dist=(coords[a][0]-coords[b][0])**2+(coords[a][1]-coords[b][1])**2
if temp_dist>dist:
dist=temp_dist
for i in range(2):
center[i]=(coords[a][i]+coords[b][i])/2
rad=dist**0.5/2
cir = plt.Circle((center[0],center[1]), radius=rad*1.3,fill=False,color=color,hatch=next(hatches))
plt.gca().add_patch(cir)
plt.axis('scaled')
# return color of the circle, to use it as the color for vertices of the cliques
return color
global colors, hatches
colors=it.cycle('bgrcmyk')# blue, green, red, ...
hatches=it.cycle('/\|-+*')
no = 36
# create a random graph
G=nx.gnp_random_graph(n=no,p=0.4)
# remember the coordinates of the vertices
#coords=nx.spring_layout(G)
coords=nx.circular_layout(G)
fig = plt.figure()
#draw the graph
nx.draw(G,pos=coords,edge_color='blue',node_color='red', with_labels=True,font_color='black')
fig.savefig('/home/manicbird/Dropbox/EclipseWorkSpace/fig'+str(no)+'.png')
subG = random.sample(range(no),12)
print(type(subG))
nodeshapes='so^>v<dph8'
nx.draw_networkx_nodes(G.subgraph(subG), pos=coords, node_color='red', linewidths=3, node_shape='<')
nx.draw_networkx_edges(G.subgraph(subG), pos=coords, edge_color='black', width=3)
drawCliques = False
if drawCliques:
# remove "len(clique)>2" if you're interested in maxcliques with 2 edges
cliques=[clique for clique in nx.find_cliques(G) if len(clique)>2]
k=[len(x) for x in cliques]
counter=0
for clique in cliques:
if (k[counter]==max(k)):
print ("Clique length to appear: {} ".format(len(clique)))
nx.draw_networkx_nodes(G.subgraph(clique), pos=coords, node_color='blue')
nx.draw_networkx_edges(G.subgraph(clique), pos=coords, edge_color='black', width=2)
break
#nx.draw_networkx_nodes(G,pos=coords,nodelist=clique,node_color=draw_circle_around_clique(clique,coords))
counter += 1
plt.show()
fig.savefig('/home/manicbird/Dropbox/EclipseWorkSpace/fig'+str(no)+'clique.png')
if __name__ == '__main__':
pass
| gpl-3.0 |
yunque/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/hpsModel-sax-phrase.py | 24 | 1834 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
import hpsModel as HPS
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/sax-phrase-short.wav'))
w = np.blackman(601)
N = 1024
t = -100
nH = 100
minf0 = 350
maxf0 = 700
f0et = 5
minSineDur = .1
harmDevSlope = 0.01
Ns = 512
H = Ns/4
stocf = .2
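# Analysis/synthesis settings above: a 601-sample Blackman window, FFT size N,
# magnitude threshold t in dB, at most nH harmonics, an f0 search range of
# [minf0, maxf0] Hz with error threshold f0et, allowed harmonic deviation
# slope harmDevSlope, minimum sine-track duration minSineDur in seconds,
# synthesis FFT size Ns with hop H, and stochastic decimation factor stocf.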
hfreq, hmag, hphase, mYst = HPS.hpsModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur, Ns, stocf)
y, yh, yst = HPS.hpsModelSynth(hfreq, hmag, hphase, mYst, Ns, H, fs)
maxplotfreq = 10000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(311)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.autoscale(tight=True)
plt.title('x (sax-phrase-short.wav)')
plt.subplot(312)
numFrames = int(mYst[:,0].size)
sizeEnv = int(mYst[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(mYst[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('harmonics + stochastic')
plt.subplot(313)
plt.plot(np.arange(y.size)/float(fs), y, 'b')
plt.autoscale(tight=True)
plt.title('y')
plt.tight_layout()
plt.savefig('hpsModel-sax-phrase.png')
UF.wavwrite(y, fs, 'sax-phrase-hps-synthesis.wav')
UF.wavwrite(yh, fs, 'sax-phrase-harmonic.wav')
UF.wavwrite(yst, fs, 'sax-phrase-stochastic.wav')
plt.show()
| agpl-3.0 |
icexelloss/spark | python/pyspark/sql/tests/test_pandas_udf_scalar.py | 3 | 37309 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import random
import shutil
import sys
import tempfile
import time
import unittest
if sys.version >= '3':
unicode = str
from datetime import date, datetime
from decimal import Decimal
from pyspark.rdd import PythonEvalType
from pyspark.sql import Column
from pyspark.sql.functions import array, col, expr, lit, sum, struct, udf, pandas_udf
from pyspark.sql.types import Row
from pyspark.sql.types import *
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import ReusedSQLTestCase, test_compiled,\
test_not_compiled_message, have_pandas, have_pyarrow, pandas_requirement_message, \
pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
if have_pyarrow:
import pyarrow as pa
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class ScalarPandasUDFTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.sc.environment["TZ"] = tz
cls.spark.conf.set("spark.sql.session.timeZone", tz)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
@property
def nondeterministic_vectorized_udf(self):
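        # A scalar Pandas UDF that ignores its input values and returns one
        # uniform random double per row; asNondeterministic() keeps the
        # optimizer from assuming repeated calls yield identical results.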
import numpy as np
@pandas_udf('double')
def random_udf(v):
return pd.Series(np.random.random(len(v)))
random_udf = random_udf.asNondeterministic()
return random_udf
def test_pandas_udf_tokenize(self):
tokenize = pandas_udf(lambda s: s.apply(lambda str: str.split(' ')),
ArrayType(StringType()))
self.assertEqual(tokenize.returnType, ArrayType(StringType()))
df = self.spark.createDataFrame([("hi boo",), ("bye boo",)], ["vals"])
result = df.select(tokenize("vals").alias("hi"))
self.assertEqual([Row(hi=[u'hi', u'boo']), Row(hi=[u'bye', u'boo'])], result.collect())
def test_pandas_udf_nested_arrays(self):
tokenize = pandas_udf(lambda s: s.apply(lambda str: [str.split(' ')]),
ArrayType(ArrayType(StringType())))
self.assertEqual(tokenize.returnType, ArrayType(ArrayType(StringType())))
df = self.spark.createDataFrame([("hi boo",), ("bye boo",)], ["vals"])
result = df.select(tokenize("vals").alias("hi"))
self.assertEqual([Row(hi=[[u'hi', u'boo']]), Row(hi=[[u'bye', u'boo']])], result.collect())
def test_vectorized_udf_basic(self):
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'),
array(col('id')).alias('array_long'))
f = lambda x: x
str_f = pandas_udf(f, StringType())
int_f = pandas_udf(f, IntegerType())
long_f = pandas_udf(f, LongType())
float_f = pandas_udf(f, FloatType())
double_f = pandas_udf(f, DoubleType())
decimal_f = pandas_udf(f, DecimalType())
bool_f = pandas_udf(f, BooleanType())
array_long_f = pandas_udf(f, ArrayType(LongType()))
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')), array_long_f('array_long'))
self.assertEquals(df.collect(), res.collect())
def test_register_nondeterministic_vectorized_udf_basic(self):
random_pandas_udf = pandas_udf(
lambda x: random.randint(6, 6) + x, IntegerType()).asNondeterministic()
self.assertEqual(random_pandas_udf.deterministic, False)
self.assertEqual(random_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
nondeterministic_pandas_udf = self.spark.catalog.registerFunction(
"randomPandasUDF", random_pandas_udf)
self.assertEqual(nondeterministic_pandas_udf.deterministic, False)
self.assertEqual(nondeterministic_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
[row] = self.spark.sql("SELECT randomPandasUDF(1)").collect()
self.assertEqual(row[0], 7)
def test_vectorized_udf_null_boolean(self):
data = [(True,), (True,), (None,), (False,)]
schema = StructType().add("bool", BooleanType())
df = self.spark.createDataFrame(data, schema)
bool_f = pandas_udf(lambda x: x, BooleanType())
res = df.select(bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_byte(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("byte", ByteType())
df = self.spark.createDataFrame(data, schema)
byte_f = pandas_udf(lambda x: x, ByteType())
res = df.select(byte_f(col('byte')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_short(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("short", ShortType())
df = self.spark.createDataFrame(data, schema)
short_f = pandas_udf(lambda x: x, ShortType())
res = df.select(short_f(col('short')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_int(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("int", IntegerType())
df = self.spark.createDataFrame(data, schema)
int_f = pandas_udf(lambda x: x, IntegerType())
res = df.select(int_f(col('int')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_long(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("long", LongType())
df = self.spark.createDataFrame(data, schema)
long_f = pandas_udf(lambda x: x, LongType())
res = df.select(long_f(col('long')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_float(self):
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("float", FloatType())
df = self.spark.createDataFrame(data, schema)
float_f = pandas_udf(lambda x: x, FloatType())
res = df.select(float_f(col('float')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_double(self):
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("double", DoubleType())
df = self.spark.createDataFrame(data, schema)
double_f = pandas_udf(lambda x: x, DoubleType())
res = df.select(double_f(col('double')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_decimal(self):
data = [(Decimal(3.0),), (Decimal(5.0),), (Decimal(-1.0),), (None,)]
schema = StructType().add("decimal", DecimalType(38, 18))
df = self.spark.createDataFrame(data, schema)
decimal_f = pandas_udf(lambda x: x, DecimalType(38, 18))
res = df.select(decimal_f(col('decimal')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_string(self):
data = [("foo",), (None,), ("bar",), ("bar",)]
schema = StructType().add("str", StringType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, StringType())
res = df.select(str_f(col('str')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_string_in_udf(self):
df = self.spark.range(10)
str_f = pandas_udf(lambda x: pd.Series(map(str, x)), StringType())
actual = df.select(str_f(col('id')))
expected = df.select(col('id').cast('string'))
self.assertEquals(expected.collect(), actual.collect())
def test_vectorized_udf_datatype_string(self):
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'))
f = lambda x: x
str_f = pandas_udf(f, 'string')
int_f = pandas_udf(f, 'integer')
long_f = pandas_udf(f, 'long')
float_f = pandas_udf(f, 'float')
double_f = pandas_udf(f, 'double')
decimal_f = pandas_udf(f, 'decimal(38, 18)')
bool_f = pandas_udf(f, 'boolean')
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_binary(self):
data = [(bytearray(b"a"),), (None,), (bytearray(b"bb"),), (bytearray(b"ccc"),)]
schema = StructType().add("binary", BinaryType())
df = self.spark.createDataFrame(data, schema)
str_f = pandas_udf(lambda x: x, BinaryType())
res = df.select(str_f(col('binary')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_array_type(self):
data = [([1, 2],), ([3, 4],)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_null_array(self):
data = [([1, 2],), (None,), (None,), ([3, 4],), (None,)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()))
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_struct_type(self):
df = self.spark.range(10)
return_type = StructType([
StructField('id', LongType()),
StructField('str', StringType())])
def func(id):
return pd.DataFrame({'id': id, 'str': id.apply(unicode)})
f = pandas_udf(func, returnType=return_type)
expected = df.select(struct(col('id'), col('id').cast('string').alias('str'))
.alias('struct')).collect()
actual = df.select(f(col('id')).alias('struct')).collect()
self.assertEqual(expected, actual)
g = pandas_udf(func, 'id: long, str: string')
actual = df.select(g(col('id')).alias('struct')).collect()
self.assertEqual(expected, actual)
struct_f = pandas_udf(lambda x: x, return_type)
actual = df.select(struct_f(struct(col('id'), col('id').cast('string').alias('str'))))
self.assertEqual(expected, actual.collect())
def test_vectorized_udf_struct_complex(self):
df = self.spark.range(10)
return_type = StructType([
StructField('ts', TimestampType()),
StructField('arr', ArrayType(LongType()))])
@pandas_udf(returnType=return_type)
def f(id):
return pd.DataFrame({'ts': id.apply(lambda i: pd.Timestamp(i)),
'arr': id.apply(lambda i: [i, i + 1])})
actual = df.withColumn('f', f(col('id'))).collect()
for i, row in enumerate(actual):
id, f = row
self.assertEqual(i, id)
self.assertEqual(pd.Timestamp(i).to_pydatetime(), f[0])
self.assertListEqual([i, i + 1], f[1])
def test_vectorized_udf_nested_struct(self):
nested_type = StructType([
StructField('id', IntegerType()),
StructField('nested', StructType([
StructField('foo', StringType()),
StructField('bar', FloatType())
]))
])
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Invalid returnType with scalar Pandas UDFs'):
pandas_udf(lambda x: x, returnType=nested_type)
def test_vectorized_udf_complex(self):
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'),
col('id').cast('double').alias('c'))
add = pandas_udf(lambda x, y: x + y, IntegerType())
power2 = pandas_udf(lambda x: 2 ** x, IntegerType())
mul = pandas_udf(lambda x, y: x * y, DoubleType())
res = df.select(add(col('a'), col('b')), power2(col('a')), mul(col('b'), col('c')))
expected = df.select(expr('a + b'), expr('power(2, a)'), expr('b * c'))
self.assertEquals(expected.collect(), res.collect())
def test_vectorized_udf_exception(self):
df = self.spark.range(10)
raise_exception = pandas_udf(lambda x: x * (1 / 0), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'division( or modulo)? by zero'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_invalid_length(self):
df = self.spark.range(10)
raise_exception = pandas_udf(lambda _: pd.Series(1), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Result vector from pandas_udf was not the required length'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_chained(self):
df = self.spark.range(10)
f = pandas_udf(lambda x: x + 1, LongType())
g = pandas_udf(lambda x: x - 1, LongType())
res = df.select(g(f(col('id'))))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_chained_struct_type(self):
df = self.spark.range(10)
return_type = StructType([
StructField('id', LongType()),
StructField('str', StringType())])
@pandas_udf(return_type)
def f(id):
return pd.DataFrame({'id': id, 'str': id.apply(unicode)})
g = pandas_udf(lambda x: x, return_type)
expected = df.select(struct(col('id'), col('id').cast('string').alias('str'))
.alias('struct')).collect()
actual = df.select(g(f(col('id'))).alias('struct')).collect()
self.assertEqual(expected, actual)
def test_vectorized_udf_wrong_return_type(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x * 1.0, MapType(LongType(), LongType()))
def test_vectorized_udf_return_scalar(self):
df = self.spark.range(10)
f = pandas_udf(lambda x: 1.0, DoubleType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Return.*type.*Series'):
df.select(f(col('id'))).collect()
def test_vectorized_udf_decorator(self):
df = self.spark.range(10)
@pandas_udf(returnType=LongType())
def identity(x):
return x
res = df.select(identity(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_empty_partition(self):
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda x: x, LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_struct_with_empty_partition(self):
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))\
.withColumn('name', lit('John Doe'))
@pandas_udf("first string, last string")
def split_expand(n):
return n.str.split(expand=True)
result = df.select(split_expand('name')).collect()
self.assertEqual(1, len(result))
row = result[0]
self.assertEqual('John', row[0]['first'])
self.assertEqual('Doe', row[0]['last'])
def test_vectorized_udf_varargs(self):
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
f = pandas_udf(lambda *v: v[0], LongType())
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_unsupported_types(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x, MapType(StringType(), IntegerType()))
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*scalar Pandas UDF.*ArrayType.StructType'):
pandas_udf(lambda x: x, ArrayType(StructType([StructField('a', IntegerType())])))
def test_vectorized_udf_dates(self):
schema = StructType().add("idx", LongType()).add("date", DateType())
data = [(0, date(1969, 1, 1),),
(1, date(2012, 2, 2),),
(2, None,),
(3, date(2100, 4, 4),),
(4, date(2262, 4, 12),)]
df = self.spark.createDataFrame(data, schema=schema)
date_copy = pandas_udf(lambda t: t, returnType=DateType())
df = df.withColumn("date_copy", date_copy(col("date")))
@pandas_udf(returnType=StringType())
def check_data(idx, date, date_copy):
msgs = []
is_equal = date.isnull()
for i in range(len(idx)):
if (is_equal[i] and data[idx[i]][1] is None) or \
date[i] == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"date values are not equal (date='%s': data[%d][1]='%s')"
% (date[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data",
check_data(col("idx"), col("date"), col("date_copy"))).collect()
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "date" col
self.assertEquals(data[i][1], result[i][2]) # "date_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_timestamps(self):
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(0, datetime(1969, 1, 1, 1, 1, 1)),
(1, datetime(2012, 2, 2, 2, 2, 2)),
(2, None),
(3, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
# Check that a timestamp passed through a pandas_udf will not be altered by timezone calc
f_timestamp_copy = pandas_udf(lambda t: t, returnType=TimestampType())
df = df.withColumn("timestamp_copy", f_timestamp_copy(col("timestamp")))
@pandas_udf(returnType=StringType())
def check_data(idx, timestamp, timestamp_copy):
msgs = []
is_equal = timestamp.isnull() # use this array to check values are equal
for i in range(len(idx)):
# Check that timestamps are as expected in the UDF
if (is_equal[i] and data[idx[i]][1] is None) or \
timestamp[i].to_pydatetime() == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"timestamp values are not equal (timestamp='%s': data[%d][1]='%s')"
% (timestamp[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
result = df.withColumn("check_data", check_data(col("idx"), col("timestamp"),
col("timestamp_copy"))).collect()
# Check that collection values are correct
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "timestamp" col
self.assertEquals(data[i][1], result[i][2]) # "timestamp_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_return_timestamp_tz(self):
df = self.spark.range(10)
@pandas_udf(returnType=TimestampType())
def gen_timestamps(id):
ts = [pd.Timestamp(i, unit='D', tz='America/Los_Angeles') for i in id]
return pd.Series(ts)
result = df.withColumn("ts", gen_timestamps(col("id"))).collect()
spark_ts_t = TimestampType()
for r in result:
i, ts = r
ts_tz = pd.Timestamp(i, unit='D', tz='America/Los_Angeles').to_pydatetime()
expected = spark_ts_t.fromInternal(spark_ts_t.toInternal(ts_tz))
self.assertEquals(expected, ts)
def test_vectorized_udf_check_config(self):
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 3}):
df = self.spark.range(10, numPartitions=1)
@pandas_udf(returnType=LongType())
def check_records_per_batch(x):
return pd.Series(x.size).repeat(x.size)
result = df.select(check_records_per_batch(col("id"))).collect()
for (r,) in result:
self.assertTrue(r <= 3)
def test_vectorized_udf_timestamps_respect_session_timezone(self):
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(1, datetime(1969, 1, 1, 1, 1, 1)),
(2, datetime(2012, 2, 2, 2, 2, 2)),
(3, None),
(4, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
f_timestamp_copy = pandas_udf(lambda ts: ts, TimestampType())
internal_value = pandas_udf(
lambda ts: ts.apply(lambda ts: ts.value if ts is not pd.NaT else None), LongType())
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
df_la = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_la = df_la.select(col("idx"), col("internal_value")).collect()
# Correct result_la by adjusting for the 3-hour difference between Los Angeles and New York
diff = 3 * 60 * 60 * 1000 * 1000 * 1000
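# (the diff above is 3 hours expressed in nanoseconds, since pandas Timestamp.value is a nanosecond count)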
result_la_corrected = \
df_la.select(col("idx"), col("tscopy"), col("internal_value") + diff).collect()
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
df_ny = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_ny = df_ny.select(col("idx"), col("tscopy"), col("internal_value")).collect()
self.assertNotEqual(result_ny, result_la)
self.assertEqual(result_ny, result_la_corrected)
def test_nondeterministic_vectorized_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
@pandas_udf('double')
def plus_ten(v):
return v + 10
random_udf = self.nondeterministic_vectorized_udf
df = self.spark.range(10).withColumn('rand', random_udf(col('id')))
result1 = df.withColumn('plus_ten(rand)', plus_ten(df['rand'])).toPandas()
self.assertEqual(random_udf.deterministic, False)
self.assertTrue(result1['plus_ten(rand)'].equals(result1['rand'] + 10))
def test_nondeterministic_vectorized_udf_in_aggregate(self):
df = self.spark.range(10)
random_udf = self.nondeterministic_vectorized_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.groupby(df.id).agg(sum(random_udf(df.id))).collect()
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.agg(sum(random_udf(df.id))).collect()
def test_register_vectorized_udf_basic(self):
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'))
original_add = pandas_udf(lambda x, y: x + y, IntegerType())
self.assertEqual(original_add.deterministic, True)
self.assertEqual(original_add.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
new_add = self.spark.catalog.registerFunction("add1", original_add)
res1 = df.select(new_add(col('a'), col('b')))
res2 = self.spark.sql(
"SELECT add1(t.a, t.b) FROM (SELECT id as a, id as b FROM range(10)) t")
expected = df.select(expr('a + b'))
self.assertEquals(expected.collect(), res1.collect())
self.assertEquals(expected.collect(), res2.collect())
# Regression test for SPARK-23314
def test_timestamp_dst(self):
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime(2015, 11, 1, 0, 30),
datetime(2015, 11, 1, 1, 30),
datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda x: x, 'timestamp')
result = df.withColumn('time', foo_udf(df.time))
self.assertEquals(df.collect(), result.collect())
@unittest.skipIf(sys.version_info[:2] < (3, 5), "Type hints are supported from Python 3.5.")
def test_type_annotation(self):
# Regression test to check if type hints can be used. See SPARK-23569.
# Note that it throws an error during compilation in lower Python versions if 'exec'
# is not used. Also, note that we explicitly use another dictionary to avoid modifications
# in the current 'locals()'.
#
# Hyukjin: I think it's an ugly way to test issues about syntax specific to
# higher versions of Python, which we shouldn't encourage. This was the last resort
# I could come up with at that time.
_locals = {}
exec(
"import pandas as pd\ndef noop(col: pd.Series) -> pd.Series: return col",
_locals)
df = self.spark.range(1).select(pandas_udf(f=_locals['noop'], returnType='bigint')('id'))
self.assertEqual(df.first()[0], 0)
def test_mixed_udf(self):
df = self.spark.range(0, 1).toDF('v')
# Test mixture of multiple UDFs and Pandas UDFs.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
@pandas_udf('int')
def f2(x):
assert type(x) == pd.Series
return x + 10
@udf('int')
def f3(x):
assert type(x) == int
return x + 100
@pandas_udf('int')
def f4(x):
assert type(x) == pd.Series
return x + 1000
# Test single expression with chained UDFs
df_chained_1 = df.withColumn('f2_f1', f2(f1(df['v'])))
df_chained_2 = df.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
df_chained_3 = df.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(df['v'])))))
df_chained_4 = df.withColumn('f4_f2_f1', f4(f2(f1(df['v']))))
df_chained_5 = df.withColumn('f4_f3_f1', f4(f3(f1(df['v']))))
expected_chained_1 = df.withColumn('f2_f1', df['v'] + 11)
expected_chained_2 = df.withColumn('f3_f2_f1', df['v'] + 111)
expected_chained_3 = df.withColumn('f4_f3_f2_f1', df['v'] + 1111)
expected_chained_4 = df.withColumn('f4_f2_f1', df['v'] + 1011)
expected_chained_5 = df.withColumn('f4_f3_f1', df['v'] + 1101)
self.assertEquals(expected_chained_1.collect(), df_chained_1.collect())
self.assertEquals(expected_chained_2.collect(), df_chained_2.collect())
self.assertEquals(expected_chained_3.collect(), df_chained_3.collect())
self.assertEquals(expected_chained_4.collect(), df_chained_4.collect())
self.assertEquals(expected_chained_5.collect(), df_chained_5.collect())
# Test multiple mixed UDF expressions in a single projection
df_multi_1 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(col('f1'))) \
.withColumn('f3_f1', f3(col('f1'))) \
.withColumn('f4_f1', f4(col('f1'))) \
.withColumn('f3_f2', f3(col('f2'))) \
.withColumn('f4_f2', f4(col('f2'))) \
.withColumn('f4_f3', f4(col('f3'))) \
.withColumn('f3_f2_f1', f3(col('f2_f1'))) \
.withColumn('f4_f2_f1', f4(col('f2_f1'))) \
.withColumn('f4_f3_f1', f4(col('f3_f1'))) \
.withColumn('f4_f3_f2', f4(col('f3_f2'))) \
.withColumn('f4_f3_f2_f1', f4(col('f3_f2_f1')))
# Test mixed udfs in a single expression
df_multi_2 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(f1(col('v')))) \
.withColumn('f3_f1', f3(f1(col('v')))) \
.withColumn('f4_f1', f4(f1(col('v')))) \
.withColumn('f3_f2', f3(f2(col('v')))) \
.withColumn('f4_f2', f4(f2(col('v')))) \
.withColumn('f4_f3', f4(f3(col('v')))) \
.withColumn('f3_f2_f1', f3(f2(f1(col('v'))))) \
.withColumn('f4_f2_f1', f4(f2(f1(col('v'))))) \
.withColumn('f4_f3_f1', f4(f3(f1(col('v'))))) \
.withColumn('f4_f3_f2', f4(f3(f2(col('v'))))) \
.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(col('v'))))))
expected = df \
.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f4', df['v'] + 1000) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f4_f1', df['v'] + 1001) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f4_f2', df['v'] + 1010) \
.withColumn('f4_f3', df['v'] + 1100) \
.withColumn('f3_f2_f1', df['v'] + 111) \
.withColumn('f4_f2_f1', df['v'] + 1011) \
.withColumn('f4_f3_f1', df['v'] + 1101) \
.withColumn('f4_f3_f2', df['v'] + 1110) \
.withColumn('f4_f3_f2_f1', df['v'] + 1111)
self.assertEquals(expected.collect(), df_multi_1.collect())
self.assertEquals(expected.collect(), df_multi_2.collect())
def test_mixed_udf_and_sql(self):
df = self.spark.range(0, 1).toDF('v')
# Test mixture of UDFs, Pandas UDFs and SQL expression.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
def f2(x):
assert type(x) == Column
return x + 10
@pandas_udf('int')
def f3(x):
assert type(x) == pd.Series
return x + 100
df1 = df.withColumn('f1', f1(df['v'])) \
.withColumn('f2', f2(df['v'])) \
.withColumn('f3', f3(df['v'])) \
.withColumn('f1_f2', f1(f2(df['v']))) \
.withColumn('f1_f3', f1(f3(df['v']))) \
.withColumn('f2_f1', f2(f1(df['v']))) \
.withColumn('f2_f3', f2(f3(df['v']))) \
.withColumn('f3_f1', f3(f1(df['v']))) \
.withColumn('f3_f2', f3(f2(df['v']))) \
.withColumn('f1_f2_f3', f1(f2(f3(df['v'])))) \
.withColumn('f1_f3_f2', f1(f3(f2(df['v'])))) \
.withColumn('f2_f1_f3', f2(f1(f3(df['v'])))) \
.withColumn('f2_f3_f1', f2(f3(f1(df['v'])))) \
.withColumn('f3_f1_f2', f3(f1(f2(df['v'])))) \
.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
expected = df.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f1_f2', df['v'] + 11) \
.withColumn('f1_f3', df['v'] + 101) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f2_f3', df['v'] + 110) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f1_f2_f3', df['v'] + 111) \
.withColumn('f1_f3_f2', df['v'] + 111) \
.withColumn('f2_f1_f3', df['v'] + 111) \
.withColumn('f2_f3_f1', df['v'] + 111) \
.withColumn('f3_f1_f2', df['v'] + 111) \
.withColumn('f3_f2_f1', df['v'] + 111)
self.assertEquals(expected.collect(), df1.collect())
# SPARK-24721
@unittest.skipIf(not test_compiled, test_not_compiled_message)
def test_datasource_with_udf(self):
# Same as SQLTests.test_datasource_with_udf, but with Pandas UDF
# This needs to be a separate test because the Arrow dependency is optional
import numpy as np
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
self.spark.range(1).write.mode("overwrite").format('csv').save(path)
filesource_df = self.spark.read.option('inferSchema', True).csv(path).toDF('i')
datasource_df = self.spark.read \
.format("org.apache.spark.sql.sources.SimpleScanSource") \
.option('from', 0).option('to', 1).load().toDF('i')
datasource_v2_df = self.spark.read \
.format("org.apache.spark.sql.sources.v2.SimpleDataSourceV2") \
.load().toDF('i', 'j')
c1 = pandas_udf(lambda x: x + 1, 'int')(lit(1))
c2 = pandas_udf(lambda x: x + 1, 'int')(col('i'))
f1 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(lit(1))
f2 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(col('i'))
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c1)
expected = df.withColumn('c', lit(2))
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c2)
expected = df.withColumn('c', col('i') + 1)
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
for f in [f1, f2]:
result = df.filter(f)
self.assertEquals(0, result.count())
finally:
shutil.rmtree(path)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_scalar import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
siutanwong/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
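# (presumably because the 'libsvm_sparse' extension below links against the
# 'libsvm-skl' helper library declared earlier via libraries=['libsvm-skl'])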
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
credp/lisa | lisa/analysis/latency.py | 2 | 14254 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import numpy as np
from lisa.analysis.base import TraceAnalysisBase
from lisa.notebook import COLOR_CYCLE
from lisa.analysis.tasks import TaskState, TasksAnalysis
from lisa.datautils import df_refit_index
from lisa.trace import TaskID
class LatencyAnalysis(TraceAnalysisBase):
"""
Support for plotting Latency Analysis data
:param trace: input Trace object
:type trace: :class:`trace.Trace`
"""
name = 'latency'
LATENCY_THRESHOLD_ZONE_COLOR = COLOR_CYCLE[2]
LATENCY_THRESHOLD_COLOR = COLOR_CYCLE[3]
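# Usage sketch (illustrative; follows the trace.analysis.<name> pattern used below):
#   latency = trace.analysis.latency
#   wakeup_df = latency.df_latency_wakeup("mytask")  # task name, PID or (pid, comm)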
###############################################################################
# DataFrame Getter Methods
###############################################################################
@TraceAnalysisBase.cache
@TasksAnalysis.df_task_states.used_events
def _df_latency(self, task, name, curr_state, next_state):
df = self.trace.analysis.tasks.df_task_states(task)
df = df[
(df.curr_state == curr_state) &
(df.next_state == next_state)
][["delta"]]
df = df.rename(columns={'delta': name}, copy=False)
return df
@_df_latency.used_events
def df_latency_wakeup(self, task):
"""
DataFrame of a task's wakeup latencies
:param task: The task's name or PID
:type task: int or str or tuple(int, str)
:returns: a :class:`pandas.DataFrame` with:
* A ``wakeup_latency`` column (the wakeup latency at that timestamp).
"""
return self._df_latency(
task,
'wakeup_latency',
TaskState.TASK_WAKING,
TaskState.TASK_ACTIVE
)
@_df_latency.used_events
def df_latency_preemption(self, task):
"""
DataFrame of a task's preemption latencies
:param task: The task's name or PID
:type task: int or str or tuple(int, str)
:returns: a :class:`pandas.DataFrame` with:
* A ``preempt_latency`` column (the preemption latency at that timestamp).
"""
return self._df_latency(
task,
'preempt_latency',
TaskState.TASK_RUNNING,
TaskState.TASK_ACTIVE
)
@TraceAnalysisBase.cache
@TasksAnalysis.df_task_states.used_events
def df_activations(self, task):
"""
DataFrame of a task's activations
:param task: The task's name or PID
:type task: int or str or tuple(int, str)
:returns: a :class:`pandas.DataFrame` with:
* An ``activation_interval`` column (the time since the last activation).
"""
wkp_df = self.trace.analysis.tasks.df_task_states(task)
wkp_df = wkp_df[wkp_df.curr_state == TaskState.TASK_WAKING]
index = wkp_df.index.to_series()
activation_interval = (index.shift(-1) - index).shift(1)
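# The double shift above leaves, at each wakeup timestamp t[i], the value
# t[i] - t[i-1], i.e. the time elapsed since the task's previous wakeup.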
return pd.DataFrame({'activation_interval': activation_interval})
@TraceAnalysisBase.cache
@TasksAnalysis.df_task_states.used_events
def df_runtimes(self, task):
"""
DataFrame of task's runtime each time the task blocks
:param task: The task's name or PID
:type task: int or str or tuple(int, str)
:returns: a :class:`pandas.DataFrame` with:
* The times where the task stopped running as an index
* A ``curr_state`` column (the current task state, see
:class:`lisa.analysis.tasks.TaskState`)
* A ``running_time`` column (the cumulated running time since the
last activation).
"""
df = self.trace.analysis.tasks.df_task_states(task)
runtimes = []
spurious_wkp = False
# Using df.apply() is risky for counting (can be called more than once
# on the same row), so use a loop instead
for index, row in df.iterrows():
runtime = runtimes[-1] if len(runtimes) else 0
if row.curr_state == TaskState.TASK_WAKING:
# This is required to capture strange trace sequences where
# a switch_in event is followed by a wakeup_event.
# This sequence is not expected, but we found it in some traces.
# Possible reasons could be:
# - misplaced sched_wakeup events
# - trace buffer artifacts
# This remains to be investigated further in kernel space.
# For the time being, we account this interval as RUNNING time,
# which is what kernelshark does.
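# Illustrative (assumed) sequence handled by this branch:
#   ... -> TASK_ACTIVE -> TASK_WAKING -> TASK_ACTIVE -> ...
# where the unexpected TASK_WAKING delta is added to the running time
# instead of resetting the counter.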
if spurious_wkp:
runtime += row.delta
spurious_wkp = False
else:
# This is a new activation, reset the runtime counter
runtime = 0
elif row.curr_state == TaskState.TASK_ACTIVE:
# This is the spurious wakeup case mentioned above
if row.next_state == TaskState.TASK_WAKING:
spurious_wkp = True
runtime += row.delta
runtimes.append(runtime)
df["running_time"] = runtimes
# The runtime column is not entirely correct - at a task's first
# TASK_ACTIVE occurrence, the running_time will be non-zero, even
# though the task has not run yet. However, it's much simpler to
# accumulate the running_time the way we do and shift it later.
df.running_time = df.running_time.shift(1)
df.running_time = df.running_time.fillna(0)
return df[~df.curr_state.isin([
TaskState.TASK_ACTIVE,
TaskState.TASK_WAKING
])][["curr_state", "running_time"]]
###############################################################################
# Plotting Methods
###############################################################################
@TraceAnalysisBase.plot_method()
@df_latency_wakeup.used_events
@df_latency_preemption.used_events
def plot_latencies(self, task: TaskID, axis, local_fig, wakeup: bool=True, preempt: bool=True,
threshold_ms: float=1):
"""
Plot the latencies of a task over time
:param task: The task's name or PID
:type task: int or str or tuple(int, str)
:param wakeup: Whether to plot wakeup latencies
:type wakeup: bool
:param preempt: Whether to plot preemption latencies
:type preempt: bool
:param threshold_ms: The latency threshold to plot
:type threshold_ms: int or float
"""
axis.axhline(threshold_ms / 1e3, linestyle='--', color=self.LATENCY_THRESHOLD_COLOR,
label=f"{threshold_ms}ms threshold")
for do_plot, name, label, df_getter in (
(wakeup, 'wakeup', 'Wakeup', self.df_latency_wakeup),
(preempt, 'preempt', 'Preemption', self.df_latency_preemption),
):
if not do_plot:
continue
df = df_getter(task)
if df.empty:
self.get_logger().warning(f"No data to plot for {name}")
else:
df = df_refit_index(df, window=self.trace.window)
df.plot(ax=axis, style='+', label=label)
axis.set_title(f'Latencies of task "{task}"')
axis.set_ylabel("Latency (s)")
axis.legend()
def _get_cdf(self, data, threshold):
"""
Build the "Cumulative Distribution Function" (CDF) for the given data
"""
# Build the series of sorted values
ser = data.sort_values()
df = pd.Series(np.linspace(0., 1., len(ser)), index=ser)
# Compute percentage of samples above/below the specified threshold
below = float(max(df[:threshold]))
above = 1 - below
return df, above, below
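# Worked example for _get_cdf (assumed data): for sorted latencies [1ms, 2ms, 4ms]
# and a 3ms threshold, the CDF series is {0.001: 0.0, 0.002: 0.5, 0.004: 1.0},
# giving below == 0.5 and above == 0.5.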
@df_latency_wakeup.used_events
@df_latency_preemption.used_events
def _get_latencies_df(self, task, wakeup, preempt):
wkp_df = None
prt_df = None
if wakeup:
wkp_df = self.df_latency_wakeup(task)
wkp_df = wkp_df.rename(columns={'wakeup_latency': 'latency'}, copy=False)
if preempt:
prt_df = self.df_latency_preemption(task)
prt_df = prt_df.rename(columns={'preempt_latency': 'latency'}, copy=False)
if wakeup and preempt:
df = wkp_df.append(prt_df)
else:
# Only one of the two frames is set here; check for None explicitly, since
# evaluating a DataFrame in a boolean context raises ValueError.
df = wkp_df if wkp_df is not None else prt_df
return df
@TraceAnalysisBase.plot_method()
@_get_latencies_df.used_events
def plot_latencies_cdf(self, task: TaskID, axis, local_fig, wakeup: bool=True, preempt: bool=True,
threshold_ms: float=1):
"""
Plot the latencies Cumulative Distribution Function of a task
:param task: The task's name or PID
:type task: int or str or tuple(int, str)
:param wakeup: Whether to plot wakeup latencies
:type wakeup: bool
:param preempt: Whether to plot preemption latencies
:type preempt: bool
:param threshold_ms: The latency threshold to plot
:type threshold_ms: int or float
"""
df = self._get_latencies_df(task, wakeup, preempt)
threshold_s = threshold_ms / 1e3
cdf_df, above, below = self._get_cdf(df.latency, threshold_s)
cdf_df.plot(ax=axis, xlim=(0, None), label="CDF")
axis.axhline(below, linestyle='--', color=self.LATENCY_THRESHOLD_COLOR,
label=f"Latencies below {threshold_ms}ms")
axis.axvspan(0, threshold_s, facecolor=self.LATENCY_THRESHOLD_ZONE_COLOR,
alpha=0.5, label=f"{threshold_ms}ms threshold zone")
axis.set_title(f'Latencies CDF of task "{task}"')
axis.set_xlabel("Latency (s)")
axis.set_ylabel("Latencies below the x value (%)")
axis.legend()
@TraceAnalysisBase.plot_method()
@_get_latencies_df.used_events
def plot_latencies_histogram(self, task: TaskID, axis, local_fig, wakeup: bool=True,
preempt: bool=True, threshold_ms: float=1, bins: int=64):
"""
Plot the latencies histogram of a task
:param task: The task's name or PID
:type task: int or str or tuple(int, str)
:param wakeup: Whether to plot wakeup latencies
:type wakeup: bool
:param preempt: Whether to plot preemption latencies
:type preempt: bool
:param threshold_ms: The latency threshold to plot
:type threshold_ms: int or float
"""
df = self._get_latencies_df(task, wakeup, preempt)
threshold_s = threshold_ms / 1e3
df.latency.plot.hist(bins=bins, ax=axis, xlim=(0, 1.1 * df.latency.max()))
axis.axvspan(0, threshold_s, facecolor=self.LATENCY_THRESHOLD_ZONE_COLOR, alpha=0.5,
label=f"{threshold_ms}ms threshold zone")
axis.set_title(f'Latencies histogram of task "{task}"')
axis.set_xlabel("Latency (s)")
axis.legend()
@TraceAnalysisBase.plot_method()
@df_latency_wakeup.used_events
@df_latency_preemption.used_events
def plot_latency_bands(self, task: TaskID, axis, local_fig):
"""
Draw the task wakeup/preemption latencies as colored bands
:param task: The task's name or PID
:type task: int or str or tuple(int, str)
"""
wkl_df = self.df_latency_wakeup(task)
prt_df = self.df_latency_preemption(task)
def plot_bands(df, column, label):
if df.empty:
return
df = df_refit_index(df, window=self.trace.window)
bands = [(t, df[column][t]) for t in df.index]
color = self.get_next_color(axis)
for idx, (start, duration) in enumerate(bands):
if idx > 0:
label = None
end = start + duration
axis.axvspan(start, end, facecolor=color, alpha=0.5,
label=label)
plot_bands(wkl_df, "wakeup_latency", "Wakeup latencies")
plot_bands(prt_df, "preempt_latency", "Preemption latencies")
axis.legend()
@TraceAnalysisBase.plot_method()
@df_activations.used_events
def plot_activations(self, task: TaskID, axis, local_fig):
"""
Plot the :meth:`lisa.analysis.latency.LatencyAnalysis.df_activations` of a task
:param task: The task's name or PID
:type task: int or str or tuple(int, str)
"""
wkp_df = self.df_activations(task)
wkp_df = df_refit_index(wkp_df, window=self.trace.window)
wkp_df.plot(style='+', logy=False, ax=axis)
plot_overutilized = self.trace.analysis.status.plot_overutilized
if self.trace.has_events(plot_overutilized.used_events):
plot_overutilized(axis=axis)
axis.set_title(f'Activation intervals of task "{task}"')
@TraceAnalysisBase.plot_method()
@df_runtimes.used_events
def plot_runtimes(self, task: TaskID, axis, local_fig):
"""
Plot the :meth:`lisa.analysis.latency.LatencyAnalysis.df_runtimes` of a task
:param task: The task's name or PID
:type task: int or str or tuple(int, str)
"""
df = self.df_runtimes(task)
df = df_refit_index(df, window=self.trace.window)
df.plot(style='+', ax=axis)
plot_overutilized = self.trace.analysis.status.plot_overutilized
if self.trace.has_events(plot_overutilized.used_events):
plot_overutilized(axis=axis)
axis.set_title(f'Per-activation runtimes of task "{task}"')
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
| apache-2.0 |
Iolaum/Phi1337 | scripts/preprocessing/word_count_evaluation.py | 1 | 3691 | import pandas as pd
import re
import nltk
import pickle
import os
from nltk.stem.snowball import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.corpus import stopwords
from feature_engineering import preprocess_data
# clean_text() filters against 'stops'; assuming the English stop-word list was intended.
stops = set(stopwords.words("english"))
def clean_text(text):
# Drop non-ASCII characters, then replace every non-alphanumeric character with a space.
cleaned = re.sub("[^a-zA-Z0-9]", " ", re.sub(r'[^\x00-\x7f]', r'', text), 0, re.UNICODE)
words = cleaned.lower().split()
meaningful_words = [w for w in words if w not in stops]
return " ".join(meaningful_words)
def main(return_text=False):
# Read Files
default_atrr_name = "bullet"
# get the title from the matrix
training_data = preprocess_data()
# columns name : [u'product_uid', u'name', u'value']
attributes = pd.read_csv("../../dataset/attributes.csv")
# column names : 'title', 'description', 'attributes'
bow_col0 = 'product_uid'
bow_col1 = 'title'
bow_col2 = 'description'
bow_col3 = 'attributes'
column_orders = [bow_col0, bow_col1, bow_col2, bow_col3]
bag_of_word_matrix = dict({bow_col0: [], bow_col1: [], bow_col2: [], bow_col3: []})
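# Each list accumulates, per product_uid, the token list of the corresponding text
# field; the dict is turned into a DataFrame and pickled at the end of main().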
prod_ids = training_data["product_uid"].unique()
counter = 0
max_count = -1
for prod_id in prod_ids:
product_title = training_data.loc[training_data['product_uid'] == prod_id].iloc[0]['product_title']
product_description = training_data.loc[training_data['product_uid'] == prod_id].iloc[0]['product_description']
prod_attributes = attributes.loc[attributes['product_uid'] == prod_id]
# print(tokenize_and_stem(clean_text(product_title)))
# print(product_description)
# print(prod_attributes.shape)
attrs = []
for i, r in prod_attributes.iterrows():
if r['name'].lower().find(default_atrr_name) != -1:
attrs.append(r['value'])
else:
str1 = str(r['name'])
str2 = str(r['value'])
mixed_str = []
if len(str1) > 0:
mixed_str.append(str1)
if len(str2) > 0:
mixed_str.append(str2)
attrs.append(' '.join(mixed_str))
all_attributes = ' '.join(attrs)
bag_of_word_matrix[bow_col0].append(prod_id)
bag_of_word_matrix[bow_col1].append(product_title.split())
bag_of_word_matrix[bow_col2].append(product_description.split())
bag_of_word_matrix[bow_col3].append(all_attributes.split())
counter += 1
if counter == max_count:
break
if counter % 1000 == 0:
print("Processed " + str(counter) + " entries.")
# create panda dataframe
df = pd.DataFrame(bag_of_word_matrix, index=prod_ids.tolist()[:counter], columns=column_orders)
# print type(df.index.values[0])
# print type(df.index[0])
df.to_pickle('../../dataset/bow_per_product.pickle')
print(" Finished creating bag of word matrix!")
# for prod_attr in prod_attributes:
# print(prod_attr)
# testing_data = pd.read_csv("test.csv", encoding="ISO-8859-1")
# for desc in descriptions:
# print(desc)
# clean_description = clean_text(desc)
# stemmed_desc = tokenize_and_stem(clean_description)
# print(stemmed_desc)
# exit()
# attribute_data = pd.read_csv("attributes.csv")
if __name__ == "__main__":
# Change return_text to decide whether the cleaned result of each text is returned as a single string or a list of words
if os.path.isfile("../../dataset/bow_per_product.pickle"):
print("Found Bag of Words DataFrame... No Need to proceed further.")
else:
print("No Bag of Words DataFrame Found... Proceed BoW creation")
main(return_text=False)
| apache-2.0 |