repo_name (stringlengths 7-92) | path (stringlengths 5-149) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 911-693k) | license (stringclasses 15 values)
---|---|---|---|---|---
wkerzendorf/wsynphot | wsynphot/base.py | 1 | 15987 | # defining the base filter curve classes
import os
from scipy import interpolate
from wsynphot.spectrum1d import SKSpectrum1D as Spectrum1D
import pandas as pd
from wsynphot.io.cache_filters import load_filter_index, load_transmission_data
from astropy import units as u, constants as const
from astropy import utils
import numpy as np
from wsynphot.calibration import get_vega_calibration_spectrum
def calculate_filter_flux_density(spectrum, filter):
"""
Calculate the filter-weighted flux density by evaluating the integral
.. math::
    F = \\int f_\\lambda(\\lambda) T(\\lambda) \\lambda d\\lambda
Parameters
----------
spectrum: ~specutils.Spectrum1D
spectrum object
filter: ~wsynphot.FilterCurve
:return:
"""
filtered_spectrum = filter * spectrum
filter_flux_density = np.trapz(filtered_spectrum.flux * filtered_spectrum.wavelength,
filtered_spectrum.wavelength)
return filter_flux_density
def calculate_vega_magnitude(spectrum, filter):
filter_flux_density = calculate_filter_flux_density(spectrum, filter)
wavelength_delta = filter.calculate_wavelength_delta()
filtered_f_lambda = (filter_flux_density / wavelength_delta)
zp_vega_f_lambda = filter.zp_vega_f_lambda
return -2.5 * np.log10(filtered_f_lambda / zp_vega_f_lambda)
def calculate_ab_magnitude(spectrum, filter):
filtered_f_lambda = (calculate_filter_flux_density(spectrum, filter) /
filter.calculate_wavelength_delta())
return -2.5 * np.log10(filtered_f_lambda / filter.zp_ab_f_lambda)
def list_filters():
"""
List available filter sets along with their properties
"""
return load_filter_index()
class BaseFilterCurve(object):
"""
Basic filter curve class
Parameters
----------
wavelength: ~astropy.units.Quantity
wavelength for filter curve
transmission_lambda: numpy.ndarray
transmission_lambda for filter curve
interpolation_kind: str
allowed interpolation kinds given in scipy.interpolate.interp1d
"""
@classmethod
def load_filter(cls, filter_id=None, interpolation_kind='linear'):
"""
Parameters
----------
filter_id: str or None
if None is provided will return a DataFrame of all filters
interpolation_kind: str
see scipy.interpolate.interp1d
"""
if filter_id is None:
return list_filters()
else:
filter = load_transmission_data(filter_id)
wavelength_unit = 'angstrom'
wavelength = filter['Wavelength'].values * u.Unit(wavelength_unit)
return cls(wavelength, filter['Transmission'].values,
interpolation_kind=interpolation_kind,
filter_id=filter_id)
def __init__(self, wavelength, transmission_lambda,
interpolation_kind='linear', filter_id=None):
if not hasattr(wavelength, 'unit'):
raise ValueError('the wavelength needs to be an astropy quantity')
self.wavelength = wavelength
self.transmission_lambda = transmission_lambda
self.interpolation_object = interpolate.interp1d(self.wavelength,
self.transmission_lambda,
kind=interpolation_kind,
bounds_error=False,
fill_value=0.0)
self.filter_id = filter_id
def __mul__(self, other):
if not hasattr(other, 'flux') or not hasattr(other, 'wavelength'):
raise ValueError('requiring a specutils.Spectrum1D-like object that '
'has attributes "flux" and "wavelength"')
#new_wavelength = np.union1d(other.wavelength.to(self.wavelength.unit).value,
# self.wavelength.value) * self.wavelength.unit
transmission = self.interpolate(other.wavelength)
return Spectrum1D.from_array(other.wavelength, transmission * other.flux)
def __rmul__(self, other):
return self.__mul__(other)
@utils.lazyproperty
def lambda_pivot(self):
"""
Calculate the pivotal wavelength as defined in Bessell & Murphy 2012
.. math::
\\lambda_\\textrm{pivot} = \\sqrt{
\\frac{\\int S(\\lambda)\\lambda d\\lambda}{\\int \\frac{S(\\lambda)}{\\lambda} d\\lambda}}\\\\
<f_\\nu> = <f_\\lambda>\\frac{\\lambda_\\textrm{pivot}^2}{c}
"""
return np.sqrt((np.trapz(self.transmission_lambda * self.wavelength, self.wavelength)/
(np.trapz(self.transmission_lambda / self.wavelength, self.wavelength))))
@utils.lazyproperty
def wavelength_start(self):
return self.get_wavelength_start()
@utils.lazyproperty
def wavelength_end(self):
return self.get_wavelength_end()
@utils.lazyproperty
def zp_ab_f_lambda(self):
return (self.zp_ab_f_nu * const.c / self.lambda_pivot**2).to(
'erg/s/cm^2/Angstrom', u.spectral())
@utils.lazyproperty
def zp_ab_f_nu(self):
return (3631 * u.Jy).to('erg/s/cm^2/Hz')
@utils.lazyproperty
def zp_vega_f_lambda(self):
return (calculate_filter_flux_density(get_vega_calibration_spectrum(), self) /
self.calculate_wavelength_delta())
def interpolate(self, wavelength):
"""
Interpolate the filter onto new wavelength grid
Parameters
----------
wavelength: ~astropy.units.Quantity
wavelength grid to interpolate on
"""
converted_wavelength = wavelength.to(self.wavelength.unit)
return self.interpolation_object(converted_wavelength)
def _calculate_flux_density(self, wavelength, flux):
return calculate_filter_flux_density(flux, self)
def calculate_flux_density(self, spectrum):
return calculate_filter_flux_density(spectrum, self)
def calculate_f_lambda(self, spectrum):
return (self.calculate_flux_density(spectrum) /
self.calculate_wavelength_delta())
def calculate_wavelength_delta(self):
"""
Calculate the integral :math:`\\int S(\\lambda) \\lambda d\\lambda`
:return:
"""
return np.trapz(self.transmission_lambda * self.wavelength,
self.wavelength)
def calculate_weighted_average_wavelength(self):
"""
Calculate integral :math:`\\frac{\\int S(\\lambda) \\lambda d\\lambda}{\\int S(\\lambda) d\\lambda}`
Returns
: ~astropy.units.Quantity
"""
return (np.trapz(self.transmission_lambda * self.wavelength,
self.wavelength) / self.calculate_wavelength_delta())
def calculate_vega_magnitude(self, spectrum):
__doc__ = calculate_vega_magnitude.__doc__
return calculate_vega_magnitude(spectrum, self)
def calculate_ab_magnitude(self, spectrum):
__doc__ = calculate_ab_magnitude.__doc__
return calculate_ab_magnitude(spectrum, self)
def convert_ab_magnitude_to_f_lambda(self, mag):
return 10**(-0.4*mag) * self.zp_ab_f_lambda
def convert_vega_magnitude_to_f_lambda(self, mag):
return 10**(-0.4*mag) * self.zp_vega_f_lambda
def plot(self, ax, scale_max=None, make_label=True, plot_kwargs={},
format_filter_id=None):
if scale_max is not None:
if hasattr(scale_max, 'unit'):
scale_max = scale_max.value
transmission = (self.transmission_lambda * scale_max
/ self.transmission_lambda.max())
else:
transmission = self.transmission_lambda
ax.plot(self.wavelength, transmission, **plot_kwargs)
ax.set_xlabel('Wavelength [{0}]'.format(
self.wavelength.unit.to_string(format='latex')))
ax.set_ylabel('Transmission [1]')
if make_label and self.filter_id is not None:
if format_filter_id is not None:
filter_id = format_filter_id(self.filter_id)
else:
filter_id = self.filter_id
text_x = (self.lambda_pivot).value
text_y = transmission.max()/2
ax.text(text_x, text_y, filter_id,
horizontalalignment='center', verticalalignment='center',
bbox=dict(facecolor='white', alpha=0.5))
def get_wavelength_start(self, threshold=0.01):
norm_cum_sum = (np.cumsum(self.transmission_lambda)
/ np.sum(self.transmission_lambda))
return self.wavelength[norm_cum_sum.searchsorted(threshold)]
def get_wavelength_end(self, threshold=0.01):
norm_cum_sum = (np.cumsum(self.transmission_lambda)
/ np.sum(self.transmission_lambda))
return self.wavelength[norm_cum_sum.searchsorted(1 - threshold)]
class FilterCurve(BaseFilterCurve):
def __repr__(self):
if self.filter_id is None:
filter_id = "{0:x}".format(self.__hash__())
else:
filter_id = self.filter_id
return "FilterCurve <{0}>".format(filter_id)
class FilterSet(object):
"""
A set of filters
Parameters
----------
filter_set: ~list
a list of strings or a list of filters
interpolation_kind: ~str
scipy interpolation kinds
"""
def __init__(self, filter_set, interpolation_kind='linear'):
if hasattr(filter_set[0], 'wavelength'):
self.filter_set = filter_set
else:
self.filter_set = [FilterCurve.load_filter(filter_id,
interpolation_kind=
interpolation_kind)
for filter_id in filter_set]
def __iter__(self):
self.current_filter_idx = 0
return self
def __next__(self):
try:
item = self.filter_set[self.current_filter_idx]
except IndexError:
raise StopIteration
self.current_filter_idx += 1
return item
next = __next__
def __getitem__(self, item):
return self.filter_set.__getitem__(item)
def __repr__(self):
return "<{0} \n{1}>".format(self.__class__.__name__,
'\n'.join(
[item.filter_id
for item in self.filter_set]))
@property
def lambda_pivot(self):
return u.Quantity([item.lambda_pivot for item in self])
def calculate_f_lambda(self, spectrum):
return u.Quantity(
[item.calculate_f_lambda(spectrum) for item in self.filter_set])
def calculate_ab_magnitudes(self, spectrum):
mags = [item.calculate_ab_magnitude(spectrum)
for item in self.filter_set]
return mags
def calculate_vega_magnitudes(self, spectrum):
mags = [item.calculate_vega_magnitude(spectrum)
for item in self.filter_set]
return mags
def convert_ab_magnitudes_to_f_lambda(self, magnitudes):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambdas = [filter.convert_ab_magnitude_to_f_lambda(mag)
for filter, mag in zip(self.filter_set, magnitudes)]
return u.Quantity(f_lambdas)
def convert_ab_magnitude_uncertainties_to_f_lambda_uncertainties(
self, magnitudes, magnitude_uncertainties):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambda_positive_uncertainties = u.Quantity(
[filter.convert_ab_magnitude_to_f_lambda(mag + mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties, )])
f_lambda_negative_uncertainties = u.Quantity(
[filter.convert_ab_magnitude_to_f_lambda(mag - mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties)])
return np.abs(u.Quantity((f_lambda_positive_uncertainties,
f_lambda_negative_uncertainties))
- self.convert_ab_magnitudes_to_f_lambda(magnitudes))
def convert_vega_magnitude_uncertainties_to_f_lambda_uncertainties(
self, magnitudes, magnitude_uncertainties):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambda_positive_uncertainties = u.Quantity(
[filter.convert_vega_magnitude_to_f_lambda(mag + mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties, )])
f_lambda_negative_uncertainties = u.Quantity(
[filter.convert_vega_magnitude_to_f_lambda(mag - mag_uncertainty)
for filter, mag, mag_uncertainty in zip(
self.filter_set, magnitudes, magnitude_uncertainties)])
return np.abs(u.Quantity((f_lambda_positive_uncertainties,
f_lambda_negative_uncertainties))
- self.convert_vega_magnitudes_to_f_lambda(magnitudes))
def convert_vega_magnitudes_to_f_lambda(self, magnitudes):
if len(magnitudes) != len(self.filter_set):
raise ValueError("Filter set and magnitudes need to have the same "
"number of items")
f_lambdas = [filter.convert_vega_magnitude_to_f_lambda(mag)
for filter, mag in zip(self.filter_set, magnitudes)]
return u.Quantity(f_lambdas)
def plot_spectrum(self, spectrum, ax, make_labels=True,
spectrum_plot_kwargs={}, filter_plot_kwargs={},
filter_color_list=None, format_filter_id=None):
"""
plot a spectrum with the given filters
spectrum:
ax:
make_labels:
:return:
"""
ax.plot(spectrum.wavelength, spectrum.flux, **spectrum_plot_kwargs)
for i, filter in enumerate(self.filter_set):
filter_scale = filter.calculate_f_lambda(spectrum)
if filter_color_list is not None:
filter_plot_kwargs['color'] = filter_color_list[i]
filter.plot(ax, scale_max=filter_scale, make_label=make_labels,
plot_kwargs=filter_plot_kwargs,
format_filter_id=format_filter_id)
class MagnitudeSet(FilterSet):
def __init__(self, filter_set, magnitudes, magnitude_uncertainties=None,
interpolation_kind='linear'):
super(MagnitudeSet, self).__init__(filter_set,
interpolation_kind=
interpolation_kind)
self.magnitudes = np.array(magnitudes)
self.magnitude_uncertainties = np.array(magnitude_uncertainties)
def __repr__(self):
mag_str = '{0} {1:.4f} +/- {2:.4f}'
mag_data = []
for i, filter in enumerate(self.filter_set):
unc = (np.nan if self.magnitude_uncertainties is None
else self.magnitude_uncertainties[i])
mag_data.append(mag_str.format(filter.filter_id,
self.magnitudes[i], unc))
return "<{0} \n{1}>".format(self.__class__.__name__,
'\n'.join(mag_data))
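# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module).  The docstrings above
# describe loading a filter curve with ``load_filter`` and computing synthetic
# magnitudes; the guarded block below illustrates the intended call pattern.
# The filter identifier "some_facility/some_band" is a placeholder and the
# flat toy spectrum is made up -- both are assumptions, not entries from the
# real filter index.
if __name__ == "__main__":
    wave = np.linspace(3000.0, 9000.0, 1000) * u.angstrom
    flux = np.ones(wave.shape) * u.Unit('erg/s/cm^2/Angstrom')
    spec = Spectrum1D.from_array(wave, flux)

    # list_filters() returns the filter index; load_filter() builds a
    # FilterCurve for one entry of that index.
    print(list_filters())
    example_filter = FilterCurve.load_filter("some_facility/some_band")
    print(example_filter.calculate_ab_magnitude(spec))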
| bsd-3-clause |
ybayle/ReproducibleResearchIEEE2017 | src/svmbff.py | 1 | 22789 | # -*- coding: utf-8 -*-
#!/usr/bin/python
#
# Author Yann Bayle
# E-mail bayle.yann@live.fr
# License MIT
# Created 13/10/2016
# Updated 20/01/2017
# Version 1.0.0
#
"""
Description of svmbff.py
========================
bextract -mfcc -zcrs -ctd -rlf -flx -ws 1024 -as 898 -sv -fe filename.mf -w out.arff
:Example:
python svmbff.py
"""
import os
import csv
import sys
import time
import utils
import shutil
import argparse
import multiprocessing
from statistics import stdev
from scipy.io import arff
from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score
begin = int(round(time.time() * 1000))
def validate_arff(filename):
"""Description of validate_arff
Check if filename exists on path and is a file
If file corresponds to valid arff file return absolute path
Otherwise move file to invalid directory and return False
"""
# Check if file exists
if os.path.isfile(filename) and os.path.exists(filename):
filename = os.path.abspath(filename)
else:
return False
# If does not satisfy min size, move to "empty" folder
if os.stat(filename).st_size < 8100:
tmp_path = filename.split("/")
empty_dirname = "/".join(tmp_path[:-1]) + "/empty/"
if not os.path.exists(empty_dirname):
os.makedirs(empty_dirname)
shutil.move(filename, empty_dirname + tmp_path[-1])
return False
# # If filename does not match with feature name, move to "invalid" folder
# name_file = filename.split("/")[-1][:12]
# with open(filename) as filep:
# for i, line in enumerate(filep):
# if i == 70:
# # 71th line
# name_feat = line.split(" ")[2][1:13]
# break
# if name_file != name_feat:
# tmp_path = filename.split("/")
# invalid_dirname = "/".join(tmp_path[:-1]) + "/invalid/"
# if not os.path.exists(invalid_dirname):
# os.makedirs(invalid_dirname)
# shutil.move(filename, invalid_dirname + tmp_path[-1])
# return False
# If everything went well, return filename absolute path
return filename
def merge_arff(indir, outfilename):
"""Description of merge_arff
The bextract program from Marsyas generates one output file per audio file.
This function merges them all into one single file and
checks that the analysed files are valid, i.e. not empty.
"""
utils.print_success("Preprocessing ARFFs")
indir = utils.abs_path_dir(indir)
filenames = os.listdir(indir)
outfn = open(outfilename, 'w')
cpt_invalid_fn = 0
# Write first lines of ARFF template file
for filename in filenames:
if os.path.isfile(indir + filename):
new_fn = validate_arff(indir + filename)
if new_fn:
with open(new_fn, 'r') as template:
nb_line = 74
for line in template:
if not nb_line:
break
nb_line -= 1
outfn.write(line)
break
else:
cpt_invalid_fn += 1
# Append all arff file to the output file
cur_file_num = 1
for filename in filenames:
if os.path.isfile(indir + filename):
new_fn = validate_arff(indir + filename)
if new_fn:
cur_file_num = cur_file_num + 1
utils.print_progress_start("Analysing file\t" + str(cur_file_num))
fname = open(new_fn, 'r')
outfn.write("".join(fname.readlines()[74:77]))
fname.close()
else:
cpt_invalid_fn += 1
utils.print_progress_end()
outfn.close()
# os.system("rm " + indir + "*.arff")
if cpt_invalid_fn:
utils.print_warning(str(cpt_invalid_fn) + " ARFF files with errors found")
return outfilename
def add_groundtruth(feature_fn, groundtruth_fn, output_fn):
"""Description of add_groundtruth
Write in output filename the groundtruth merged with corresponding features
.. todo:: Error with old_tag not corresponding to filename...
"""
utils.print_success("Adding groundtruth")
feature_fn = utils.abs_path_file(feature_fn)
groundtruth_fn = utils.abs_path_file(groundtruth_fn)
if os.path.isfile(output_fn) and os.path.exists(output_fn):
utils.print_warning("Overwriting existing output file: " +
utils.abs_path_file(output_fn))
# TODO Read groundtruth file in memory
tmp_gt = csv.reader(open(groundtruth_fn, "r"))
groundtruths = {}
for row in tmp_gt:
groundtruths[row[0]] = row[1]
tags = []
output = open(output_fn, "w")
# switch if test set preprocessing
# separator = "_"
separator = "."
with open(feature_fn, "r") as feat:
line_num = 0
tmp_line = ""
for line in feat:
line_num += 1
if line_num > 74:
if line[0] != "%":
# Alter feature line with correct tag
cur_line = line.split(",")
old_tag = cur_line[-1].split(separator)[0]
if old_tag in groundtruths:
new_tag = groundtruths[old_tag]
output.write(tmp_line + ",".join(cur_line[:-1]) + "," + new_tag +"\n")
tmp_line = ""
tags.append(new_tag)
else:
# TODO
# File not in groundtruth
tmp_line = ""
# utils.print_warning("Error with " + old_tag)
else:
tmp_line += line
elif line_num == 2:
output.write("@relation train_test.arff\n")
# output.write("@relation MARSYAS_KEA\n")
elif line_num == 71:
# Alter line 71 containing all tag gathered along the way
# TODO enhance
output.write("@attribute output {i,s}\n")
else:
# Write header
output.write(line)
output.close()
def split_number(number, nb_folds):
"""Description of split_number
Return an int array of size nb_folds where the sum of cells = number
All the integers in cells are the same +-1
"""
if not isinstance(number, int) and not isinstance(nb_folds, int):
utils.print_error("Variable must be integer")
if number < nb_folds:
utils.print_error("Number of folds > Number of data available")
min_num = int(number/nb_folds)
folds = [min_num] * nb_folds
for num in range(0, number-(min_num*nb_folds)):
folds[num] = folds[num] + 1
return folds
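# Worked example for split_number (added sketch, not in the original file):
# split_number(10, 3) gives min_num = 3 and folds = [3, 3, 3]; the remainder
# 10 - 3*3 = 1 is then spread over the first cells, yielding [4, 3, 3].
# Every cell differs from the others by at most 1 and the cells still sum to
# the original number:
#
#     >>> split_number(10, 3)
#     [4, 3, 3]
#     >>> split_number(12, 4)
#     [3, 3, 3, 3]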
def create_folds(filelist, nb_folds, folds_dir, invert_train_test=False):
"""Description of create_folds
"""
utils.print_success("Creating folds")
if nb_folds < 1:
utils.print_error("Wrong number of folds provided")
# folds_dir = "/".join(filelist.split("/")[:-1])
if nb_folds == 1:
# Train and test set are the same
folds_dir = folds_dir + "01_fold/"
utils.create_dir(folds_dir)
os.system("cp " + filelist + " " + folds_dir + "/train_test.arff")
else:
# Create train and test set
folds_dir = folds_dir + str(nb_folds).zfill(2) + "_folds/"
utils.create_dir(folds_dir)
# TODO
# Read filelist
# Extract name and tag
# Separate different tag
# create folds
data, meta = arff.loadarff(filelist)
tags = {}
for row in data:
tag = row[-1].decode("ascii")
if tag in tags:
tags[tag] += 1
else:
tags[tag] = 1
tags_folds = {}
tags_folds_index = {}
for tag in tags:
tags_folds[tag] = split_number(tags[tag], nb_folds)
tags_folds_index[tag] = 0
# Create empty folds
folds = {}
# Init empty folds
for index in range(0, nb_folds):
folds[index] = ""
# Fill folds with data
with open(filelist, "r") as filelist_pointer:
arff_header = ""
tmp = ""
for i, line in enumerate(filelist_pointer):
utils.print_progress_start("\t" + str(i))
# Until the 75th line
if i > 74:
# Process ARFF data
if "% " in line:
# Memorize line
tmp += line
else:
# Get line 3 and add it to corresponding fold
tag = line.split(",")[-1][:-1]
num_fold = tags_folds_index[tag]
if tags_folds[tag][num_fold] == 0:
tags_folds_index[tag] += 1
tags_folds[tag][tags_folds_index[tag]] -= 1
folds[tags_folds_index[tag]] += tmp + line
tmp = ""
else:
# Save ARFF header lines
arff_header += line
utils.print_progress_end()
# At this point data has been split up in different part
# Use this part to create train/test split
if invert_train_test:
# Test is bigger than train
fn_with_min_data = "/train_"
fn_with_max_data = "/test_"
else:
# Train is bigger than test
fn_with_min_data = "/test_"
fn_with_max_data = "/train_"
for index_test in range(0, nb_folds):
filep = open(folds_dir + fn_with_min_data + str(index_test+1).zfill(2) + ".arff", "a")
filep.write(arff_header + folds[index_test])
filep.close()
filep = open(folds_dir + fn_with_max_data + str(index_test+1).zfill(2) + ".arff", "a")
filep.write(arff_header)
for index_train in range(0, nb_folds):
if index_train != index_test:
filep.write(folds[index_train])
filep.close()
return folds_dir
def process_results(in_fn, out_fn):
in_fn = utils.abs_path_file(in_fn)
out_fp = open(out_fn, "w")
with open(in_fn, "r") as filep:
for index, line in enumerate(filep):
if index % 2:
row = line[:-1].split("\t")
out_fp.write(row[0].split("_")[0] + "," + row[2] + "\n")
out_fp.close()
def experiment_2_3():
process_results("src/tmp/svmbff/SVMBFF.csv", "predictions/SVMBFF.csv")
def run_kea(train_file, test_file, out_file, verbose=False):
"""Description of run_kea
Launch kea classification on specified file
"""
kea_cmd = 'kea -m tags -w ' + train_file + ' -tw ' + test_file + ' -pr ' + out_file
if not verbose:
kea_cmd += "> /dev/null 2>&1"
os.system(kea_cmd)
train_dir = train_file.split(os.sep)
train_dir = os.sep.join(train_dir[:-1])
# os.system("rm " + train_dir + "/*affinities*")
test_dir = test_file.split(os.sep)
test_dir = os.sep.join(test_dir[:-1])
# os.system("rm " + test_dir + "/*affinities*")
def run_kea_on_folds(folds_dir):
"""Description of run_kea_on_folds
Wrapper for kea on folds
"""
folds_dir = utils.abs_path_dir(folds_dir)
out_file = folds_dir + "/results.txt"
if os.path.exists(folds_dir + "/train_test.arff"):
train_file = folds_dir + "/train_test.arff"
test_file = train_file
run_kea(train_file, test_file, out_file)
else:
nb_folds = len([name for name in os.listdir(folds_dir) if os.path.isfile(os.path.join(folds_dir, name))])
# Run on multiple train/test
for index in range(1, int(nb_folds/2)+1):
utils.print_progress_start("Train/Test on fold " + str(index))
train_file = folds_dir + "/train_" + str(index).zfill(2) + ".arff"
test_file = folds_dir + "/test_" + str(index).zfill(2) + ".arff"
out_file = folds_dir + "/results_" + str(index).zfill(2) + ".arff"
run_kea(train_file, test_file, out_file)
utils.print_progress_end()
utils.print_warning("TODO multiprocessing")
# # Parallel computing on each TrainTestFolds
# printTitle("Parallel train & test of folds")
# partialRunTrainTestOnFold = partial(runTrainTestOnFold, args=args)
# pool = multiprocessing.Pool()
# pool.map(partialRunTrainTestOnFold, range(nb_folds)) #make our results with a map call
# pool.close() #we are not adding any more processes
# pool.join() #tell it to wait until all threads are done before going on
def extract_feat_train():
dirs = ["/media/sf_SharedFolder/DataSets/Jamendo/Yann/song/",
"/media/sf_SharedFolder/DataSets/ccmixter_corpus/instru/",
"/media/sf_SharedFolder/DataSets/MedleyDB/MedleyDB/instru/vrai/"]
outdir= "res/"
for indir in dirs:
extensions = ["wav", "mp3"]
filenames = [fn for fn in os.listdir(indir)
if any(fn.endswith(ext) for ext in extensions)]
for index, filename in enumerate(filenames):
dirName = indir.split("/")[-2] + ".mf"
with open(dirName, "w") as filep:
filep.write(indir + filename + "\n")
outfilename = outdir + filename[:-3].replace(" ", "_") + "arff"
bextract_cmd = "bextract -mfcc -zcrs -ctd -rlf -flx -ws 1024 -as 898 -sv -fe " + dirName + " -w " + outfilename
os.system(bextract_cmd)
def read_gts(filename):
filename = utils.abs_path_file(filename)
groundtruths = {}
i = 0
with open(filename, "r") as filep:
for index, line in enumerate(filep):
if index > 73:
if i == 0:
i += 1
name = line.split("/")[-1][:-1]
elif i == 1:
i += 1
elif i == 2:
i = 0
groundtruths[name] = line.split(",")[-1][:-1]
return groundtruths
def read_preds(filename):
pres_filen = utils.abs_path_file(filename)
predictions = {}
i = 0
with open(filename, "r") as filep:
for index, line in enumerate(filep):
if index % 2:
line = line.split("\t")
name = line[0].split("/")[-1]
pred = float(line[-1])
if pred > 0.5:
predictions[name] = "s"
else:
predictions[name] = "i"
return predictions
def figure2():
# folds_dir = create_folds("results/dataset.arff", 5)
# run_kea_on_folds(folds_dir)
# read results arff file and print accuracy and f-measure
gts_filen = "results/dataset.arff"
gts = read_gts(gts_filen)
folds_dir = "results/05_folds/"
res_files = [name for name in os.listdir(folds_dir) if os.path.isfile(os.path.join(folds_dir, name)) and "results" in name]
acc = []
f1 = []
for res in res_files:
predictions = []
groundtruths = []
preds = read_preds(folds_dir + res)
for name in preds:
if name in gts:
groundtruths.append(gts[name])
predictions.append(preds[name])
acc.append(accuracy_score(groundtruths, predictions))
predictions = [1 if i=="s" else 0 for i in predictions]
groundtruths = [1 if i=="s" else 0 for i in groundtruths]
f1.append(f1_score(groundtruths, predictions, average='weighted'))
# Print average ± standard deviation
print("Accuracy " + str(sum(acc)/float(len(acc))) + " ± " + str(stdev(acc)))
print("F-Measure " + str(sum(f1)/float(len(f1))) + " ± " + str(stdev(f1)))
dir_stats = utils.create_dir("stats/")
with open(dir_stats + "table1_accuracy.csv", "a") as filep:
filep.write("SVMBFF")
for val in acc:
filep.write("," + str(val))
filep.write("\n")
with open(dir_stats + "table1_f1.csv", "a") as filep:
filep.write("SVMBFF")
for val in f1:
filep.write("," + str(val))
filep.write("\n")
# with open(dir_stats + "table1_accuracy.csv", "a") as filep:
# for val in acc:
# filep.write("SVMBFF," + str(val) + "\n")
# with open(dir_stats + "table1_f1.csv", "a") as filep:
# for val in f1:
# filep.write("SVMBFF," + str(val) + "\n")
def extract_features(tracks_dir="tracks/", feat_dir="features/"):
utils.print_success("Extracting features")
tracks_fn = os.listdir(tracks_dir)
utils.create_dir(feat_dir)
feat_dir = utils.create_dir(feat_dir + "svmbff")
bextract = "bextract -mfcc -zcrs -ctd -rlf -flx -ws 1024 -as 898 -sv -fe "
for index, filename in enumerate(tracks_fn):
utils.print_progress_start(str(index) + "/" + str(len(tracks_fn)) + " " + filename)
track_path = filename + ".mf"
with open(track_path, "w") as filep:
filep.write(tracks_dir + filename + "\n")
new_fn = filename.split(".")[0] + ".arff"
try:
os.system(bextract + track_path + " -w " + new_fn + "> /dev/null 2>&1")
except:
utils.print_info("You have to make marsyas available systemwide, tips:")
utils.print_info("http://marsyas.info/doc/manual/marsyas-user/Step_002dby_002dstep-building-instructions.html#Step_002dby_002dstep-building-instructions")
utils.print_info("http://stackoverflow.com/a/21173918")
utils.print_error("Program exit")
# print(new_fn)
# print(feat_dir + " " + new_fn)
os.rename(new_fn, feat_dir + new_fn)
# os.rename("MARSYAS_EMPTY" + new_fn, feat_dir + new_fn)
os.system("rm " + track_path)
utils.print_progress_end()
os.system("rm bextract_single.mf")
def table1_exp1(folds_dir):
utils.print_success("Experiment 1 in Table 1")
fn_gts = "groundtruths/database1.csv"
gts = utils.read_groundtruths(fn_gts)
res_files = [name for name in os.listdir(folds_dir) if os.path.isfile(os.path.join(folds_dir, name)) and "results" in name]
acc = []
f1 = []
for res in res_files:
predictions = []
groundtruths = []
preds = read_preds(folds_dir + res)
for name in preds:
name_gts = name.split(".")[0]
if name_gts in gts:
groundtruths.append(gts[name_gts])
predictions.append(preds[name])
acc.append(accuracy_score(groundtruths, predictions))
predictions = [1 if i=="s" else 0 for i in predictions]
groundtruths = [1 if i=="s" else 0 for i in groundtruths]
f1.append(f1_score(groundtruths, predictions, average='binary'))
# Print average ± standard deviation
utils.print_info("Accuracy " + str(sum(acc)/float(len(acc))) + " ± " + str(stdev(acc)))
utils.print_info("F-Measure " + str(sum(f1)/float(len(f1))) + " ± " + str(stdev(f1)))
dir_res = utils.create_dir("stats/")
with open(dir_res + "table1_accuracy.csv", "a") as filep:
for val in acc:
filep.write("SVMBFF," + str(val) + "\n")
with open(dir_res + "table1_f1.csv", "a") as filep:
for val in f1:
filep.write("SVMBFF," + str(val) + "\n")
def experiment_1(folder="."):
utils.print_success("SVMBFF Experiment 1 (approx. 1 minutes)")
# Variables
folder = utils.abs_path_dir(folder)
dir_tmp = utils.create_dir(folder + "src/tmp/")
dir_svmbff = utils.create_dir(dir_tmp + "svmbff/")
dir_tracks = folder + "tracks/"
dir_feat = folder + "features/svmbff/"
fn_feats_db1 = dir_svmbff + "svmbff_database1.arff"
feats_gts_db1 = folder + "features/" + "svmbff_database1.arff"
groundtruths = folder + "groundtruths/database1.csv"
extract_features(dir_tracks)
merge_arff(dir_feat, fn_feats_db1)
add_groundtruth(fn_feats_db1, groundtruths, feats_gts_db1)
os.remove(fn_feats_db1)
dir_folds = create_folds(feats_gts_db1, 5, dir_svmbff)
run_kea_on_folds(dir_folds)
table1_exp1(dir_folds)
def main():
utils.print_success("SVMBFF (approx. 2 minutes)")
experiment_1(folder="../")
if __name__ == "__main__":
PARSER = argparse.ArgumentParser(description="Validate list of ISRCs")
PARSER.add_argument(
"-i",
"--input_dir",
help="input directory containing all ARFF file from Marsyas bextract",
type=str,
default="data",
metavar="input_dir")
PARSER.add_argument(
"-o",
"--output_file",
help="output file",
type=str,
default="feat_with_groundtruth.txt",
metavar="output_file")
PARSER.add_argument(
"-g",
"--groundtruth_file",
help="groundtruth file",
type=str,
default="groundtruth.txt",
metavar="groundtruth_file")
PARSER.add_argument(
"-n",
"--nb_folds",
default=1,
type=int,
metavar="nb_folds",
help="classification folds number, must be >= 1, default = 1")
main()
# figure2()
# indir1 = "res/"
# indir2 = "/media/sf_DATA/Datasets/Simbals/new/201611/arff/"
# merge_arff(indir2, "test2.arff")
# utils.print_success("Kea classification")
# # Variable declaration
# input_dir = PARSER.parse_args().input_dir
# res_dir = "analysis"
# utils.create_dir(res_dir)
# if input_dir[-1] == "/":
# input_dir = input_dir[:-1]
# proj_dir = res_dir + "/" + input_dir.split("/")[-1]
# utils.create_dir(proj_dir)
# feat_without_groundtruth = proj_dir + "/feat_without_groundtruth.arff"
# feat_with_groundtruth = proj_dir + "/" + PARSER.parse_args().output_file
# # Functions call
# merge_arff(input_dir, feat_without_groundtruth)
# add_groundtruth(feat_without_groundtruth,
# PARSER.parse_args().groundtruth_file,
# feat_with_groundtruth)
# os.remove(feat_without_groundtruth)
# folds_dir = create_folds(feat_with_groundtruth, PARSER.parse_args().nb_folds, invert_train_test=True)
# folds_dir = create_folds("results/train_kea.arff", 5)
# run_kea_on_folds(folds_dir)
# # 2 merge all arff files dans train/test file (generate train/test folds/set,
# # reuse vqmm) à partir des fichiers sources d'un autre dossier, tout copier
# # dans dossier de svmbff. no-overlap train/Test
# # 3 lancer kea sur toutes les train/test
# # 4 Afficher les résultats
# utils.print_success("Finished in " + str(int(round(time.time() * 1000)) - begin) + "ms")
# """
# kea -m tags -w ' + train_file + ' -tw ' + test_file + ' -pr ' + out_file
# """
| mit |
MJuddBooth/pandas | pandas/tests/series/test_block_internals.py | 2 | 1472 | # -*- coding: utf-8 -*-
import pandas as pd
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestSeriesBlockInternals(object):
def test_setitem_invalidates_datetime_index_freq(self):
# GH#24096 altering a datetime64tz Series inplace invalidates the
# `freq` attribute on the underlying DatetimeIndex
dti = pd.date_range('20130101', periods=3, tz='US/Eastern')
ts = dti[1]
ser = pd.Series(dti)
assert ser._values is not dti
assert ser._values._data.base is not dti._data._data.base
assert dti.freq == 'D'
ser.iloc[1] = pd.NaT
assert ser._values.freq is None
# check that the DatetimeIndex was not altered in place
assert ser._values is not dti
assert ser._values._data.base is not dti._data._data.base
assert dti[1] == ts
assert dti.freq == 'D'
def test_dt64tz_setitem_does_not_mutate_dti(self):
# GH#21907, GH#24096
dti = pd.date_range('2016-01-01', periods=10, tz='US/Pacific')
ts = dti[0]
ser = pd.Series(dti)
assert ser._values is not dti
assert ser._values._data.base is not dti._data._data.base
assert ser._data.blocks[0].values is not dti
assert (ser._data.blocks[0].values._data.base
is not dti._data._data.base)
ser[::3] = pd.NaT
assert ser[0] is pd.NaT
assert dti[0] == ts
| bsd-3-clause |
xunilrj/sandbox | courses/course-edx-dat2031x/Simulation.py | 1 | 2680 | # -*- coding: utf-8 -*-
def sim_normal(nums, mean = 600, sd = 30):
import numpy as np
import numpy.random as nr
for n in nums:
dist = nr.normal(loc = mean, scale = sd, size = n)
titl = 'Normal distribution with ' + str(n) + ' values'
print('Summary for ' + str(n) + ' samples')
print(dist_summary(dist, titl))
print('Empirical 95% CIs')
print(np.percentile(dist, [2.5, 97.5]))
print(' ')
return('Done!')
def sim_poisson(nums, mean = 600):
import numpy as np
import numpy.random as nr
for n in nums:
dist = nr.poisson(lam = mean, size = n)
titl = 'Poisson distribution with ' + str(n) + ' values'
print(dist_summary(dist, titl))
print('Empirical 95% CIs')
print(np.percentile(dist, [2.5, 97.5]))
print(' ')
return('Done!')
def dist_summary(dist, names = 'dist_name'):
import pandas as pd
import matplotlib.pyplot as plt
ser = pd.Series(dist)
fig = plt.figure(1, figsize=(9, 6))
ax = fig.gca()
ser.hist(ax = ax, bins = 120)
ax.set_title('Frequency distribution of ' + names)
ax.set_ylabel('Frequency')
plt.show()
return(ser.describe())
def gen_profits(num):
import numpy.random as nr
unif = nr.uniform(size = num)
out = [5 if x < 0.3 else (3.5 if x < 0.6 else 4) for x in unif]
return(out)
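# Sanity-check note (added, not in the original script): gen_profits maps the
# uniform draws to 5 with probability 0.3, to 3.5 with probability 0.3 and to
# 4 with probability 0.4, so the expected profit per arrival is
# 0.3*5 + 0.3*3.5 + 0.4*4 = 1.5 + 1.05 + 1.6 = 4.15.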
def gen_tips(num):
import numpy.random as nr
unif = nr.uniform(size = num)
out = [0 if x < 0.5 else (0.25 if x < 0.7
else (1.0 if x < 0.9 else 2.0)) for x in unif]
return(out)
def sim_lemonade(num, mean = 600, sd = 30, pois = False):
## Simulate the profits and tips for
## a lemonade stand.
import numpy.random as nr
## number of customer arrivals
if pois:
arrivals = nr.poisson(lam = mean, size = num)
else:
arrivals = nr.normal(loc = mean, scale = sd, size = num)
print(dist_summary(arrivals, 'customer arrivals per day'))
## Compute distribution of average profit per arrival
proft = gen_profits(num)
print(dist_summary(proft, 'profit per arrival'))
## Total profits are profit per arrival
## times number of arrivals.
total_profit = arrivals * proft
print(dist_summary(total_profit, 'total profit per day'))
## Compute distribution of average tips per arrival
tps = gen_tips(num)
print(dist_summary(tps, 'tips per arrival'))
## Compute average tips per day
total_tips = arrivals * tps
print(dist_summary(total_tips, 'total tips per day'))
## Compute total profits plus total tips.
total_take = total_profit + total_tips
return(dist_summary(total_take, 'total net per day'))
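# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): each function above
# takes a sample size (or a list of sample sizes) and prints/plots summary
# statistics.  A minimal driver might look like the following; the sample
# sizes are arbitrary choices.
if __name__ == "__main__":
    sim_normal([100, 1000, 10000])      # normal arrival counts, several sample sizes
    sim_poisson([100, 1000, 10000])     # Poisson arrival counts, same sizes
    sim_lemonade(10000)                 # lemonade-stand net income, normal arrivals
    sim_lemonade(10000, pois = True)    # same simulation with Poisson arrivals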
| apache-2.0 |
EconForge/Smolyak | doc/sphinxext/docscrape_sphinx.py | 62 | 7703 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| mit |
joergkappes/opengm | src/interfaces/python/examples/python_visitor_gui.py | 14 | 1377 | """
Usage: python_visitor_gui.py
This script shows how one can implement visitors
in pure python and inject them into OpenGM solver.
( not all OpenGM solvers support this kind of
code injection )
"""
import opengm
import numpy
import matplotlib
from matplotlib import pyplot as plt
shape=[100,100]
numLabels=10
unaries=numpy.random.rand(shape[0], shape[1],numLabels)
potts=opengm.PottsFunction([numLabels,numLabels],0.0,0.4)
gm=opengm.grid2d2Order(unaries=unaries,regularizer=potts)
inf=opengm.inference.BeliefPropagation(gm,parameter=opengm.InfParam(damping=0.5))
class PyCallback(object):
def __init__(self,shape,numLabels):
self.shape=shape
self.numLabels=numLabels
self.cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( self.numLabels,3))
matplotlib.interactive(True)
def begin(self,inference):
print "begin of inference"
def end(self,inference):
print "end of inference"
def visit(self,inference):
gm=inference.gm()
labelVector=inference.arg()
print "energy ",gm.evaluate(labelVector)
labelVector=labelVector.reshape(self.shape)
plt.imshow(labelVector*255.0, cmap=self.cmap,interpolation="nearest")
plt.draw()
callback=PyCallback(shape,numLabels)
visitor=inf.pythonVisitor(callback,visitNth=1)
inf.infer(visitor)
argmin=inf.arg()
| mit |
UCBerkeleySETI/blimpy | blimpy/plotting/plot_time_series.py | 1 | 1628 | from .config import *
from ..utils import rebin, db
from .plot_utils import calc_extent
def plot_time_series(wf, f_start=None, f_stop=None, if_id=0, logged=True, orientation='h', MJD_time=False, **kwargs):
""" Plot the time series.
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
logged (bool): Plot in linear (False) or dB units (True)
kwargs: keyword args to be passed to matplotlib plot()
"""
ax = plt.gca()
plot_f, plot_data = wf.grab_data(f_start, f_stop, if_id)
# Since the data has been squeezed, the axis for time goes away if only one bin, causing a bug with axis=1
if len(plot_data.shape) > 1:
plot_data = np.nanmean(plot_data, axis=1)
else:
plot_data = np.nanmean(plot_data)
if logged and wf.header['nbits'] >= 8:
plot_data = db(plot_data)
# Make proper time axis for plotting (but only for plotting!). Note that this makes the values inclusive.
extent = calc_extent(wf, plot_f=plot_f, plot_t=wf.timestamps, MJD_time=MJD_time)
plot_t = np.linspace(extent[2], extent[3], len(wf.timestamps))
if MJD_time:
tlabel = "Time [MJD]"
else:
tlabel = "Time [s]"
if logged:
plabel = "Power [dB]"
else:
plabel = "Power [counts]"
# Reverse order if vertical orientation.
if 'v' in orientation:
plt.plot(plot_data, plot_t, **kwargs)
plt.xlabel(plabel)
else:
plt.plot(plot_t, plot_data, **kwargs)
plt.xlabel(tlabel)
plt.ylabel(plabel)
ax.autoscale(axis='both', tight=True)
| bsd-3-clause |
MadsJensen/agency_connectivity | make_df_hilbert_data.py | 1 | 1383 | import numpy as np
import pandas as pd
import scipy.io as sio
from my_settings import *
data = sio.loadmat("/home/mje/Projects/agency_connectivity/Data/data_all.mat")[
"data_all"]
column_keys = ["subject", "trial", "condition", "shift"]
result_df = pd.DataFrame(columns=column_keys)
for k, subject in enumerate(subjects):
p8_invol_shift = data[k, 3] - np.mean(data[k, 0])
p8_vol_shift = data[k, 2] - np.mean(data[k, 0])
p8_vol_bs_shift = data[k, 1] - np.mean(data[k, 0])
for j in range(89):
row = pd.DataFrame([{"trial": int(j),
"subject": subject,
"condition": "vol_bs",
"shift": p8_vol_bs_shift[j + 1][0]}])
result_df = result_df.append(row, ignore_index=True)
for j in range(89):
row = pd.DataFrame([{"trial": int(j),
"subject": subject,
"condition": "vol",
"shift": p8_vol_shift[j + 1][0]}])
result_df = result_df.append(row, ignore_index=True)
for j in range(89):
row = pd.DataFrame([{"trial": int(j),
"subject": subject,
"condition": "invol",
"shift": p8_invol_shift[j][0]}])
result_df = result_df.append(row, ignore_index=True)
| bsd-3-clause |
pyIMS/pyimzML | pyimzml/ImzMLParser.py | 2 | 24463 | # -*- coding: utf-8 -*-
# Copyright 2015 Dominik Fay
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bisect import bisect_left, bisect_right
import sys
import re
from pathlib import Path
from warnings import warn
import numpy as np
from pyimzml.metadata import Metadata, SpectrumData
from pyimzml.ontology.ontology import convert_cv_param
PRECISION_DICT = {"32-bit float": 'f', "64-bit float": 'd', "32-bit integer": 'i', "64-bit integer": 'l'}
SIZE_DICT = {'f': 4, 'd': 8, 'i': 4, 'l': 8}
INFER_IBD_FROM_IMZML = object()
XMLNS_PREFIX = "{http://psi.hupo.org/ms/mzml}"
param_group_elname = "referenceableParamGroup"
data_processing_elname = "dataProcessing"
instrument_confid_elname = "instrumentConfiguration"
def choose_iterparse(parse_lib=None):
if parse_lib == 'ElementTree':
from xml.etree.ElementTree import iterparse
elif parse_lib == 'lxml':
from lxml.etree import iterparse
else:
try:
from lxml.etree import iterparse
except ImportError:
from xml.etree.ElementTree import iterparse
return iterparse
def _get_cv_param(elem, accession, deep=False, convert=False):
base = './/' if deep else ''
node = elem.find('%s%scvParam[@accession="%s"]' % (base, XMLNS_PREFIX, accession))
if node is not None:
if convert:
return convert_cv_param(accession, node.get('value'))
return node.get('value')
class ImzMLParser:
"""
Parser for imzML 1.1.0 files (see specification here:
http://imzml.org/download/imzml/specifications_imzML1.1.0_RC1.pdf).
Iteratively reads the .imzML file into memory while pruning the per-spectrum metadata (everything in
<spectrumList> elements) during initialization. Returns a spectrum upon calling getspectrum(i). The binary file
is read in every call of getspectrum(i). Use enumerate(parser.coordinates) to get all coordinates with their
respective index. Coordinates are always 3-dimensional. If the third spatial dimension is not present in
the data, it will be set to zero.
The global metadata fields in the imzML file are stored in parser.metadata.
Spectrum-specific metadata fields are not stored by default to avoid memory issues;
use the `include_spectra_metadata` parameter if spectrum-specific metadata is needed.
"""
def __init__(
self,
filename,
parse_lib=None,
ibd_file=INFER_IBD_FROM_IMZML,
include_spectra_metadata=None,
):
"""
Opens the two files corresponding to the file name, reads the entire .imzML
file and extracts required attributes. Does not read any binary data, yet.
:param filename:
name of the XML file. Must end with .imzML. Binary data file must be named equally but ending with .ibd
Alternatively an open file or Buffer Protocol object can be supplied, if ibd_file is also supplied
:param parse_lib:
XML-parsing library to use: 'ElementTree' or 'lxml', the later will be used if argument not provided
:param ibd_file:
File or Buffer Protocol object for the .ibd file. Leave blank to infer it from the imzml filename.
Set to None if no data from the .ibd file is needed (getspectrum calls will not work)
:param include_spectra_metadata:
None, 'full', or a list/set of accession IDs.
If 'full' is given, parser.spectrum_full_metadata will be populated with a list of
complex objects containing the full metadata for each spectrum.
If a list or set is given, parser.spectrum_metadata_fields will be populated with a dict mapping
accession IDs to lists. Each list will contain the values for that accession ID for
each spectrum. Note that for performance reasons, this mode only searches the
spectrum itself for the value. It won't check any referenced referenceable param
groups if the accession ID isn't present in the spectrum metadata.
"""
# ElementTree requires the schema location for finding tags (why?) but
# fails to read it from the root element. As this should be identical
# for all imzML files, it is hard-coded here and prepended before every tag
self.sl = "{http://psi.hupo.org/ms/mzml}"
# maps each imzML number format to its struct equivalent
self.precisionDict = dict(PRECISION_DICT)
# maps each number format character to its amount of bytes used
self.sizeDict = dict(SIZE_DICT)
self.filename = filename
self.mzOffsets = []
self.intensityOffsets = []
self.mzLengths = []
self.intensityLengths = []
# list of all (x,y,z) coordinates as tuples.
self.coordinates = []
self.root = None
self.metadata = None
if include_spectra_metadata == 'full':
self.spectrum_full_metadata = []
elif include_spectra_metadata is not None:
include_spectra_metadata = set(include_spectra_metadata)
self.spectrum_metadata_fields = {
k: [] for k in include_spectra_metadata
}
self.mzGroupId = self.intGroupId = self.mzPrecision = self.intensityPrecision = None
self.iterparse = choose_iterparse(parse_lib)
self.__iter_read_spectrum_meta(include_spectra_metadata)
if ibd_file is INFER_IBD_FROM_IMZML:
# name of the binary file
ibd_filename = self._infer_bin_filename(self.filename)
self.m = open(ibd_filename, "rb")
else:
self.m = ibd_file
# Dict for basic imzML metadata other than those required for reading
# spectra. See method __readimzmlmeta()
self.imzmldict = self.__readimzmlmeta()
self.imzmldict['max count of pixels z'] = np.asarray(self.coordinates)[:,2].max()
@staticmethod
def _infer_bin_filename(imzml_path):
imzml_path = Path(imzml_path)
ibd_path = [f for f in imzml_path.parent.glob('*')
if re.match(r'.+\.ibd', str(f), re.IGNORECASE) and f.stem == imzml_path.stem][0]
return str(ibd_path)
# system method for use of 'with ... as'
def __enter__(self):
return self
# system method for use of 'with ... as'
def __exit__(self, exc_t, exc_v, trace):
if self.m is not None:
self.m.close()
def __iter_read_spectrum_meta(self, include_spectra_metadata):
"""
This method should only be called by __init__. Reads the data formats, coordinates and offsets from
the .imzML file and initializes the respective attributes. While traversing the XML tree, the per-spectrum
metadata is pruned, i.e. the <spectrumList> element(s) are left behind empty.
Supported accession values for the number formats: "MS:1000521", "MS:1000523", "IMS:1000141" or
"IMS:1000142". The string values are "32-bit float", "64-bit float", "32-bit integer", "64-bit integer".
"""
mz_group = int_group = None
slist = None
elem_iterator = self.iterparse(self.filename, events=("start", "end"))
if sys.version_info > (3,):
_, self.root = next(elem_iterator)
else:
_, self.root = elem_iterator.next()
for event, elem in elem_iterator:
if elem.tag == self.sl + "spectrumList" and event == "start":
self.__process_metadata()
slist = elem
elif elem.tag == self.sl + "spectrum" and event == "end":
self.__process_spectrum(elem, include_spectra_metadata)
slist.remove(elem)
self.__fix_offsets()
def __fix_offsets(self):
# clean up the mess after morons who use signed 32-bit where unsigned 64-bit is appropriate
def fix(array):
fixed = []
delta = 0
prev_value = float('nan')
for value in array:
if value < 0 and prev_value >= 0:
delta += 2**32
fixed.append(value + delta)
prev_value = value
return fixed
self.mzOffsets = fix(self.mzOffsets)
self.intensityOffsets = fix(self.intensityOffsets)
def __process_metadata(self):
if self.metadata is None:
self.metadata = Metadata(self.root)
for param_id, param_group in self.metadata.referenceable_param_groups.items():
if 'm/z array' in param_group.param_by_name:
self.mzGroupId = param_id
for name, dtype in self.precisionDict.items():
if name in param_group.param_by_name:
self.mzPrecision = dtype
if 'intensity array' in param_group.param_by_name:
self.intGroupId = param_id
for name, dtype in self.precisionDict.items():
if name in param_group.param_by_name:
self.intensityPrecision = dtype
if not hasattr(self, 'mzPrecision'):
raise RuntimeError("Could not determine m/z precision")
if not hasattr(self, 'intensityPrecision'):
raise RuntimeError("Could not determine intensity precision")
def __process_spectrum(self, elem, include_spectra_metadata):
arrlistelem = elem.find('%sbinaryDataArrayList' % self.sl)
mz_group = None
int_group = None
for e in arrlistelem:
ref = e.find('%sreferenceableParamGroupRef' % self.sl).attrib["ref"]
if ref == self.mzGroupId:
mz_group = e
elif ref == self.intGroupId:
int_group = e
self.mzOffsets.append(int(_get_cv_param(mz_group, 'IMS:1000102')))
self.mzLengths.append(int(_get_cv_param(mz_group, 'IMS:1000103')))
self.intensityOffsets.append(int(_get_cv_param(int_group, 'IMS:1000102')))
self.intensityLengths.append(int(_get_cv_param(int_group, 'IMS:1000103')))
scan_elem = elem.find('%sscanList/%sscan' % (self.sl, self.sl))
x = _get_cv_param(scan_elem, 'IMS:1000050')
y = _get_cv_param(scan_elem, 'IMS:1000051')
z = _get_cv_param(scan_elem, 'IMS:1000052')
if z is not None:
self.coordinates.append((int(x), int(y), int(z)))
else:
self.coordinates.append((int(x), int(y), 1))
if include_spectra_metadata == 'full':
self.spectrum_full_metadata.append(
SpectrumData(elem, self.metadata.referenceable_param_groups)
)
elif include_spectra_metadata:
for param in include_spectra_metadata:
value = _get_cv_param(elem, param, deep=True, convert=True)
self.spectrum_metadata_fields[param].append(value)
def __readimzmlmeta(self):
"""
DEPRECATED - use self.metadata instead, as it has much greater detail and allows for
multiple scan settings / instruments.
This method should only be called by __init__. Initializes the imzmldict with frequently used metadata from
the .imzML file.
:return d:
dict containing above mentioned meta data
:rtype:
dict
:raises Warning:
if an xml attribute has a number format different from the imzML specification
"""
d = {}
scan_settings_list_elem = self.root.find('%sscanSettingsList' % self.sl)
instrument_config_list_elem = self.root.find('%sinstrumentConfigurationList' % self.sl)
scan_settings_params = [
("max count of pixels x", "IMS:1000042"),
("max count of pixels y", "IMS:1000043"),
("max dimension x", "IMS:1000044"),
("max dimension y", "IMS:1000045"),
("pixel size x", "IMS:1000046"),
("pixel size y", "IMS:1000047"),
("matrix solution concentration", "MS:1000835"),
]
instrument_config_params = [
("wavelength", "MS:1000843"),
("focus diameter x", "MS:1000844"),
("focus diameter y", "MS:1000845"),
("pulse energy", "MS:1000846"),
("pulse duration", "MS:1000847"),
("attenuation", "MS:1000848"),
]
for name, accession in scan_settings_params:
try:
val = _get_cv_param(scan_settings_list_elem, accession, deep=True, convert=True)
if val is not None:
d[name] = val
except ValueError:
warn(Warning('Wrong data type in XML file. Skipped attribute "%s"' % name))
for name, accession in instrument_config_params:
try:
val = _get_cv_param(instrument_config_list_elem, accession, deep=True, convert=True)
if val is not None:
d[name] = val
except ValueError:
warn(Warning('Wrong data type in XML file. Skipped attribute "%s"' % name))
return d
def get_physical_coordinates(self, i):
"""
For a pixel index i, return the real-world coordinates in nanometers.
This is equivalent to multiplying the image coordinates of the given pixel with the pixel size.
:param i: the pixel index
:return: a tuple of x and y coordinates.
:rtype: Tuple[float]
:raises KeyError: if the .imzML file does not specify the attributes "pixel size x" and "pixel size y"
"""
try:
pixel_size_x = self.imzmldict["pixel size x"]
pixel_size_y = self.imzmldict["pixel size y"]
except KeyError:
raise KeyError("Could not find all pixel size attributes in imzML file")
image_x, image_y = self.coordinates[i][:2]
return image_x * pixel_size_x, image_y * pixel_size_y
def getspectrum(self, index):
"""
Reads the spectrum at specified index from the .ibd file.
:param index:
Index of the desired spectrum in the .imzML file
Output:
mz_array: numpy.ndarray
Sequence of m/z values representing the horizontal axis of the desired mass
spectrum
intensity_array: numpy.ndarray
Sequence of intensity values corresponding to mz_array
"""
mz_bytes, intensity_bytes = self.get_spectrum_as_string(index)
mz_array = np.frombuffer(mz_bytes, dtype=self.mzPrecision)
intensity_array = np.frombuffer(intensity_bytes, dtype=self.intensityPrecision)
return mz_array, intensity_array
def get_spectrum_as_string(self, index):
"""
Reads m/z array and intensity array of the spectrum at specified location
from the binary file as a byte string. The string can be unpacked by the struct
module. To get the arrays as numbers, use getspectrum
:param index:
Index of the desired spectrum in the .imzML file
:rtype: Tuple[str, str]
Output:
mz_string:
string where each character represents a byte of the mz array of the
spectrum
intensity_string:
string where each character represents a byte of the intensity array of
the spectrum
"""
offsets = [self.mzOffsets[index], self.intensityOffsets[index]]
lengths = [self.mzLengths[index], self.intensityLengths[index]]
lengths[0] *= self.sizeDict[self.mzPrecision]
lengths[1] *= self.sizeDict[self.intensityPrecision]
self.m.seek(offsets[0])
mz_string = self.m.read(lengths[0])
self.m.seek(offsets[1])
intensity_string = self.m.read(lengths[1])
return mz_string, intensity_string
def portable_spectrum_reader(self):
"""
Builds a PortableSpectrumReader that holds the coordinates list and spectrum offsets in the .ibd file
so that the .ibd file can be read without opening the .imzML file again.
The PortableSpectrumReader can be safely pickled and unpickled, making it useful for reading the spectra
in a distributed environment such as PySpark or PyWren.
"""
return PortableSpectrumReader(self.coordinates,
self.mzPrecision, self.mzOffsets, self.mzLengths,
self.intensityPrecision, self.intensityOffsets, self.intensityLengths)
def getionimage(p, mz_value, tol=0.1, z=1, reduce_func=sum):
"""
Get an image representation of the intensity distribution
of the ion with specified m/z value.
By default, the intensity values within the tolerance region are summed.
:param p:
the ImzMLParser (or anything else with similar attributes) for the desired dataset
:param mz_value:
m/z value for which the ion image shall be returned
:param tol:
Absolute tolerance for the m/z value, such that all ions with values
mz_value-|tol| <= x <= mz_value+|tol| are included. Defaults to 0.1
:param z:
z Value if spectrogram is 3-dimensional.
:param reduce_func:
        the behaviour for reducing the intensities between mz_value-|tol| and mz_value+|tol| to a single value. Must
be a function that takes a sequence as input and outputs a number. By default, the values are summed.
:return:
numpy matrix with each element representing the ion intensity in this
pixel. Can be easily plotted with matplotlib
"""
tol = abs(tol)
im = np.zeros((p.imzmldict["max count of pixels y"], p.imzmldict["max count of pixels x"]))
for i, (x, y, z_) in enumerate(p.coordinates):
if z_ == 0:
            warn(UserWarning("z coordinate = 0 present, if you're getting blank images set getionimage(.., .., z=0)"))
if z_ == z:
mzs, ints = map(lambda x: np.asarray(x), p.getspectrum(i))
min_i, max_i = _bisect_spectrum(mzs, mz_value, tol)
im[y - 1, x - 1] = reduce_func(ints[min_i:max_i+1])
return im
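# Editorial usage sketch (not part of the original module): how getionimage is
# typically combined with the ImzMLParser class defined earlier in this file.
# The file name, m/z value and tolerance below are hypothetical placeholders.
#
#     p = ImzMLParser("example.imzML")
#     img = getionimage(p, mz_value=885.55, tol=0.25, reduce_func=max)
#     # img[y - 1, x - 1] holds the reduced intensity of each pixel and can be
#     # displayed with matplotlib.pyplot.imshow(img)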
def browse(p):
"""
Create a per-spectrum metadata browser for the parser.
Usage::
# get a list of the instrument configurations used in the first pixel
instrument_configurations = browse(p).for_spectrum(0).get_ids("instrumentConfiguration")
Currently, ``instrumentConfiguration``, ``dataProcessing`` and ``referenceableParamGroup`` are supported.
For browsing all spectra iteratively, you should by all means use **ascending** indices. Doing otherwise can result
in quadratic runtime. The following example shows how to retrieve all unique instrumentConfigurations used::
browser = browse(p)
all_config_ids = set()
for i, _ in enumerate(p.coordinates):
all_config_ids.update(browser.for_spectrum(i).get_ids("instrumentConfiguration"))
This is a list of ids with which you can find the corresponding ``<instrumentConfiguration>`` tag in the xml tree.
:param p: the parser
:return: the browser
"""
return _ImzMLMetaDataBrowser(p.root, p.filename, p.sl)
def _bisect_spectrum(mzs, mz_value, tol):
ix_l, ix_u = bisect_left(mzs, mz_value - tol), bisect_right(mzs, mz_value + tol) - 1
if ix_l == len(mzs):
return len(mzs), len(mzs)
if ix_u < 1:
return 0, 0
if ix_u == len(mzs):
ix_u -= 1
if mzs[ix_l] < (mz_value - tol):
ix_l += 1
if mzs[ix_u] > (mz_value + tol):
ix_u -= 1
return ix_l, ix_u
class _ImzMLMetaDataBrowser(object):
def __init__(self, root, fn, sl):
self._root = root
self._sl = sl
self._fn = fn
self._iter, self._previous, self._list_elem = None, None, None
self.iterparse = choose_iterparse()
def for_spectrum(self, i):
if self._previous is None or i <= self._previous:
self._iter = self.iterparse(self._fn, events=("start", "end"))
for event, s in self._iter:
if s.tag == self._sl + "spectrumList" and event == "start":
self._list_elem = s
elif s.tag == self._sl + "spectrum" and event == "end":
self._list_elem.remove(s)
if s.attrib["index"] == str(i):
self._previous = i
return _SpectrumMetaDataBrowser(self._root, self._sl, s)
class _SpectrumMetaDataBrowser(object):
def __init__(self, root, sl, spectrum):
self._root = root
self._sl = sl
self._spectrum = spectrum
def get_ids(self, element):
param_methods = {
param_group_elname: self._find_referenceable_param_groups,
data_processing_elname: self._find_data_processing,
instrument_confid_elname: self._find_instrument_configurations,
}
try:
return param_methods[element]()
except KeyError as e:
raise ValueError("Unsupported element: " + str(element))
def _find_referenceable_param_groups(self):
param_group_refs = self._spectrum.findall("%sreferenceableParamGroupRef" % self._sl)
ids = map(lambda g: g.attrib["ref"], param_group_refs)
return ids
def _find_instrument_configurations(self):
ids = None
scan_list = self._spectrum.find("%sscanList" % self._sl)
if scan_list:
scans = scan_list.findall("%sscan[@instrumentConfigurationRef]" % self._sl)
ids = map(lambda s: s.attrib["instrumentConfigurationRef"], scans)
if not ids:
            run = self._root.find("%srun" % self._sl)
try:
return [run.attrib["defaultInstrumentConfigurationRef"]]
except KeyError as _:
return list()
else:
return ids
def _find_data_processing(self):
try:
return self._spectrum.attrib["dataProcessingRef"]
except KeyError as _:
spectrum_list = self._root.find("%srun/%sspectrumList" % tuple(2 * [self._sl]))
try:
return [spectrum_list.attrib["defaultDataProcessingRef"]]
except KeyError as _:
return []
class PortableSpectrumReader(object):
"""
A pickle-able class for holding the minimal set of data required for reading,
without holding any references to open files that wouldn't survive pickling.
"""
def __init__(self, coordinates, mzPrecision, mzOffsets, mzLengths,
intensityPrecision, intensityOffsets, intensityLengths):
self.coordinates = coordinates
self.mzPrecision = mzPrecision
self.mzOffsets = mzOffsets
self.mzLengths = mzLengths
self.intensityPrecision = intensityPrecision
self.intensityOffsets = intensityOffsets
self.intensityLengths = intensityLengths
def read_spectrum_from_file(self, file, index):
"""
Reads the spectrum at specified index from the .ibd file.
:param file:
File or file-like object for the .ibd file
:param index:
Index of the desired spectrum in the .imzML file
Output:
mz_array: numpy.ndarray
Sequence of m/z values representing the horizontal axis of the desired mass
spectrum
intensity_array: numpy.ndarray
Sequence of intensity values corresponding to mz_array
"""
file.seek(self.mzOffsets[index])
mz_bytes = file.read(self.mzLengths[index] * SIZE_DICT[self.mzPrecision])
file.seek(self.intensityOffsets[index])
intensity_bytes = file.read(self.intensityLengths[index] * SIZE_DICT[self.intensityPrecision])
mz_array = np.frombuffer(mz_bytes, dtype=self.mzPrecision)
intensity_array = np.frombuffer(intensity_bytes, dtype=self.intensityPrecision)
return mz_array, intensity_array
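# Editorial usage sketch (hedged, not part of the original module): the portable
# reader is intended for distributed settings where only the .ibd file is shipped
# to workers; the file name below is a placeholder.
#
#     reader = parser.portable_spectrum_reader()       # parser: an ImzMLParser
#     payload = pickle.dumps(reader)                    # safe to pickle and ship
#     reader2 = pickle.loads(payload)
#     with open("example.ibd", "rb") as ibd:
#         mzs, intensities = reader2.read_spectrum_from_file(ibd, 0)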
| apache-2.0 |
InnovArul/codesmart | Assignments/Jul-Nov-2017/reinforcement_learning_udemy/rl/monte_carlo_soft_epsilon.py | 1 | 3861 | from __future__ import print_function
import numpy as np
from grid import standard_grid, negative_grid
from iterative_policy_evaluation import print_values, print_policy
import matplotlib.pyplot as plt
from monte_carlo_exploring_starts import max_dict
EPS = 1e-4
GAMMA = 0.9
ALL_POSSIBLE_ACTIONS = {'U', 'D', 'L', 'R'}
def random_action(a, eps=0.1):
p = np.random.random()
if(p < 1 - eps):
return a
else:
return np.random.choice(list(ALL_POSSIBLE_ACTIONS))
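# Editorial note: with the default eps=0.1 the supplied action is kept with
# probability 0.9, and a uniformly random action (possibly the same one) is drawn
# otherwise, so P(returned == a) = 0.9 + 0.1 / 4 = 0.925. A rough empirical check:
#
#     frac = sum(random_action('U') == 'U' for _ in range(10000)) / 10000.0
#     # frac should come out close to 0.925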
# monte carlo sampling - finding out optimal policy (policy iteration)
def play_game(grid, policy):
all_states = list(grid.actions.keys())
state = (2, 0)
    # unlike exploring starts, the first action is drawn epsilon-greedily from the current policy
a = random_action(policy[state])
grid.set_state(state)
    states_actions_rewards = [(state, a, 0)] # each entry stores the action that will be taken from that state
while True:
r = grid.move(a)
state = grid.current_state()
#print(prev_state)
# if game over, break the loop
if grid.game_over():
            states_actions_rewards.append((state, None, r)) # terminal state reached: no further action is taken
break
else:
# collect the next action that we are gonna take and insert into the trace
a = random_action(policy[state])
states_actions_rewards.append((state, a, r))
# calculate the returns by working backwards from terminal state
G = 0
states_actions_returns = []
for i, state_action_reward in enumerate(reversed(states_actions_rewards)):
state, action, reward = state_action_reward
if i != 0:
states_actions_returns.append((state, action, G))
G = reward + GAMMA * G
states_actions_returns.reverse()
return states_actions_returns
def max_dict(hash):
max_key = None
max_val = float('-inf')
for k in hash:
if(hash[k] > max_val):
max_key, max_val = k, hash[k]
return max_key, max_val
if __name__ == '__main__':
#grid = standard_grid()
grid = negative_grid(-0.1)
print('grid')
print_values(grid.rewards, grid)
# init random policy
policy = {}
for s in grid.actions:
policy[s] = np.random.choice(list(ALL_POSSIBLE_ACTIONS))
print('policy')
print_policy(policy, grid)
# initialioze Q(s, a)
Q = {}
returns = {} # buffer to hold all the returns for a state during monte-carlo game plays
for s in grid.actions: # if state is non terminal
Q[s] = {}
for a in ALL_POSSIBLE_ACTIONS:
# for all the possible actions, initialize Q(s,a)
Q[s][a] = 0
returns[(s, a)] = []
# deltas
deltas = []
for sample in range(5000):
if sample % 500 == 0:
print(sample)
biggest_change = 0
# generate an episode and adapt Q(s, a)
states_actions_returns = play_game(grid, policy)
seen_states_actions = set()
for s, a, G in states_actions_returns:
key = (s, a)
            if key not in seen_states_actions:
old_q = Q[s][a]
returns[key].append(G)
Q[s][a] = np.mean(returns[key])
seen_states_actions.add(key)
biggest_change = max(biggest_change, abs(G - old_q))
deltas.append(biggest_change)
# policy improvement
for s in Q:
policy[s] = max_dict(Q[s])[0]
plt.plot(deltas)
plt.show()
V = {}
    # extract the optimal state-value function V(s) = max_a Q(s, a)
for s in Q:
V[s] = max_dict(Q[s])[1]
print('grid')
print_values(V, grid)
print('policy')
print_policy(policy, grid)
| gpl-2.0 |
hainm/MSMs | code/sandbox/tica_kde_svm.py | 3 | 2319 | from sklearn.covariance import EllipticEnvelope
import sklearn.neighbors
from sklearn.svm import OneClassSVM
import os
from msmbuilder import example_datasets, cluster, msm, featurizer, lumping, utils, dataset, decomposition
# added imports so the pylab-style calls below (np, hexbin, plot) resolve outside IPython's %pylab
import numpy as np
from matplotlib.pyplot import hexbin, plot
sysname = os.path.split(os.getcwd())[-1]
dt = 0.25
tica_lagtime = 400
regularization_string = "_012"
X0 = dataset.dataset("./tica/tica%d%s.h5" % (tica_lagtime, regularization_string))
slicer = featurizer.FirstSlicer(2)
X = slicer.transform(X0)
Xf0 = np.concatenate(X)
Xf = Xf0[::50]
hexbin(Xf0[:, 0], Xf0[:, 1], bins='log')
svm = OneClassSVM(nu=0.15)
svm.fit(Xf)
y = svm.predict(Xf)
plot(Xf[y==1][:, 0], Xf[y==1][:, 1], 'kx')
plot(Xf[y==-1][:, 0], Xf[y==-1][:, 1], 'wx')
clusterer = cluster.GMM(n_components=3)
yi = map(lambda x: svm.predict(x), X)
from msmbuilder.cluster import MultiSequenceClusterMixin, BaseEstimator
from sklearn.svm import OneClassSVM
class OneClassSVMTrimmer(MultiSequenceClusterMixin, OneClassSVM, BaseEstimator):
def partial_transform(self, traj):
"""Featurize an MD trajectory into a vector space.
Parameters
----------
traj : mdtraj.Trajectory
A molecular dynamics trajectory to featurize.
Returns
-------
features : np.ndarray, dtype=float, shape=(n_samples, n_features)
A featurized trajectory is a 2D array of shape
`(length_of_trajectory x n_features)` where each `features[i]`
vector is computed by applying the featurization function
to the `i`th snapshot of the input trajectory.
See Also
--------
transform : simultaneously featurize a collection of MD trajectories
"""
pass
def transform(self, traj_list, y=None):
"""Featurize a several trajectories.
Parameters
----------
traj_list : list(mdtraj.Trajectory)
Trajectories to be featurized.
Returns
-------
features : list(np.ndarray), length = len(traj_list)
The featurized trajectories. features[i] is the featurized
version of traj_list[i] and has shape
(n_samples_i, n_features)
"""
return [self.partial_transform(traj) for traj in traj_list]
trimmer = OneClassSVMTrimmer()
trimmer.fit(X[0:10])
| gpl-2.0 |
khrapovs/datastorage | datastorage/compustat.py | 1 | 2589 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Short interest dynamics
"""
from __future__ import print_function, division
import os
import zipfile
import datetime as dt
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
path = os.getenv("HOME") + '/Dropbox/Research/data/Compustat/data/'
# __location__ = os.path.realpath(os.path.join(os.getcwd(),
# os.path.dirname(__file__)))
# path = os.path.join(__location__, path + 'Compustat/data/')
def date_convert(string):
return dt.datetime.strptime(string, '%d-%m-%Y')
def import_data():
"""Import data and save it to the disk.
"""
zf = zipfile.ZipFile(path + 'short_int.zip', 'r')
name = zf.namelist()[0]
short_int = pd.read_csv(zf.open(name),
converters={'datadate': date_convert})
columns = {'datadate': 'date',
'SHORTINTADJ': 'short_int',
'GVKEY': 'gvkey'}
short_int.rename(columns=columns, inplace=True)
short_int.set_index(['gvkey', 'date'], inplace=True)
short_int.sort_index(inplace=True)
short_int.to_hdf(path + 'short_int.h5', key='short_int')
print(short_int.head())
print(short_int.dtypes)
print('Number of unique companies: ',
short_int.index.get_level_values('gvkey').nunique())
print('Number of unique dates: ',
short_int.index.get_level_values('date').nunique())
print('Min and Max date: ',
short_int.index.get_level_values('date').min().date(), ',',
short_int.index.get_level_values('date').max().date())
def load_data():
"""Load data from disk and check for sanity.
"""
return pd.read_hdf(path + 'short_int.h5', 'short_int')
def count_companies(short_int):
"""Plot number of companies over time.
"""
df = short_int.reset_index().groupby('date')['gvkey'].nunique()
sns.set_context('paper')
df.plot(figsize=(10, 3))
plt.show()
data = df.ix[dt.date(2006, 1, 1):dt.date(2007, 6, 30)]
data.plot(figsize=(10, 3))
plt.show()
def mean_short_int(short_int):
"""Mean short interest on each date.
"""
df = short_int.groupby(level='date')['short_int'].mean()
sns.set_context('paper')
df.plot(figsize=(10, 3))
plt.show()
df.ix[:dt.date(2004, 12, 31)].plot(figsize=(10, 3))
plt.show()
df.ix[dt.date(2006, 1, 1):dt.date(2007, 6, 30)].plot(figsize=(10, 3))
plt.show()
if __name__ == '__main__':
import_data()
short_int = load_data()
count_companies(short_int)
mean_short_int(short_int)
| mit |
Achuth17/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
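# Editorial usage sketch: outside the test-suite the same API applies; the metric
# parameters and array shape below are placeholders.
#
#     dm = DistanceMetric.get_metric('minkowski', p=3)
#     D = dm.pairwise(np.random.random((5, 4)))   # 5 x 5 pairwise distance matrix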
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
SanPen/GridCal | src/GridCal/Engine/Simulations/LinearFactors/linear_analysis_ts_driver.py | 1 | 10126 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import json
import pandas as pd
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import spsolve, factorized
import time
from GridCal.Engine.Simulations.result_types import ResultTypes
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.Simulations.PowerFlow.power_flow_options import PowerFlowOptions
from GridCal.Engine.Simulations.LinearFactors.linear_analysis import LinearAnalysis
from GridCal.Engine.Simulations.LinearFactors.linear_analysis_driver import LinearAnalysisOptions
from GridCal.Engine.Simulations.results_model import ResultsModel
from GridCal.Engine.Core.time_series_pf_data import compile_time_circuit
from GridCal.Engine.Simulations.driver_types import SimulationTypes
from GridCal.Engine.Simulations.results_template import ResultsTemplate
from GridCal.Engine.Simulations.driver_template import TSDriverTemplate
class LinearAnalysisTimeSeriesResults(ResultsTemplate):
def __init__(self, n, m, time_array, bus_names, bus_types, branch_names):
"""
TimeSeriesResults constructor
@param n: number of buses
@param m: number of branches
        @param time_array: array of simulation time stamps
        @param bus_names: array of bus names
        @param bus_types: array of bus types
        @param branch_names: array of branch names
"""
ResultsTemplate.__init__(self,
name='Linear Analysis time series',
available_results=[ResultTypes.BusActivePower,
ResultTypes.BranchActivePowerFrom,
ResultTypes.BranchLoading
],
data_variables=['bus_names',
'bus_types',
'time',
'branch_names',
'voltage',
'S',
'Sf',
'loading',
'losses'])
self.nt = len(time_array)
self.m = m
self.n = n
self.time = time_array
self.bus_names = bus_names
self.bus_types = bus_types
self.branch_names = branch_names
self.voltage = np.ones((self.nt, n), dtype=float)
self.S = np.zeros((self.nt, n), dtype=float)
self.Sf = np.zeros((self.nt, m), dtype=float)
self.loading = np.zeros((self.nt, m), dtype=float)
self.losses = np.zeros((self.nt, m), dtype=float)
def apply_new_time_series_rates(self, nc: "TimeCircuit"):
rates = nc.Rates.T
self.loading = self.Sf / (rates + 1e-9)
def get_results_dict(self):
"""
Returns a dictionary with the results sorted in a dictionary
:return: dictionary of 2D numpy arrays (probably of complex numbers)
"""
data = {'V': self.voltage.tolist(),
'P': self.S.real.tolist(),
'Q': self.S.imag.tolist(),
'Sbr_real': self.Sf.real.tolist(),
'Sbr_imag': self.Sf.imag.tolist(),
'loading': np.abs(self.loading).tolist()}
return data
def mdl(self, result_type: ResultTypes) -> "ResultsModel":
"""
Get ResultsModel instance
:param result_type:
:return: ResultsModel instance
"""
if result_type == ResultTypes.BusActivePower:
labels = self.bus_names
data = self.S
y_label = '(MW)'
title = 'Bus active power '
elif result_type == ResultTypes.BranchActivePowerFrom:
labels = self.branch_names
data = self.Sf.real
y_label = '(MW)'
title = 'Branch power '
elif result_type == ResultTypes.BranchLoading:
labels = self.branch_names
data = self.loading * 100
y_label = '(%)'
title = 'Branch loading '
elif result_type == ResultTypes.BranchLosses:
labels = self.branch_names
data = self.losses
y_label = '(MVA)'
title = 'Branch losses'
elif result_type == ResultTypes.BusVoltageModule:
labels = self.bus_names
data = self.voltage
y_label = '(p.u.)'
title = 'Bus voltage'
else:
raise Exception('Result type not understood:' + str(result_type))
if self.time is not None:
index = self.time
else:
index = list(range(data.shape[0]))
# assemble model
return ResultsModel(data=data, index=index, columns=labels, title=title, ylabel=y_label, units=y_label)
class LinearAnalysisTimeSeries(TSDriverTemplate):
name = 'Linear analysis time series'
tpe = SimulationTypes.LinearAnalysis_TS_run
def __init__(self, grid: MultiCircuit, options: LinearAnalysisOptions, start_=0, end_=None):
"""
TimeSeries constructor
@param grid: MultiCircuit instance
@param options: LinearAnalysisOptions instance
"""
TSDriverTemplate.__init__(self, grid=grid, start_=start_, end_=end_)
self.options = options
self.results = LinearAnalysisTimeSeriesResults(n=0,
m=0,
time_array=[],
bus_names=[],
bus_types=[],
branch_names=[])
self.ptdf_driver = LinearAnalysis(grid=self.grid, distributed_slack=self.options.distribute_slack)
def get_steps(self):
"""
Get time steps list of strings
"""
return [l.strftime('%d-%m-%Y %H:%M') for l in self.indices]
def run(self):
"""
Run the time series simulation
@return:
"""
self.__cancel__ = False
a = time.time()
if self.end_ is None:
self.end_ = len(self.grid.time_profile)
time_indices = np.arange(self.start_, self.end_ + 1)
ts_numeric_circuit = compile_time_circuit(self.grid)
self.results = LinearAnalysisTimeSeriesResults(n=ts_numeric_circuit.nbus,
m=ts_numeric_circuit.nbr,
time_array=ts_numeric_circuit.time_array[time_indices],
bus_names=ts_numeric_circuit.bus_names,
bus_types=ts_numeric_circuit.bus_types,
branch_names=ts_numeric_circuit.branch_names)
self.indices = pd.to_datetime(ts_numeric_circuit.time_array[time_indices])
self.progress_text.emit('Computing PTDF...')
linear_analysis = LinearAnalysis(grid=self.grid,
distributed_slack=self.options.distribute_slack,
correct_values=self.options.correct_values
)
linear_analysis.run()
self.progress_text.emit('Computing branch flows...')
Pbus_0 = ts_numeric_circuit.Sbus.real[:, time_indices]
self.results.Sf = linear_analysis.get_flows_time_series(Pbus_0)
# compute post process
self.results.loading = self.results.Sf / (ts_numeric_circuit.Rates[:, time_indices].T + 1e-9)
self.results.S = Pbus_0.T
self.elapsed = time.time() - a
        # send the finish signal
self.progress_signal.emit(0.0)
self.progress_text.emit('Done!')
self.done_signal.emit()
if __name__ == '__main__':
from matplotlib import pyplot as plt
from GridCal.Engine import *
fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE39_1W.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/grid_2_islands.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/1354 Pegase.xlsx'
main_circuit = FileOpen(fname).open()
options_ = LinearAnalysisOptions()
ptdf_driver = LinearAnalysisTimeSeries(grid=main_circuit, options=options_)
ptdf_driver.run()
pf_options_ = PowerFlowOptions(solver_type=SolverType.NR)
ts_driver = TimeSeries(grid=main_circuit, options=pf_options_)
ts_driver.run()
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.set_title('Newton-Raphson based flow')
ax1.plot(ts_driver.results.Sf.real)
ax2 = fig.add_subplot(222)
ax2.set_title('PTDF based flow')
ax2.plot(ptdf_driver.results.Sf.real)
ax3 = fig.add_subplot(223)
ax3.set_title('Difference')
diff = ts_driver.results.Sf.real - ptdf_driver.results.Sf.real
ax3.plot(diff)
fig2 = plt.figure()
ax1 = fig2.add_subplot(221)
ax1.set_title('Newton-Raphson based voltage')
ax1.plot(np.abs(ts_driver.results.voltage))
ax2 = fig2.add_subplot(222)
ax2.set_title('PTDF based voltage')
ax2.plot(ptdf_driver.results.voltage)
ax3 = fig2.add_subplot(223)
ax3.set_title('Difference')
diff = np.abs(ts_driver.results.voltage) - ptdf_driver.results.voltage
ax3.plot(diff)
plt.show()
| gpl-3.0 |
AllenDowney/HeriReligion | archive/thinkplot.py | 3 | 22756 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import warnings
# customize some matplotlib attributes
#matplotlib.rc('figure', figsize=(4, 3))
#matplotlib.rc('font', size=14.0)
#matplotlib.rc('axes', labelsize=22.0, titlesize=22.0)
#matplotlib.rc('legend', fontsize=20.0)
#matplotlib.rc('xtick.major', size=6.0)
#matplotlib.rc('xtick.minor', size=3.0)
#matplotlib.rc('ytick.major', size=6.0)
#matplotlib.rc('ytick.minor', size=3.0)
class _Brewer(object):
"""Encapsulates a nice sequence of colors.
Shades of blue that look good in color and can be distinguished
in grayscale (up to a point).
Borrowed from http://colorbrewer2.org/
"""
color_iter = None
colors = ['#f7fbff', '#deebf7', '#c6dbef',
'#9ecae1', '#6baed6', '#4292c6',
'#2171b5','#08519c','#08306b'][::-1]
# lists that indicate which colors to use depending on how many are used
which_colors = [[],
[1],
[1, 3],
[0, 2, 4],
[0, 2, 4, 6],
[0, 2, 3, 5, 6],
[0, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6, 7],
[0, 1, 2, 3, 4, 5, 6, 7, 8],
]
current_figure = None
@classmethod
def Colors(cls):
"""Returns the list of colors.
"""
return cls.colors
@classmethod
def ColorGenerator(cls, num):
"""Returns an iterator of color strings.
n: how many colors will be used
"""
for i in cls.which_colors[num]:
yield cls.colors[i]
        # the generator simply ends here; next() then raises a plain StopIteration,
        # which _UnderrideColor catches (re-raising StopIteration inside a
        # generator would surface as a RuntimeError under PEP 479)
@classmethod
def InitIter(cls, num):
"""Initializes the color iterator with the given number of colors."""
cls.color_iter = cls.ColorGenerator(num)
fig = plt.gcf()
cls.current_figure = fig
@classmethod
def ClearIter(cls):
"""Sets the color iterator to None."""
cls.color_iter = None
cls.current_figure = None
@classmethod
def GetIter(cls, num):
"""Gets the color iterator."""
fig = plt.gcf()
if fig != cls.current_figure:
cls.InitIter(num)
cls.current_figure = fig
if cls.color_iter is None:
cls.InitIter(num)
return cls.color_iter
def _UnderrideColor(options):
"""If color is not in the options, chooses a color.
"""
if 'color' in options:
return options
# get the current color iterator; if there is none, init one
color_iter = _Brewer.GetIter(5)
try:
options['color'] = next(color_iter)
except StopIteration:
# if you run out of colors, initialize the color iterator
# and try again
warnings.warn('Ran out of colors. Starting over.')
_Brewer.ClearIter()
_UnderrideColor(options)
return options
def PrePlot(num=None, rows=None, cols=None):
"""Takes hints about what's coming.
num: number of lines that will be plotted
rows: number of rows of subplots
cols: number of columns of subplots
"""
if num:
_Brewer.InitIter(num)
if rows is None and cols is None:
return
if rows is not None and cols is None:
cols = 1
if cols is not None and rows is None:
rows = 1
# resize the image, depending on the number of rows and cols
size_map = {(1, 1): (8, 6),
(1, 2): (12, 6),
(1, 3): (12, 6),
(1, 4): (12, 5),
(1, 5): (12, 4),
(2, 2): (10, 10),
(2, 3): (16, 10),
(3, 1): (8, 10),
(4, 1): (8, 12),
}
if (rows, cols) in size_map:
fig = plt.gcf()
fig.set_size_inches(*size_map[rows, cols])
# create the first subplot
if rows > 1 or cols > 1:
ax = plt.subplot(rows, cols, 1)
global SUBPLOT_ROWS, SUBPLOT_COLS
SUBPLOT_ROWS = rows
SUBPLOT_COLS = cols
else:
ax = plt.gca()
return ax
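# Editorial usage sketch (assumes this module is importable as thinkplot; the
# plotted values are placeholders):
#
#     import thinkplot
#     thinkplot.PrePlot(rows=2, cols=1)     # makes the first subplot current
#     thinkplot.Plot([1, 2, 3], [1, 4, 9])
#     thinkplot.SubPlot(2)                  # switch to the second subplot
#     thinkplot.Plot([1, 2, 3], [1, 2, 3])
#     thinkplot.Show()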
def SubPlot(plot_number, rows=None, cols=None, **options):
"""Configures the number of subplots and changes the current plot.
rows: int
cols: int
plot_number: int
options: passed to subplot
"""
rows = rows or SUBPLOT_ROWS
cols = cols or SUBPLOT_COLS
return plt.subplot(rows, cols, plot_number, **options)
def _Underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
If d is None, create a new dictionary.
d: dictionary
options: keyword args to add to d
"""
if d is None:
d = {}
for key, val in options.items():
d.setdefault(key, val)
return d
def Clf():
"""Clears the figure and any hints that have been set."""
global LOC
LOC = None
_Brewer.ClearIter()
plt.clf()
fig = plt.gcf()
fig.set_size_inches(8, 6)
def Figure(**options):
"""Sets options for the current figure."""
_Underride(options, figsize=(6, 8))
plt.figure(**options)
def Plot(obj, ys=None, style='', **options):
"""Plots a line.
Args:
obj: sequence of x values, or Series, or anything with Render()
ys: sequence of y values
style: style string passed along to plt.plot
options: keyword args passed to plt.plot
"""
options = _UnderrideColor(options)
label = getattr(obj, 'label', '_nolegend_')
options = _Underride(options, linewidth=3, alpha=0.7, label=label)
xs = obj
if ys is None:
if hasattr(obj, 'Render'):
xs, ys = obj.Render()
if isinstance(obj, pd.Series):
ys = obj.values
xs = obj.index
if ys is None:
plt.plot(xs, style, **options)
else:
plt.plot(xs, ys, style, **options)
def Vlines(xs, y1, y2, **options):
"""Plots a set of vertical lines.
Args:
xs: sequence of x values
y1: sequence of y values
y2: sequence of y values
options: keyword args passed to plt.vlines
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=1, alpha=0.5)
plt.vlines(xs, y1, y2, **options)
def Hlines(ys, x1, x2, **options):
"""Plots a set of horizontal lines.
Args:
ys: sequence of y values
x1: sequence of x values
x2: sequence of x values
options: keyword args passed to plt.vlines
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=1, alpha=0.5)
plt.hlines(ys, x1, x2, **options)
def axvline(x, **options):
"""Plots a vertical line.
Args:
x: x location
options: keyword args passed to plt.axvline
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=1, alpha=0.5)
plt.axvline(x, **options)
def axhline(y, **options):
"""Plots a horizontal line.
Args:
y: y location
options: keyword args passed to plt.axhline
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=1, alpha=0.5)
plt.axhline(y, **options)
def tight_layout(**options):
"""Adjust subplots to minimize padding and margins.
"""
options = _Underride(options,
wspace=0.1, hspace=0.1,
left=0, right=1,
bottom=0, top=1)
plt.tight_layout()
plt.subplots_adjust(**options)
def FillBetween(xs, y1, y2=None, where=None, **options):
"""Fills the space between two lines.
Args:
xs: sequence of x values
y1: sequence of y values
y2: sequence of y values
where: sequence of boolean
options: keyword args passed to plt.fill_between
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=0, alpha=0.5)
plt.fill_between(xs, y1, y2, where, **options)
def Bar(xs, ys, **options):
"""Plots a line.
Args:
xs: sequence of x values
ys: sequence of y values
options: keyword args passed to plt.bar
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=0, alpha=0.6)
plt.bar(xs, ys, **options)
def Scatter(xs, ys=None, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to plt.scatter
"""
options = _Underride(options, color='blue', alpha=0.2,
s=30, edgecolors='none')
if ys is None and isinstance(xs, pd.Series):
ys = xs.values
xs = xs.index
plt.scatter(xs, ys, **options)
def HexBin(xs, ys, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to plt.scatter
"""
options = _Underride(options, cmap=matplotlib.cm.Blues)
plt.hexbin(xs, ys, **options)
def Pdf(pdf, **options):
"""Plots a Pdf, Pmf, or Hist as a line.
Args:
pdf: Pdf, Pmf, or Hist object
options: keyword args passed to plt.plot
"""
low, high = options.pop('low', None), options.pop('high', None)
n = options.pop('n', 101)
xs, ps = pdf.Render(low=low, high=high, n=n)
options = _Underride(options, label=pdf.label)
Plot(xs, ps, **options)
def Pdfs(pdfs, **options):
"""Plots a sequence of PDFs.
Options are passed along for all PDFs. If you want different
options for each pdf, make multiple calls to Pdf.
Args:
pdfs: sequence of PDF objects
options: keyword args passed to plt.plot
"""
for pdf in pdfs:
Pdf(pdf, **options)
def Hist(hist, **options):
"""Plots a Pmf or Hist with a bar plot.
The default width of the bars is based on the minimum difference
between values in the Hist. If that's too small, you can override
it by providing a width keyword argument, in the same units
as the values.
Args:
hist: Hist or Pmf object
options: keyword args passed to plt.bar
"""
# find the minimum distance between adjacent values
xs, ys = hist.Render()
# see if the values support arithmetic
try:
xs[0] - xs[0]
except TypeError:
# if not, replace values with numbers
labels = [str(x) for x in xs]
xs = np.arange(len(xs))
plt.xticks(xs+0.5, labels)
if 'width' not in options:
try:
options['width'] = 0.9 * np.diff(xs).min()
except TypeError:
            warnings.warn("Hist: Can't compute bar width automatically. "
                          "Check for non-numeric types in Hist. "
                          "Or try providing width option."
                          )
options = _Underride(options, label=hist.label)
options = _Underride(options, align='center')
if options['align'] == 'left':
options['align'] = 'edge'
elif options['align'] == 'right':
options['align'] = 'edge'
options['width'] *= -1
Bar(xs, ys, **options)
def Hists(hists, **options):
"""Plots two histograms as interleaved bar plots.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
hists: list of two Hist or Pmf objects
options: keyword args passed to plt.plot
"""
for hist in hists:
Hist(hist, **options)
def Pmf(pmf, **options):
"""Plots a Pmf or Hist as a line.
Args:
pmf: Hist or Pmf object
options: keyword args passed to plt.plot
"""
xs, ys = pmf.Render()
low, high = min(xs), max(xs)
width = options.pop('width', None)
if width is None:
try:
width = np.diff(xs).min()
except TypeError:
            warnings.warn("Pmf: Can't compute bar width automatically. "
                          "Check for non-numeric types in Pmf. "
                          "Or try providing width option.")
points = []
lastx = np.nan
lasty = 0
for x, y in zip(xs, ys):
if (x - lastx) > 1e-5:
points.append((lastx, 0))
points.append((x, 0))
points.append((x, lasty))
points.append((x, y))
points.append((x+width, y))
lastx = x + width
lasty = y
points.append((lastx, 0))
pxs, pys = zip(*points)
align = options.pop('align', 'center')
if align == 'center':
pxs = np.array(pxs) - width/2.0
if align == 'right':
pxs = np.array(pxs) - width
options = _Underride(options, label=pmf.label)
Plot(pxs, pys, **options)
def Pmfs(pmfs, **options):
"""Plots a sequence of PMFs.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
pmfs: sequence of PMF objects
options: keyword args passed to plt.plot
"""
for pmf in pmfs:
Pmf(pmf, **options)
def Diff(t):
"""Compute the differences between adjacent elements in a sequence.
Args:
t: sequence of number
Returns:
sequence of differences (length one less than t)
"""
diffs = [t[i+1] - t[i] for i in range(len(t)-1)]
return diffs
def Cdf(cdf, complement=False, transform=None, **options):
"""Plots a CDF as a line.
Args:
cdf: Cdf object
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to plt.plot
Returns:
dictionary with the scale options that should be passed to
Config, Show or Save.
"""
xs, ps = cdf.Render()
xs = np.asarray(xs)
ps = np.asarray(ps)
scale = dict(xscale='linear', yscale='linear')
for s in ['xscale', 'yscale']:
if s in options:
scale[s] = options.pop(s)
if transform == 'exponential':
complement = True
scale['yscale'] = 'log'
if transform == 'pareto':
complement = True
scale['yscale'] = 'log'
scale['xscale'] = 'log'
if complement:
ps = [1.0-p for p in ps]
if transform == 'weibull':
xs = np.delete(xs, -1)
ps = np.delete(ps, -1)
ps = [-math.log(1.0-p) for p in ps]
scale['xscale'] = 'log'
scale['yscale'] = 'log'
if transform == 'gumbel':
xs = np.delete(xs, 0)
ps = np.delete(ps, 0)
ps = [-math.log(p) for p in ps]
scale['yscale'] = 'log'
options = _Underride(options, label=cdf.label)
Plot(xs, ps, **options)
return scale
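# Editorial usage sketch: Cdf returns the scale options that should be forwarded
# to Config, Show or Save, e.g. for a log-log complementary CDF; `cdf` is assumed
# to be a thinkstats2.Cdf-like object with Render() and a label.
#
#     scale = Cdf(cdf, transform='pareto')
#     Show(**scale)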
def Cdfs(cdfs, complement=False, transform=None, **options):
"""Plots a sequence of CDFs.
cdfs: sequence of CDF objects
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to plt.plot
"""
for cdf in cdfs:
Cdf(cdf, complement, transform, **options)
def Contour(obj, pcolor=False, contour=True, imshow=False, **options):
"""Makes a contour plot.
d: map from (x, y) to z, or object that provides GetDict
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
imshow: boolean, whether to use plt.imshow
options: keyword args passed to plt.pcolor and/or plt.contour
"""
try:
d = obj.GetDict()
except AttributeError:
d = obj
_Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
xs, ys = zip(*d.keys())
xs = sorted(set(xs))
ys = sorted(set(ys))
X, Y = np.meshgrid(xs, ys)
func = lambda x, y: d.get((x, y), 0)
func = np.vectorize(func)
Z = func(X, Y)
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = plt.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
plt.pcolormesh(X, Y, Z, **options)
if contour:
cs = plt.contour(X, Y, Z, **options)
plt.clabel(cs, inline=1, fontsize=10)
if imshow:
extent = xs[0], xs[-1], ys[0], ys[-1]
plt.imshow(Z, extent=extent, **options)
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):
"""Makes a pseudocolor plot.
xs:
ys:
zs:
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
options: keyword args passed to plt.pcolor and/or plt.contour
"""
_Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
X, Y = np.meshgrid(xs, ys)
Z = zs
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = plt.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
plt.pcolormesh(X, Y, Z, **options)
if contour:
cs = plt.contour(X, Y, Z, **options)
plt.clabel(cs, inline=1, fontsize=10)
def Text(x, y, s, **options):
"""Puts text in a figure.
x: number
y: number
s: string
options: keyword args passed to plt.text
"""
options = _Underride(options,
fontsize=16,
verticalalignment='top',
horizontalalignment='left')
plt.text(x, y, s, **options)
LEGEND = True
LOC = None
def Config(**options):
"""Configures the plot.
Pulls options out of the option dictionary and passes them to
the corresponding plt functions.
"""
names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale',
'xticks', 'yticks', 'axis', 'xlim', 'ylim']
for name in names:
if name in options:
getattr(plt, name)(options[name])
global LEGEND
LEGEND = options.get('legend', LEGEND)
# see if there are any elements with labels;
# if not, don't draw a legend
ax = plt.gca()
handles, labels = ax.get_legend_handles_labels()
if LEGEND and len(labels) > 0:
global LOC
LOC = options.get('loc', LOC)
frameon = options.get('frameon', True)
try:
plt.legend(loc=LOC, frameon=frameon)
except UserWarning:
pass
# x and y ticklabels can be made invisible
val = options.get('xticklabels', None)
if val is not None:
if val == 'invisible':
ax = plt.gca()
labels = ax.get_xticklabels()
plt.setp(labels, visible=False)
val = options.get('yticklabels', None)
if val is not None:
if val == 'invisible':
ax = plt.gca()
labels = ax.get_yticklabels()
plt.setp(labels, visible=False)
def set_font_size(title_size=16, label_size=16, ticklabel_size=14, legend_size=14):
"""Set font sizes for the title, labels, ticklabels, and legend.
"""
def set_text_size(texts, size):
for text in texts:
text.set_size(size)
ax = plt.gca()
# TODO: Make this function more robust if any of these elements
# is missing.
# title
ax.title.set_size(title_size)
# x axis
ax.xaxis.label.set_size(label_size)
set_text_size(ax.xaxis.get_ticklabels(), ticklabel_size)
# y axis
ax.yaxis.label.set_size(label_size)
set_text_size(ax.yaxis.get_ticklabels(), ticklabel_size)
# legend
legend = ax.get_legend()
if legend is not None:
set_text_size(legend.texts, legend_size)
def bigger_text():
sizes = dict(title_size=16, label_size=16, ticklabel_size=14, legend_size=14)
set_font_size(**sizes)
def Show(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various plt functions
"""
clf = options.pop('clf', True)
Config(**options)
plt.show()
if clf:
Clf()
def Plotly(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various plt functions
"""
clf = options.pop('clf', True)
Config(**options)
import plotly.plotly as plotly
url = plotly.plot_mpl(plt.gcf())
if clf:
Clf()
return url
def Save(root=None, formats=None, **options):
"""Saves the plot in the given formats and clears the figure.
For options, see Config.
Note: With a capital S, this is the original save, maintained for
compatibility. New code should use save(), which works better
with my newer code, especially in Jupyter notebooks.
Args:
root: string filename root
formats: list of string formats
options: keyword args used to invoke various plt functions
"""
clf = options.pop('clf', True)
save_options = {}
for option in ['bbox_inches', 'pad_inches']:
if option in options:
save_options[option] = options.pop(option)
    # TODO: calling Config inside Save was probably a mistake, but removing
# it will require some work
Config(**options)
if formats is None:
formats = ['pdf', 'png']
try:
formats.remove('plotly')
Plotly(clf=False)
except ValueError:
pass
if root:
for fmt in formats:
SaveFormat(root, fmt, **save_options)
if clf:
Clf()
def save(root, formats=None, **options):
"""Saves the plot in the given formats and clears the figure.
For options, see plt.savefig.
Args:
root: string filename root
formats: list of string formats
options: keyword args passed to plt.savefig
"""
if formats is None:
formats = ['pdf', 'png']
try:
formats.remove('plotly')
Plotly(clf=False)
except ValueError:
pass
for fmt in formats:
SaveFormat(root, fmt, **options)
def SaveFormat(root, fmt='eps', **options):
"""Writes the current figure to a file in the given format.
Args:
root: string filename root
fmt: string format
"""
_Underride(options, dpi=300)
filename = '%s.%s' % (root, fmt)
print('Writing', filename)
plt.savefig(filename, format=fmt, **options)
# provide aliases for calling functions with lower-case names
preplot = PrePlot
subplot = SubPlot
clf = Clf
figure = Figure
plot = Plot
vlines = Vlines
hlines = Hlines
fill_between = FillBetween
text = Text
scatter = Scatter
pmf = Pmf
pmfs = Pmfs
hist = Hist
hists = Hists
diff = Diff
cdf = Cdf
cdfs = Cdfs
contour = Contour
pcolor = Pcolor
config = Config
show = Show
def main():
color_iter = _Brewer.ColorGenerator(7)
for color in color_iter:
print(color)
if __name__ == '__main__':
main()
| mit |
etkirsch/scikit-learn | sklearn/utils/estimator_checks.py | 21 | 51976 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils import ConvergenceWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
name = Estimator.__class__.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
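# Editorial usage sketch: check_estimator is normally run against an estimator
# class to verify API compliance; the class below is only an illustration.
#
#     from sklearn.linear_model import LogisticRegression
#     check_estimator(LogisticRegression)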
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
                print("Estimator %s doesn't seem to fail gracefully on "
                      "sparse data: the error message should state explicitly "
                      "that sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
    # check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # Check fitting a 2d array with only one sample.
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # Check fitting a 2d array with only one feature.
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting 1d array with 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
    except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
    # check that an error is raised if the number of features changes
    # between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
                    # the one-vs-one decision_function of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset, default n_iter are likely to prevent
# convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
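# Worked example of the 'balanced' heuristic replicated above (illustrative
# arithmetic only): with y = [1, 1, 1, -1, -1] there are n_samples = 5 and
# n_classes = 2, so class_weight = {1: 5 / (3 * 2) ~= 0.833,
# -1: 5 / (2 * 2) = 1.25} -- the rarer class gets the larger weight.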
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D
# Convert into a 2-D y for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
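# Illustrative example (a sketch, assuming a MultiTask* estimator name): a 1-D
# target of shape (n_samples,) becomes a column vector of shape (n_samples, 1):
#
#     >>> multioutput_estimator_convert_y_2d('MultiTaskLasso', np.arange(4)).shape
#     (4, 1)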
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration.
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
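# Minimal driver sketch (illustrative, not part of the original module): each
# check above takes (name, Estimator) and raises on failure, so a single
# estimator class can be exercised directly, e.g.:
#
#     from sklearn.linear_model import LogisticRegression
#     for check in (check_fit2d_predict1d, check_estimators_unfitted):
#         check('LogisticRegression', LogisticRegression)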
| bsd-3-clause |
PyQuake/earthquakemodels | code/runExperiments/histogramMagnitude.py | 1 | 1982 | import matplotlib.pyplot as plt
import models.model as model
import earthquake.catalog as catalog
from collections import OrderedDict
def histogramMagnitude(catalog_, region):
"""
Creates the histogram of magnitudes by a given region.
    Saves the histogram to the following path ./code/Zona2/histograms/'+region+'/Magnitude Histogram of ' + str(year) + " " + region + '.png'
Where region, year are given by the application
From 2000 to 2011
"""
definition = model.loadModelDefinition('../params/' + region + '.txt')
catalogFiltred = catalog.filter(catalog_, definition)
year = 2000
while(year < 2012):
data = dict()
for i in range(len(catalogFiltred)):
if catalogFiltred[i]['year'] == year and catalogFiltred[i]['lat'] > 34.8 and catalogFiltred[i][
'lat'] < 37.05 and catalogFiltred[i]['lon'] > 138.8 and catalogFiltred[i]['lon'] < 141.05:
data[catalogFiltred[i]['mag']] = data.get(catalogFiltred[i]['mag'], 0) + 1
b = OrderedDict(sorted(data.items()))
plt.title('Histogram of ' + str(year) + " " + region)
plt.bar(range(len(data)), b.values(), align='center')
plt.xticks(range(len(data)), b.keys(), rotation=25)
# print(b)
axes = plt.gca()
plt.savefig(
'../Zona2/histograms/'+region+'/Magnitude Histogram of ' +
str(year) +
" " +
region +
            '.png')
        plt.clf()  # clear the figure so the next year's bars are not drawn on top of these
        del data
year += 1
def main():
"""
    Calls function to plot a histogram of magnitudes by region, based on JMA catalog
"""
catalog_ = catalog.readFromFile('../data/jmacat_2000_2013.dat')
region = "Kanto"
histogramMagnitude(catalog_, region)
region = "Kansai"
histogramMagnitude(catalog_, region)
region = "Tohoku"
histogramMagnitude(catalog_, region)
region = "EastJapan"
histogramMagnitude(catalog_, region)
if __name__ == "__main__":
main()
| bsd-3-clause |
rahul-c1/scikit-learn | examples/hetero_feature_union.py | 288 | 6236 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <matt.terry@gmail.com>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
    matrices (where the first index corresponds to the sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
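# Illustrative access pattern (a sketch, not part of the original example): the
# recarray returned by transform() exposes both fields by name, e.g.
#
#     extractor = SubjectBodyExtractor()
#     parts = extractor.fit_transform(raw_posts)  # raw_posts: list of post strings (assumed)
#     parts['subject'], parts['body']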
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
| bsd-3-clause |
edx/ease | ease/model_creator.py | 1 | 7903 | #Provides interface functions to create and save models
import numpy
import re
import nltk
import sys
from sklearn.feature_extraction.text import CountVectorizer
import pickle
import os
import sklearn.ensemble
from itertools import chain
base_path = os.path.dirname(__file__)
sys.path.append(base_path)
from .essay_set import EssaySet
from . import util_functions
from . import feature_extractor
import logging
from . import predictor_extractor
log=logging.getLogger()
def read_in_test_data(filename):
"""
Reads in test data file found at filename.
filename must be a tab delimited file with columns id, dummy number column, score, dummy score, text
returns the score and the text
"""
tid, e_set, score, score2, text = [], [], [], [], []
combined_raw = open(filename).read()
raw_lines = combined_raw.splitlines()
for row in range(1, len(raw_lines)):
tid1, set1, score1, score12, text1 = raw_lines[row].strip().split("\t")
tid.append(int(tid1))
text.append(text1)
e_set.append(int(set1))
score.append(int(score1))
score2.append(int(score12))
return score, text
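# Hypothetical example row for the tab-delimited format described above (the
# first line of the file is skipped as a header); the columns are id, set,
# score, second score, essay text:
#
#     17<TAB>1<TAB>3<TAB>3<TAB>The essay text goes here...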
def read_in_test_prompt(filename):
"""
Reads in the prompt from a text file
Returns string
"""
prompt_string = open(filename).read()
return prompt_string
def read_in_test_data_twocolumn(filename,sep=","):
"""
Reads in a two column version of the test data.
Filename must point to a delimited file.
In filename, the first column should be integer score data.
The second column should be string text data.
Sep specifies the type of separator between fields.
"""
score, text = [], []
combined_raw = open(filename).read()
raw_lines = combined_raw.splitlines()
for row in range(1, len(raw_lines)):
        score1, text1 = raw_lines[row].strip().split(sep)
text.append(text1)
score.append(int(score1))
return score, text
def create_essay_set(text, score, prompt_string, generate_additional=True):
"""
Creates an essay set from given data.
Text should be a list of strings corresponding to essay text.
Score should be a list of scores where score[n] corresponds to text[n]
Prompt string is just a string containing the essay prompt.
Generate_additional indicates whether to generate additional essays at the minimum score point or not.
"""
x = EssaySet()
for i in range(0, len(text)):
x.add_essay(text[i], score[i])
if score[i] == min(score) and generate_additional == True:
x.generate_additional_essays(x._clean_text[len(x._clean_text) - 1], score[i])
x.update_prompt(prompt_string)
return x
def get_cv_error(clf,feats,scores):
"""
Gets cross validated error for a given classifier, set of features, and scores
clf - classifier
feats - features to feed into the classified and cross validate over
scores - scores associated with the features -- feature row 1 associates with score 1, etc.
"""
results={'success' : False, 'kappa' : 0, 'mae' : 0}
try:
cv_preds=util_functions.gen_cv_preds(clf,feats,scores)
err=numpy.mean(numpy.abs(numpy.array(cv_preds)-scores))
kappa=util_functions.quadratic_weighted_kappa(list(cv_preds),scores)
results['mae']=err
results['kappa']=kappa
results['success']=True
except ValueError as ex:
# If this is hit, everything is fine. It is hard to explain why the error occurs, but it isn't a big deal.
msg = u"Not enough classes (0,1,etc) in each cross validation fold: {ex}".format(ex=ex)
log.debug(msg)
except:
log.exception("Error getting cv error estimates.")
return results
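# Usage sketch (illustrative; train_feats and scores stand in for a 2-D feature
# matrix and the matching list of integer scores): the second classifier
# returned by get_algorithms() below is the one intended for this estimate.
#
#     _, cv_clf = get_algorithms(util_functions.AlgorithmTypes.classification)
#     cv_results = get_cv_error(cv_clf, train_feats, scores)
#     if cv_results['success']:
#         log.info("cv kappa=%s mae=%s", cv_results['kappa'], cv_results['mae'])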
def get_algorithms(algorithm):
"""
Gets two classifiers for each type of algorithm, and returns them. First for predicting, second for cv error.
type - one of util_functions.AlgorithmTypes
"""
if algorithm == util_functions.AlgorithmTypes.classification:
clf = sklearn.ensemble.GradientBoostingClassifier(n_estimators=100, learning_rate=.05,
max_depth=4, random_state=1,min_samples_leaf=3)
clf2=sklearn.ensemble.GradientBoostingClassifier(n_estimators=100, learning_rate=.05,
max_depth=4, random_state=1,min_samples_leaf=3)
else:
clf = sklearn.ensemble.GradientBoostingRegressor(n_estimators=100, learning_rate=.05,
max_depth=4, random_state=1,min_samples_leaf=3)
clf2=sklearn.ensemble.GradientBoostingRegressor(n_estimators=100, learning_rate=.05,
max_depth=4, random_state=1,min_samples_leaf=3)
return clf, clf2
def extract_features_and_generate_model_predictors(predictor_set, algorithm=util_functions.AlgorithmTypes.regression):
"""
Extracts features and generates predictors based on a given predictor set
predictor_set - a PredictorSet object that has been initialized with data
type - one of util_functions.AlgorithmType
"""
if(algorithm not in [util_functions.AlgorithmTypes.regression, util_functions.AlgorithmTypes.classification]):
algorithm = util_functions.AlgorithmTypes.regression
f = predictor_extractor.PredictorExtractor()
f.initialize_dictionaries(predictor_set)
train_feats = f.gen_feats(predictor_set)
clf,clf2 = get_algorithms(algorithm)
cv_error_results=get_cv_error(clf2,train_feats,predictor_set._target)
try:
set_score = numpy.asarray(predictor_set._target, dtype=numpy.int)
clf.fit(train_feats, set_score)
except ValueError:
log.exception("Not enough classes (0,1,etc) in sample.")
set_score = predictor_set._target
set_score[0]=1
set_score[1]=0
clf.fit(train_feats, set_score)
return f, clf, cv_error_results
def extract_features_and_generate_model(essays, algorithm=util_functions.AlgorithmTypes.regression):
"""
Feed in an essay set to get feature vector and classifier
essays must be an essay set object
    algorithm is one of util_functions.AlgorithmTypes (defaults to regression)
returns a trained FeatureExtractor object and a trained classifier
"""
f = feature_extractor.FeatureExtractor()
f.initialize_dictionaries(essays)
train_feats = f.gen_feats(essays)
set_score = numpy.asarray(essays._score, dtype=numpy.int)
if len(util_functions.f7(list(set_score)))>5:
algorithm = util_functions.AlgorithmTypes.regression
else:
algorithm = util_functions.AlgorithmTypes.classification
clf,clf2 = get_algorithms(algorithm)
cv_error_results=get_cv_error(clf2,train_feats,essays._score)
try:
clf.fit(train_feats, set_score)
except ValueError:
log.exception("Not enough classes (0,1,etc) in sample.")
set_score[0]=1
set_score[1]=0
clf.fit(train_feats, set_score)
return f, clf, cv_error_results
def dump_model_to_file(prompt_string, feature_ext, classifier, text, score, model_path):
"""
Writes out a model to a file.
prompt string is a string containing the prompt
feature_ext is a trained FeatureExtractor object
classifier is a trained classifier
    text and score are the raw training text and scores stored alongside the model
    model_path is the path to write the model file to
"""
model_file = {'prompt': prompt_string, 'extractor': feature_ext, 'model': classifier, 'text' : text, 'score' : score}
    pickle.dump(model_file, file=open(model_path, "wb"))
def create_essay_set_and_dump_model(text,score,prompt,model_path,additional_array=None):
"""
Function that creates essay set, extracts features, and writes out model
See above functions for argument descriptions
"""
essay_set=create_essay_set(text,score,prompt)
    feature_ext, clf, cv_error_results = extract_features_and_generate_model(essay_set)
    dump_model_to_file(prompt, feature_ext, clf, text, score, model_path)
| agpl-3.0 |
nicproulx/mne-python | mne/time_frequency/tests/test_psd.py | 2 | 7360 | import numpy as np
import os.path as op
from numpy.testing import assert_array_almost_equal, assert_raises
from nose.tools import assert_true
from mne import pick_types, Epochs, read_events
from mne.io import RawArray, read_raw_fif
from mne.utils import requires_version, slow_test, run_tests_if_main
from mne.time_frequency import psd_welch, psd_multitaper
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_fname = op.join(base_dir, 'test-eve.fif')
@requires_version('scipy', '0.12')
def test_psd():
"""Tests the welch and multitaper PSD."""
raw = read_raw_fif(raw_fname)
picks_psd = [0, 1]
# Populate raw with sinusoids
rng = np.random.RandomState(40)
data = 0.1 * rng.randn(len(raw.ch_names), raw.n_times)
freqs_sig = [8., 50.]
for ix, freq in zip(picks_psd, freqs_sig):
data[ix, :] += 2 * np.sin(np.pi * 2. * freq * raw.times)
first_samp = raw._first_samps[0]
raw = RawArray(data, raw.info)
tmin, tmax = 0, 20 # use a few seconds of data
fmin, fmax = 2, 70 # look at frequencies between 2 and 70Hz
n_fft = 128
# -- Raw --
kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
picks=picks_psd) # Common to all
kws_welch = dict(n_fft=n_fft)
kws_mt = dict(low_bias=True)
funcs = [(psd_welch, kws_welch),
(psd_multitaper, kws_mt)]
for func, kws in funcs:
kws = kws.copy()
kws.update(kws_psd)
psds, freqs = func(raw, proj=False, **kws)
psds_proj, freqs_proj = func(raw, proj=True, **kws)
assert_true(psds.shape == (len(kws['picks']), len(freqs)))
assert_true(np.sum(freqs < 0) == 0)
assert_true(np.sum(psds < 0) == 0)
# Is power found where it should be
ixs_max = np.argmax(psds, axis=1)
for ixmax, ifreq in zip(ixs_max, freqs_sig):
# Find nearest frequency to the "true" freq
ixtrue = np.argmin(np.abs(ifreq - freqs))
assert_true(np.abs(ixmax - ixtrue) < 2)
# Make sure the projection doesn't change channels it shouldn't
assert_array_almost_equal(psds, psds_proj)
# Array input shouldn't work
assert_raises(ValueError, func, raw[:3, :20][0])
# test n_per_seg in psd_welch (and padding)
psds1, freqs1 = psd_welch(raw, proj=False, n_fft=128, n_per_seg=128,
**kws_psd)
psds2, freqs2 = psd_welch(raw, proj=False, n_fft=256, n_per_seg=128,
**kws_psd)
assert_true(len(freqs1) == np.floor(len(freqs2) / 2.))
assert_true(psds1.shape[-1] == np.floor(psds2.shape[-1] / 2.))
# tests ValueError when n_per_seg=None and n_fft > signal length
kws_psd.update(dict(n_fft=tmax * 1.1 * raw.info['sfreq']))
assert_raises(ValueError, psd_welch, raw, proj=False, n_per_seg=None,
**kws_psd)
# ValueError when n_overlap > n_per_seg
kws_psd.update(dict(n_fft=128, n_per_seg=64, n_overlap=90))
assert_raises(ValueError, psd_welch, raw, proj=False, **kws_psd)
# -- Epochs/Evoked --
events = read_events(event_fname)
events[:, 0] -= first_samp
tmin, tmax, event_id = -0.5, 0.5, 1
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks_psd,
proj=False, preload=True, baseline=None)
evoked = epochs.average()
tmin_full, tmax_full = -1, 1
epochs_full = Epochs(raw, events[:10], event_id, tmin_full, tmax_full,
picks=picks_psd, proj=False, preload=True,
baseline=None)
kws_psd = dict(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
picks=picks_psd) # Common to all
funcs = [(psd_welch, kws_welch),
(psd_multitaper, kws_mt)]
for func, kws in funcs:
kws = kws.copy()
kws.update(kws_psd)
psds, freqs = func(
epochs[:1], proj=False, **kws)
psds_proj, freqs_proj = func(
epochs[:1], proj=True, **kws)
psds_f, freqs_f = func(
epochs_full[:1], proj=False, **kws)
# this one will fail if you add for example 0.1 to tmin
assert_array_almost_equal(psds, psds_f, 27)
# Make sure the projection doesn't change channels it shouldn't
assert_array_almost_equal(psds, psds_proj, 27)
# Is power found where it should be
ixs_max = np.argmax(psds.mean(0), axis=1)
for ixmax, ifreq in zip(ixs_max, freqs_sig):
# Find nearest frequency to the "true" freq
ixtrue = np.argmin(np.abs(ifreq - freqs))
assert_true(np.abs(ixmax - ixtrue) < 2)
assert_true(psds.shape == (1, len(kws['picks']), len(freqs)))
assert_true(np.sum(freqs < 0) == 0)
assert_true(np.sum(psds < 0) == 0)
# Array input shouldn't work
assert_raises(ValueError, func, epochs.get_data())
# Testing evoked (doesn't work w/ compute_epochs_psd)
psds_ev, freqs_ev = func(
evoked, proj=False, **kws)
psds_ev_proj, freqs_ev_proj = func(
evoked, proj=True, **kws)
# Is power found where it should be
ixs_max = np.argmax(psds_ev, axis=1)
for ixmax, ifreq in zip(ixs_max, freqs_sig):
# Find nearest frequency to the "true" freq
ixtrue = np.argmin(np.abs(ifreq - freqs_ev))
assert_true(np.abs(ixmax - ixtrue) < 2)
# Make sure the projection doesn't change channels it shouldn't
assert_array_almost_equal(psds_ev, psds_ev_proj, 27)
assert_true(psds_ev.shape == (len(kws['picks']), len(freqs)))
@slow_test
@requires_version('scipy', '0.12')
def test_compares_psd():
"""Test PSD estimation on raw for plt.psd and scipy.signal.welch."""
raw = read_raw_fif(raw_fname)
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = pick_types(raw.info, meg='grad', eeg=False, stim=False,
exclude=exclude)[:2]
    tmin, tmax = 0, 10  # use the first 10 s of data
    fmin, fmax = 2, 70  # look at frequencies between 2 and 70 Hz
n_fft = 2048
# Compute psds with the new implementation using Welch
psds_welch, freqs_welch = psd_welch(raw, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, proj=False, picks=picks,
n_fft=n_fft, n_jobs=1)
# Compute psds with plt.psd
start, stop = raw.time_as_index([tmin, tmax])
data, times = raw[picks, start:(stop + 1)]
from matplotlib.pyplot import psd
out = [psd(d, Fs=raw.info['sfreq'], NFFT=n_fft) for d in data]
freqs_mpl = out[0][1]
psds_mpl = np.array([o[0] for o in out])
mask = (freqs_mpl >= fmin) & (freqs_mpl <= fmax)
freqs_mpl = freqs_mpl[mask]
psds_mpl = psds_mpl[:, mask]
assert_array_almost_equal(psds_welch, psds_mpl)
assert_array_almost_equal(freqs_welch, freqs_mpl)
assert_true(psds_welch.shape == (len(picks), len(freqs_welch)))
assert_true(psds_mpl.shape == (len(picks), len(freqs_mpl)))
assert_true(np.sum(freqs_welch < 0) == 0)
assert_true(np.sum(freqs_mpl < 0) == 0)
assert_true(np.sum(psds_welch < 0) == 0)
assert_true(np.sum(psds_mpl < 0) == 0)
run_tests_if_main()
| bsd-3-clause |
aminert/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 205 | 10378 | # Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
    # Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
assert_true(A.dtype == np.float)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
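# Illustrative arithmetic for the patch-count formula used throughout these
# tests: the downsampled lena is 128x128, so 16x16 patches give
# (128 - 16 + 1) * (128 - 16 + 1) = 113 * 113 = 12769 patches in total.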
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
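# Illustrative sketch (not part of the original tests): with an extraction step, the
# number of patch positions along each axis is (image_size - patch_size) // step + 1,
# e.g. image 10, patch 2, step 1 -> 9 positions, matching expected_views_1D above.
def _expected_strided_patch_grid(image_shape, patch_shape, patch_steps):
    return tuple((i - p) // s + 1
                 for i, p, s in zip(image_shape, patch_shape, patch_steps))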
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
jadecastro/LTLMoP | src/lib/handlers/motionControl/RRTController.py | 1 | 37133 | #!/usr/bin/env python
"""
===================================================================
RRTController.py - Rapidly-Exploring Random Trees Motion Controller
===================================================================
Uses the Rapidly-exploring Random Tree algorithm to generate paths from the starting position to the goal point.
"""
from numpy import *
from __is_inside import *
import math
import sys,os
from scipy.linalg import norm
from numpy.matlib import zeros
import __is_inside
import time, sys,os
import scipy as Sci
import scipy.linalg
import Polygon, Polygon.IO
import Polygon.Utils as PolyUtils
import Polygon.Shapes as PolyShapes
from math import sqrt, fabs , pi
import random
import thread
import threading
# importing matplotlib to show the path if possible
try:
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import_matplotlib = True
except:
print "matplotlib is not imported. Plotting is disabled"
import_matplotlib = False
class motionControlHandler:
def __init__(self, proj, shared_data,robot_type,max_angle_goal,max_angle_overlap,plotting):
"""
        Rapidly-Exploring Random Trees algorithm motion planning controller
        robot_type (int): Which robot is used for execution. BasicSim is 1, ODE is 2, ROS is 3, Nao is 4, Pioneer is 5 (default=1)
        max_angle_goal (float): The largest difference in angle between the new node and the goal point that is acceptable. If the difference is bigger than max_angle_goal, the new node will not be connected to the goal point. The value should be within 0 to 6.28 = 2*pi. Default set to 6.28 = 2*pi (default=6.28)
        max_angle_overlap (float): The difference in angle allowed between two overlapping nodes. If you do not want any nodes to overlap, put in 2*pi = 6.28. Default set to 1.57 = pi/2 (default=1.57)
        plotting (bool): Check the box to enable plotting. Uncheck to disable plotting (default=True)
"""
self.system_print = False # for debugging. print on GUI ( a bunch of stuffs)
self.finish_print = False # set to 1 to print the original finished E and V before trimming the tree
self.orientation_print = False # show the orientation information of the robot
# Get references to handlers we'll need to communicate with
self.drive_handler = proj.h_instance['drive']
self.pose_handler = proj.h_instance['pose']
# Get information about regions
self.proj = proj
self.coordmap_map2lab = proj.coordmap_map2lab
self.coordmap_lab2map = proj.coordmap_lab2map
self.last_warning = 0
self.previous_next_reg = None
        # Store the Rapidly-Exploring Random Trees built
self.RRT_V = None # array containing all the points on the RRT Tree
self.RRT_E = None # array specifying the connection of points on the Tree
self.E_current_column = None # the current column on the tree (to find the current heading point)
self.Velocity = None
self.currentRegionPoly = None
self.nextRegionPoly = None
self.map = {}
self.all = Polygon.Polygon()
        self.trans_matrix = mat([[0,1],[-1,0]]) # transformation matrix for finding the normal to a vector
self.stuck_thres = 20 # threshold for changing the range of sampling omega
# Information about the robot (default set to ODE)
if robot_type not in [1,2,3,4,5]:
robot_type = 1
self.system = robot_type
# Information about maximum turning angle allowed from the latest node to the goal point
if max_angle_goal > 2*pi:
max_angle_goal = 2*pi
if max_angle_goal < 0:
max_angle_goal = 0
self.max_angle_allowed = max_angle_goal
# Information about maximum difference in angle allowed between two overlapping nodes
if max_angle_overlap > 2*pi:
max_angle_overlap = 2*pi
if max_angle_overlap < 0:
max_angle_overlap = 0
self.max_angle_overlap = max_angle_overlap
# Information about whether plotting is enabled.
if plotting is True and import_matplotlib == True:
self.plotting = True
else:
self.plotting = False
# Specify the size of the robot
        # 1: basicSim; 2: ODE; 3: ROS; 4: Nao; 5: Pioneer
# self.radius: radius of the robot
# self.timestep : number of linear segments to break the curve into for calculation of x, y position
# self.step_size : the length of each step for connection to goal point
        # self.velocity : velocity of the robot in control space (m/s)
if self.system == 1:
self.radius = 5
self.step_size = 25
self.timeStep = 10
self.velocity = 2 # 1.5
if self.system == 2:
self.radius = 5
self.step_size = 15
self.timeStep = 10
self.velocity = 2 # 1.5
elif self.system == 3:
self.ROSInitHandler = shared_data['ROS_INIT_HANDLER']
self.radius = self.ROSInitHandler.robotPhysicalWidth/2
self.step_size = self.radius*3 #0.2
self.timeStep = 8
self.velocity = self.radius/2 #0.08
elif self.system == 4:
self.radius = 0.15*1.2
self.step_size = 0.2 #set the step_size for points be 1/5 of the norm ORIGINAL = 0.4
self.timeStep = 5
self.velocity = 0.05
elif self.system == 5:
self.radius = 0.15
self.step_size = 0.2 #set the step_size for points be 1/5 of the norm ORIGINAL = 0.4
self.timeStep = 5
self.velocity = 0.05
# Operate_system (int): Which operating system is used for execution.
# Ubuntu and Mac is 1, Windows is 2
if sys.platform in ['win32', 'cygwin']:
self.operate_system = 2
else:
self.operate_system = 1
if self.system_print == True:
print "The operate_system is "+ str(self.operate_system)
# Generate polygon for regions in the map
for region in self.proj.rfi.regions:
self.map[region.name] = self.createRegionPolygon(region)
for n in range(len(region.holeList)): # no of holes
self.map[region.name] -= self.createRegionPolygon(region,n)
# Generate the boundary polygon
for regionName,regionPoly in self.map.iteritems():
self.all += regionPoly
# Start plotting if operating in Windows
if self.operate_system == 2 and self.plotting ==True:
            # start using animation to plot the robot
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.scope = _Scope(self.ax,self)
thread.start_new_thread(self.jplot,())
def gotoRegion(self, current_reg, next_reg, last=False):
"""
If ``last`` is True, we will move to the center of the destination region.
Returns ``True`` if we've reached the destination region.
"""
if current_reg == next_reg and not last:
# No need to move!
self.drive_handler.setVelocity(0, 0) # So let's stop
return True
# Find our current configuration
pose = self.pose_handler.getPose()
# Check if Vicon has cut out
# TODO: this should probably go in posehandler?
if math.isnan(pose[2]):
print "WARNING: No Vicon data! Pausing."
self.drive_handler.setVelocity(0, 0) # So let's stop
time.sleep(1)
return False
###This part will be run when the robot goes to a new region, otherwise, the original tree will be used.
if not self.previous_next_reg == next_reg:
# Entered a new region. New tree should be formed.
self.nextRegionPoly = self.map[self.proj.rfi.regions[next_reg].name]
self.currentRegionPoly = self.map[self.proj.rfi.regions[current_reg].name]
if self.system_print == True:
print "next Region is " + str(self.proj.rfi.regions[next_reg].name)
print "Current Region is " + str(self.proj.rfi.regions[current_reg].name)
#set to zero velocity before tree is generated
self.drive_handler.setVelocity(0, 0)
if last:
transFace = None
else:
# Determine the mid points on the faces connecting to the next region (one goal point will be picked among all the mid points later in buildTree)
transFace = None
q_gBundle = [[],[]] # list of goal points (midpoints of transition faces)
                face_normal = [[],[]] # normal of the transition faces
for i in range(len(self.proj.rfi.transitions[current_reg][next_reg])):
pointArray_transface = [x for x in self.proj.rfi.transitions[current_reg][next_reg][i]]
transFace = asarray(map(self.coordmap_map2lab,pointArray_transface))
bundle_x = (transFace[0,0] +transFace[1,0])/2 #mid-point coordinate x
bundle_y = (transFace[0,1] +transFace[1,1])/2 #mid-point coordinate y
q_gBundle = hstack((q_gBundle,vstack((bundle_x,bundle_y))))
#find the normal vector to the face
face = transFace[0,:] - transFace[1,:]
distance_face = norm(face)
normal = face/distance_face * self.trans_matrix
face_normal = hstack((face_normal,vstack((normal[0,0],normal[0,1]))))
if transFace is None:
print "ERROR: Unable to find transition face between regions %s and %s. Please check the decomposition (try viewing projectname_decomposed.regions in RegionEditor or a text editor)." % (self.proj.rfi.regions[current_reg].name, self.proj.rfi.regions[next_reg].name)
            # Run algorithm to build the Rapidly-Exploring Random Trees
self.RRT_V = None
self.RRT_E = None
# For plotting
if self.operate_system == 2:
if self.plotting == True:
self.ax.cla()
else:
self.ax = None
else:
self.ax = None
if self.operate_system == 1 and self.plotting == True:
plt.cla()
self.plotMap(self.map)
plt.plot(pose[0],pose[1],'ko')
self.RRT_V,self.RRT_E,self.E_current_column = self.buildTree(\
[pose[0], pose[1]],pose[2],self.currentRegionPoly, self.nextRegionPoly,q_gBundle,face_normal)
"""
# map the lab coordinates back to pixels
V_tosend = array(mat(self.RRT_V[1:,:])).T
V_tosend = map(self.coordmap_lab2map, V_tosend)
V_tosend = mat(V_tosend).T
s = 'RRT:E'+"["+str(list(self.RRT_E[0]))+","+str(list(self.RRT_E[1]))+"]"+':V'+"["+str(list(self.RRT_V[0]))+","+str(list(V_tosend[0]))+","+str(list(V_tosend[1]))+"]"+':T'+"["+str(list(q_gBundle[0]))+","+str(list(q_gBundle[1]))+"]"
#print s
"""
# Run algorithm to find a velocity vector (global frame) to take the robot to the next region
self.Velocity = self.getVelocity([pose[0], pose[1]], self.RRT_V,self.RRT_E)
#self.Node = self.getNode([pose[0], pose[1]], self.RRT_V,self.RRT_E)
self.previous_next_reg = next_reg
# Pass this desired velocity on to the drive handler
self.drive_handler.setVelocity(self.Velocity[0,0], self.Velocity[1,0], pose[2])
#self.drive_handler.setVelocity(self.Node[0,0], self.Node[1,0], pose[2])
RobotPoly = Polygon.Shapes.Circle(self.radius,(pose[0],pose[1]))
# check if robot is inside the current region
departed = not self.currentRegionPoly.overlaps(RobotPoly)
arrived = self.nextRegionPoly.covers(RobotPoly)
if departed and (not arrived) and (time.time()-self.last_warning) > 0.5:
# Figure out what region we think we stumbled into
for r in self.proj.rfi.regions:
pointArray = [self.coordmap_map2lab(x) for x in r.getPoints()]
vertices = mat(pointArray).T
if is_inside([pose[0], pose[1]], vertices):
print "I think I'm in " + r.name
print pose
break
self.last_warning = time.time()
#print "arrived:"+str(arrived)
return arrived
def createRegionPolygon(self,region,hole = None):
"""
This function takes in the region points and make it a Polygon.
"""
if hole == None:
pointArray = [x for x in region.getPoints()]
else:
pointArray = [x for x in region.getPoints(hole_id = hole)]
pointArray = map(self.coordmap_map2lab, pointArray)
regionPoints = [(pt[0],pt[1]) for pt in pointArray]
formedPolygon= Polygon.Polygon(regionPoints)
return formedPolygon
def getVelocity(self,p, V, E, last=False):
"""
This function calculates the velocity for the robot with RRT.
The inputs are (given in order):
p = the current x-y position of the robot
E = edges of the tree (2 x No. of nodes on the tree)
V = points of the tree (2 x No. of vertices)
last = True, if the current region is the last region
= False, if the current region is NOT the last region
"""
pose = mat(p).T
#dis_cur = distance between current position and the next point
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
heading = E[1,self.E_current_column] # index of the current heading point on the tree
if norm(dis_cur) < 1.5*self.radius: # go to next point
if not heading == shape(V)[1]-1:
self.E_current_column = self.E_current_column + 1
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
#else:
# dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- vstack((V[1,E[0,self.E_current_column]],V[2,E[0,self.E_current_column]]))
Vel = zeros([2,1])
Vel[0:2,0] = dis_cur/norm(dis_cur)*0.5 #TUNE THE SPEED LATER
return Vel
def getNode(self,p, V, E, last=False):
"""
        This function returns the next node on the RRT for the robot to head towards.
The inputs are (given in order):
p = the current x-y position of the robot
E = edges of the tree (2 x No. of nodes on the tree)
V = points of the tree (2 x No. of vertices)
last = True, if the current region is the last region
= False, if the current region is NOT the last region
"""
pose = mat(p).T
#dis_cur = distance between current position and the next point
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
heading = E[1,self.E_current_column] # index of the current heading point on the tree
if norm(dis_cur) < 1.5*self.radius: # go to next point
if not heading == shape(V)[1]-1:
self.E_current_column = self.E_current_column + 1
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
Node = zeros([2,1])
Node[0,0] = V[1,E[1,self.E_current_column]]
Node[1,0] = V[2,E[1,self.E_current_column]]
#Vel[0:2,0] = dis_cur/norm(dis_cur)*0.5 #TUNE THE SPEED LATER
return Node
def buildTree(self,p,theta,regionPoly,nextRegionPoly,q_gBundle,face_normal, last=False):
"""
This function builds the RRT tree.
p : x,y position of the robot
theta : current orientation of the robot
regionPoly : current region polygon
nextRegionPoly : next region polygon
q_gBundle : coordinates of q_goals that the robot can reach
face_normal : the normal vector of each face corresponding to each goal point in q_gBundle
"""
q_init = mat(p).T
V = vstack((0,q_init))
theta = self.orientation_bound(theta)
V_theta = array([theta])
#!!! CONTROL SPACE: generate a list of omega for random sampling
        omegaLowerBound = -math.pi/20 # lower bound for the value of omega
        omegaUpperBound = math.pi/20 # upper bound for the value of omega
omegaNoOfSteps = 20
self.omega_range = linspace(omegaLowerBound,omegaUpperBound,omegaNoOfSteps)
self.omega_range_escape = linspace(omegaLowerBound*4,omegaUpperBound*4,omegaNoOfSteps*4) # range used when stuck > stuck_thres
regionPolyOld = Polygon.Polygon(regionPoly)
regionPoly += PolyShapes.Circle(self.radius*2.5,(q_init[0,0],q_init[1,0]))
# check faces of the current region for goal points
E = [[],[]] # the tree matrix
Other = [[],[]]
path = False # if path formed then = 1
stuck = 0 # count for changing the range of sampling omega
append_after_latest_node = False # append new nodes to the latest node
if self.system_print == True:
print "plotting in buildTree is " + str(self.plotting)
if self.plotting == True:
if not plt.isinteractive():
plt.ion()
plt.hold(True)
while not path:
#step -1: try connection to q_goal (generate path to goal)
i = 0
if self.system_print == True:
print "Try Connection to the goal points"
# pushing possible q_goals into the current region (ensure path is covered by the current region polygon)
q_pass = [[],[],[]]
q_pass_dist = []
q_gBundle = mat(q_gBundle)
face_normal = mat(face_normal)
while i < q_gBundle.shape[1]:
q_g_original = q_gBundle[:,i]
q_g = q_gBundle[:,i]+face_normal[:,i]*1.5*self.radius ##original 2*self.radius
#q_g = q_gBundle[:,i]+(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])*1.5*self.radius ##original 2*self.radius
if not regionPolyOld.isInside(q_g[0],q_g[1]):
#q_g = q_gBundle[:,i]-(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])*1.5*self.radius ##original 2*self.radius
q_g = q_gBundle[:,i]-face_normal[:,i]*1.5*self.radius ##original 2*self.radius
#forming polygon for path checking
EdgePolyGoal = PolyShapes.Circle(self.radius,(q_g[0,0],q_g[1,0])) + PolyShapes.Circle(self.radius,(V[1,shape(V)[1]-1],V[2:,shape(V)[1]-1]))
EdgePolyGoal = PolyUtils.convexHull(EdgePolyGoal)
dist = norm(q_g - V[1:,shape(V)[1]-1])
#check connection to goal
connect_goal = regionPoly.covers(EdgePolyGoal) #check coverage of path from new point to goal
# compare orientation difference
thetaPrev = V_theta[shape(V)[1]-1]
theta_orientation = abs(arctan((q_g[1,0]- V[2,shape(V)[1]-1])/(q_g[0,0]- V[1,shape(V)[1]-1])))
if q_g[1,0] > V[2,shape(V)[1]-1]:
if q_g[0,0] < V[1,shape(V)[1]-1]: # second quadrant
theta_orientation = pi - theta_orientation
elif q_g[0,0] > V[1,shape(V)[1]-1]: # first quadrant
theta_orientation = theta_orientation
elif q_g[1,0] < V[2,shape(V)[1]-1]:
if q_g[0,0] < V[1,shape(V)[1]-1]: #third quadrant
theta_orientation = pi + theta_orientation
                    elif q_g[0,0] > V[1,shape(V)[1]-1]: # fourth quadrant
theta_orientation = 2*pi - theta_orientation
# check the angle between vector(new goal to goal_original ) and vector( latest node to new goal)
Goal_to_GoalOriginal = q_g_original - q_g
LatestNode_to_Goal = q_g - V[1:,shape(V)[1]-1]
Angle_Goal_LatestNode= arccos(vdot(array(Goal_to_GoalOriginal), array(LatestNode_to_Goal))/norm(Goal_to_GoalOriginal)/norm(LatestNode_to_Goal))
# if connection to goal can be established and the max change in orientation of the robot is smaller than max_angle, tree is said to be completed.
if self.orientation_print == True:
print "theta_orientation is " + str(theta_orientation)
print "thetaPrev is " + str(thetaPrev)
print "(theta_orientation - thetaPrev) is " + str(abs(theta_orientation - thetaPrev))
print "self.max_angle_allowed is " + str(self.max_angle_allowed)
print "abs(theta_orientation - thetaPrev) < self.max_angle_allowed" + str(abs(theta_orientation - thetaPrev) < self.max_angle_allowed)
print"Goal_to_GoalOriginal: " + str( array(Goal_to_GoalOriginal)) + "; LatestNode_to_Goal: " + str( array(LatestNode_to_Goal))
print vdot(array(Goal_to_GoalOriginal), array(LatestNode_to_Goal))
print "Angle_Goal_LatestNode" + str(Angle_Goal_LatestNode)
if connect_goal and (abs(theta_orientation - thetaPrev) < self.max_angle_allowed) and (Angle_Goal_LatestNode < self.max_angle_allowed):
path = True
q_pass = hstack((q_pass,vstack((i,q_g))))
q_pass_dist = hstack((q_pass_dist,dist))
i = i + 1
if self.system_print == True:
print "checked goal points"
self.E = E
self.V = V
# connection to goal has established
# Obtain the closest goal point that path can be formed.
if path:
if shape(q_pass_dist)[0] == 1:
cols = 0
else:
(cols,) = nonzero(q_pass_dist == min(q_pass_dist))
cols = asarray(cols)[0]
q_g = q_pass[1:,cols]
"""
q_g = q_g-(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])*3*self.radius #org 3
if not nextRegionPoly.isInside(q_g[0],q_g[1]):
q_g = q_g+(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])*6*self.radius #org 3
"""
if self.plotting == True :
if self.operate_system == 1:
plt.suptitle('Rapidly-exploring Random Tree', fontsize=12)
plt.xlabel('x')
plt.ylabel('y')
if shape(V)[1] <= 2:
plt.plot(( V[1,shape(V)[1]-1],q_g[0,0]),( V[2,shape(V)[1]-1],q_g[1,0]),'b')
else:
plt.plot(( V[1,E[0,shape(E)[1]-1]], V[1,shape(V)[1]-1],q_g[0,0]),( V[2,E[0,shape(E)[1]-1]], V[2,shape(V)[1]-1],q_g[1,0]),'b')
plt.plot(q_g[0,0],q_g[1,0],'ko')
plt.figure(1).canvas.draw()
else:
BoundPolyPoints = asarray(PolyUtils.pointList(regionPoly))
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],'k')
if shape(V)[1] <= 2:
self.ax.plot(( V[1,shape(V)[1]-1],q_g[0,0]),( V[2,shape(V)[1]-1],q_g[1,0]),'b')
else:
self.ax.plot(( V[1,E[0,shape(E)[1]-1]], V[1,shape(V)[1]-1],q_g[0,0]),( V[2,E[0,shape(E)[1]-1]], V[2,shape(V)[1]-1],q_g[1,0]),'b')
self.ax.plot(q_g[0,0],q_g[1,0],'ko')
# trim the path connecting current node to goal point into pieces if the path is too long now
numOfPoint = floor(norm(V[1:,shape(V)[1]-1]- q_g)/self.step_size)
if numOfPoint < 3:
numOfPoint = 3
x = linspace( V[1,shape(V)[1]-1], q_g[0,0], numOfPoint )
y = linspace( V[2,shape(V)[1]-1], q_g[1,0], numOfPoint )
for i in range(x.shape[0]):
if i != 0:
V = hstack((V,vstack((shape(V)[1],x[i],y[i]))))
E = hstack((E,vstack((shape(V)[1]-2,shape(V)[1]-1))))
#push the goal point to the next region
q_g = q_g+face_normal[:,q_pass[0,cols]]*3*self.radius ##original 2*self.radius
if not nextRegionPoly.isInside(q_g[0],q_g[1]):
q_g = q_g-face_normal[:,q_pass[0,cols]]*6*self.radius ##original 2*self.radius
V = hstack((V,vstack((shape(V)[1],q_g[0,0],q_g[1,0]))))
E = hstack((E,vstack((shape(V)[1]-2 ,shape(V)[1]-1))))
if self.plotting == True :
if self.operate_system == 1:
plt.plot(q_g[0,0],q_g[1,0],'ko')
plt.plot(( V[1,shape(V)[1]-1],V[1,shape(V)[1]-2]),( V[2,shape(V)[1]-1],V[2,shape(V)[1]-2]),'b')
plt.figure(1).canvas.draw()
else:
self.ax.plot(q_g[0,0],q_g[1,0],'ko')
self.ax.plot(( V[1,shape(V)[1]-1],V[1,shape(V)[1]-2]),( V[2,shape(V)[1]-1],V[2,shape(V)[1]-2]),'b')
# path is not formed, try to append points onto the tree
if not path:
# connection_to_tree : connection to the tree is successful
if append_after_latest_node:
V,V_theta,E,Other,stuck,append_after_latest_node, connection_to_tree = self.generateNewNode(V,V_theta,E,Other,regionPoly,stuck, append_after_latest_node)
else:
connection_to_tree = False
while not connection_to_tree:
V,V_theta,E,Other,stuck,append_after_latest_node, connection_to_tree = self.generateNewNode (V,V_theta,E,Other,regionPoly,stuck)
if self.finish_print:
print 'Here is the V matrix:', V, 'Here is the E matrix:',E
print >>sys.__stdout__, 'Here is the V matrix:\n', V, '\nHere is the E matrix:\n',E
#B: trim to a single path
single = 0
while single == 0:
trim = 0
for j in range(shape(V)[1]-3):
(row,col) = nonzero(E == j+1)
if len(col) == 1:
E = delete(E, col[0], 1)
trim = 1
if trim == 0:
single = 1;
        #### plot with matplotlib
if self.plotting ==True :
if self.operate_system == 1:
plt.plot(V[1,:],V[2,:],'b')
for i in range(shape(E)[1]):
plt.text(V[1,E[0,i]],V[2,E[0,i]], V[0,E[0,i]], fontsize=12)
plt.text(V[1,E[1,i]],V[2,E[1,i]], V[0,E[1,i]], fontsize=12)
plt.figure(1).canvas.draw()
else:
BoundPolyPoints = asarray(PolyUtils.pointList(regionPoly))
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],'k')
self.ax.plot(V[1,:],V[2,:],'b')
for i in range(shape(E)[1]):
self.ax.text(V[1,E[0,i]],V[2,E[0,i]], V[0,E[0,i]], fontsize=12)
self.ax.text(V[1,E[1,i]],V[2,E[1,i]], V[0,E[1,i]], fontsize=12)
#return V, E, and the current node number on the tree
V = array(V)
return V, E, 0
def generateNewNode(self,V,V_theta,E,Other,regionPoly,stuck,append_after_latest_node =False):
"""
Generate a new node on the current tree matrix
V : the node matrix
V_theta : the orientation matrix
E : the tree matrix (or edge matrix)
Other : the matrix containing the velocity and angular velocity(omega) information
regionPoly: the polygon of current region
stuck : count on the number of times failed to generate new node
append_after_latest_node : append new nodes to the latest node (True only if the previous node addition is successful)
"""
if self.system_print == True:
print "In control space generating path,stuck = " + str(stuck)
connection_to_tree = False # True when connection to the tree is successful
if stuck > self.stuck_thres:
            # increase the range of omega since a path cannot be generated
omega = random.choice(self.omega_range_escape)
else:
#!!!! CONTROL SPACE STEP 1 - generate random omega
omega = random.choice(self.omega_range)
#!!!! CONTROL SPACE STEP 2 - pick a random point on the tree
if append_after_latest_node:
tree_index = shape(V)[1]-1
else:
if random.choice([1,2]) == 1:
tree_index = random.choice(array(V[0])[0])
else:
tree_index = shape(V)[1]-1
xPrev = V[1,tree_index]
yPrev = V[2,tree_index]
thetaPrev = V_theta[tree_index]
j = 1
#!!!! CONTROL SPACE STEP 3 - Check path of the robot
path_robot = PolyShapes.Circle(self.radius,(xPrev,yPrev))
while j <= self.timeStep:
xOrg = xPrev
yOrg = yPrev
xPrev = xPrev + self.velocity/omega*(sin(omega* 1 + thetaPrev)-sin(thetaPrev))
yPrev = yPrev - self.velocity/omega*(cos(omega* 1 + thetaPrev)-cos(thetaPrev))
thetaPrev = omega* 1 + thetaPrev
path_robot = path_robot + PolyShapes.Circle(self.radius,(xPrev,yPrev))
j = j + 1
thetaPrev = self.orientation_bound(thetaPrev)
path_all = PolyUtils.convexHull(path_robot)
in_bound = regionPoly.covers(path_all)
"""
# plotting
if plotting == True:
self.plotPoly(path_all,'r',1)
"""
stuck = stuck + 1
if in_bound:
robot_new_node = PolyShapes.Circle(self.radius,(xPrev,yPrev))
# check how many nodes on the tree does the new node overlaps with
nodes_overlap_count = 0
for k in range(shape(V)[1]-1):
robot_old_node = PolyShapes.Circle(self.radius,(V[1,k],V[2,k]))
if robot_new_node.overlaps(robot_old_node):
if abs(thetaPrev - V_theta[k]) < self.max_angle_overlap:
nodes_overlap_count += 1
if nodes_overlap_count == 0 or (stuck > self.stuck_thres+1 and nodes_overlap_count < 2) or (stuck > self.stuck_thres+500):
if stuck > self.stuck_thres+1:
append_after_latest_node = False
if (stuck > self.stuck_thres+500):
stuck = 0
stuck = stuck - 20
# plotting
if self.plotting == True:
self.plotPoly(path_all,'b',1)
if self.system_print == True:
print "node connected"
V = hstack((V,vstack((shape(V)[1],xPrev,yPrev))))
V_theta = hstack((V_theta,thetaPrev))
E = hstack((E,vstack((tree_index ,shape(V)[1]-1))))
Other = hstack((Other,vstack((self.velocity,omega))))
##################### E should add omega and velocity
connection_to_tree = True
append_after_latest_node = True
else:
append_after_latest_node = False
if self.system_print == True:
print "node not connected. check goal point"
else:
append_after_latest_node = False
return V,V_theta,E,Other,stuck,append_after_latest_node, connection_to_tree
def orientation_bound(self,theta):
"""
        make sure the returned angle is between 0 and 2*pi
"""
while theta > 2*pi or theta < 0:
if theta > 2*pi:
theta = theta - 2*pi
else:
theta = theta + 2*pi
return theta
def plotMap(self,mappedRegions):
"""
Plotting regions and obstacles with matplotlib.pyplot
        mappedRegions: dictionary mapping region names to their polygons
"""
#if not plt.isinteractive():
# plt.ion()
#plt.hold(True)
if self.operate_system == 1:
for regionName,regionPoly in mappedRegions.iteritems():
self.plotPoly(regionPoly,'k')
plt.figure(1).canvas.draw()
def plotPoly(self,c,string,w = 1):
"""
Plot polygons inside the boundary
        c = polygon to be plotted with matplotlib
string = string that specify color
w = width of the line plotting
"""
if bool(c):
for i in range(len(c)):
#toPlot = Polygon.Polygon(c.contour(i))
toPlot = Polygon.Polygon(c.contour(i)) & self.all
if bool(toPlot):
for j in range(len(toPlot)):
#BoundPolyPoints = asarray(PolyUtils.pointList(toPlot.contour(j)))
BoundPolyPoints = asarray(PolyUtils.pointList(Polygon.Polygon(toPlot.contour(j))))
if self.operate_system == 2:
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],string,linewidth=w)
self.ax.plot([BoundPolyPoints[-1,0],BoundPolyPoints[0,0]],[BoundPolyPoints[-1,1],BoundPolyPoints[0,1]],string,linewidth=w)
else:
plt.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],string,linewidth=w)
plt.plot([BoundPolyPoints[-1,0],BoundPolyPoints[0,0]],[BoundPolyPoints[-1,1],BoundPolyPoints[0,1]],string,linewidth=w)
plt.figure(1).canvas.draw()
def data_gen(self):
#self.ax.cla()
for regionName,regionPoly in self.map.iteritems():
self.plotPoly(regionPoly,'k')
"""
#for i in range(len(self.V)):
if shape(V)[1] <= 2:
plt.plot(( V[1,shape(V)[1]-1],q_g[0,0]),( V[2,shape(V)[1]-1],q_g[1,0]),'b')
else:
plt.plot(( V[1,E[0,shape(E)[1]-1]], V[1,shape(V)[1]-1],q_g[0,0]),( V[2,E[0,shape(E)[1]-1]], V[2,shape(V)[1]-1],q_g[1,0]),'b')
self.plotPoly(self.realRobot, 'r')
self.plotPoly(self.robot, 'b')
"""
pose = self.pose_handler.getPose()
self.ax.plot(pose[0],pose[1],'bo')
"""
self.ax.plot(self.q_g[0],self.q_g[1],'ro')
self.plotPoly(self.overlap,'g')
self.plotPoly(self.m_line,'b')
"""
yield(pose[0],pose[1])
"""
self.ax.plot(self.prev_follow[0],self.prev_follow[1],'ko')
"""
def jplot(self):
ani = animation.FuncAnimation(self.fig, self.scope.update, self.data_gen)
plt.show()
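# Illustrative sketch (not part of the original handler): generateNewNode above
# forward-simulates a unicycle model for one unit time step with constant forward
# velocity v and a sampled angular velocity omega (omega != 0 in the sampled ranges):
#     x' = x + (v/omega) * (sin(omega + theta) - sin(theta))
#     y' = y - (v/omega) * (cos(omega + theta) - cos(theta))
#     theta' = theta + omega
# The helper name below is hypothetical and only documents that step.
def _example_unicycle_step(x, y, theta, v, omega):
    x_new = x + v / omega * (sin(omega + theta) - sin(theta))
    y_new = y - v / omega * (cos(omega + theta) - cos(theta))
    return x_new, y_new, theta + omega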
class _Scope:
def __init__(self, ax, motion, maxt=2, dt=0.02):
self.i = 0
self.ax = ax
self.line, = self.ax.plot(1)
self.ax.set_ylim(0, 1)
self.motion = motion
def update(self,data):
(data1) = self.motion.data_gen()
a = data1.next()
self.line.set_data(a)
self.ax.relim()
self.ax.autoscale()
return self.line,
| gpl-3.0 |
mhue/scikit-learn | benchmarks/bench_mnist.py | 154 | 6006 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
Classifier               train-time   test-time   error-rate
------------------------------------------------------------
Nystroem-SVM               105.07s       0.91s       0.0227
ExtraTrees                  48.20s       1.22s       0.0288
RandomForest                47.17s       1.21s       0.0304
SampledRBF-SVM             140.45s       0.84s       0.0486
CART                        22.84s       0.16s       0.1214
dummy                        0.01s       0.02s       0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100))
}
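# Minimal sketch of the fit / predict / timing pattern used in the loop below,
# factored into a helper for illustration only (this helper is not part of the
# original benchmark script).
def _bench_one_estimator(estimator, X_train, y_train, X_test, y_test):
    t0 = time()
    estimator.fit(X_train, y_train)
    fit_time = time() - t0
    t0 = time()
    y_pred = estimator.predict(X_test)
    predict_time = time() - t0
    return fit_time, predict_time, zero_one_loss(y_test, y_pred)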
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
rbharvs/mnd-learning | supervised.py | 1 | 8636 | import sys
import parsetags
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn import svm
from sklearn.decomposition import PCA as PCA
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import nltk
from nltk.stem import LancasterStemmer
import re
def get_top_tags(segment_tags, n):
tag_freqs = {}
for tag_list in segment_tags.values():
for tag in tag_list:
if tag not in tag_freqs:
tag_freqs[tag] = 0
tag_freqs[tag] += 1
return ['NULL'] + sorted(tag_freqs.keys(), key=lambda x: tag_freqs[x])[-n:]
def get_common_words(n=100):
try:
file_content = open(sys.argv[3]).read()
common_words = nltk.word_tokenize(file_content)
except IndexError:
return None
return set(common_words[:n])
def get_named_entities():
try:
file_content = open(sys.argv[2]).read()
named_entities = nltk.word_tokenize(file_content)
except IndexError:
return None
return set(named_entities)
def filter_segments(segment_tags, ntags):
filtered_segtags = {}
for segment in segment_tags:
# np.random.shuffle(segment_tags[segment])
for tag in segment_tags[segment]:
if tag not in ntags: continue
filtered_segtags[segment] = ntags.index(tag)
if segment not in filtered_segtags:
filtered_segtags[segment] = 0
return filtered_segtags
def increase_num_segments(segment_tags, n, length=1000):
new_segment_tags = {}
segments = sorted(segment_tags.keys(), key=len)
lengths = np.array([len(seg) for seg in segments])
dist = lengths/np.sum(lengths)
random_segments = np.random.choice(segments, size=n, p=dist)
for segment in random_segments:
new_segment = segment
if len(new_segment) > length:
index = np.random.randint(0, len(new_segment)-length)
new_segment = new_segment[index:index+length]
new_segment_tags[new_segment] = segment_tags[segment]
return new_segment_tags
def named_entity_reduction(segment_tags, named_entities, common_words):
    punctuation = [',', '.', "'", '?', ';', ':', '!', '(', ')', '`', '--',
'\xe2', '\x80', '\x94', '\x99']
new_segments = []
segments = list(segment_tags.keys())
for segment in segments:
new_segment = ''
tokens = nltk.word_tokenize(segment)
for token in tokens:
if token in punctuation: continue
if token.lower() in common_words: continue
if token not in named_entities: continue
new_segment += token + ' '
new_segments.append(new_segment)
new_segment_tags = {}
for i in range(len(segments)):
new_segment_tags[new_segments[i]] = segment_tags[segments[i]]
return new_segment_tags
def stemming_reduction(segment_tags):
    punctuation = [',', '.', "'", '?', ';', ':', '!', '(', ')', '`', '--',
'\xe2', '\x80', '\x94', '\x99']
new_segments = []
stemmer = LancasterStemmer()
segments = list(segment_tags.keys())
for segment in segments:
new_segment = ''
segment = re.sub(r'[^\x00-\x7f]',r'', segment)
tokens = nltk.word_tokenize(segment)
for token in tokens:
if token in punctuation: continue
try:
new_segment += stemmer.stem(token)+' '
except UnicodeDecodeError:
new_segment += ''
new_segments.append(new_segment)
stemmed_segment_tags = {}
for i in range(len(segments)):
stemmed_segment_tags[new_segments[i]] = segment_tags[segments[i]]
return stemmed_segment_tags
def separate_segments(segment_tags, k):
train = {}
for segment in segment_tags.keys():
if np.random.random() < k:
train[segment] = segment_tags.pop(segment)
return train, segment_tags
def bag_of_words(segment_tags, tfidf=False):
#create matrix of word frequencies
segments = list(segment_tags.keys())
vec = CountVectorizer()
word_freqs = vec.fit_transform(segments).toarray()
if tfidf:
tfidf_transformer = TfidfTransformer()
word_freqs = tfidf_transformer.fit_transform(word_freqs)
labels = np.empty(shape=len(segments))
for i in range(len(segments)):
labels[i] = segment_tags[segments[i]]
return word_freqs, labels, segments
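# Illustrative sketch (not part of the original script): bag_of_words turns the
# segment strings into a document-term count matrix via CountVectorizer, optionally
# rescaled by TfidfTransformer; labels[i] holds the tag index of segments[i].
def _bag_of_words_toy_example():
    toy_segment_tags = {"call me ishmael": 1, "the whale hunts the ship": 0}
    word_freqs, labels, segments = bag_of_words(toy_segment_tags, tfidf=False)
    # word_freqs has one row per segment and one column per vocabulary word
    return word_freqs.shape, labels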
def entity_bow(segment_tags, named_entities, common_words):
    punctuation = [',', '.', "'", '?', ';', ':', '!', '(', ')', '`', '--',
'\xe2', '\x80', '\x94', '\x99']
new_segments = []
segments = list(segment_tags.keys())
for segment in segments:
new_segment = ''
tokens = nltk.word_tokenize(segment)
for token in tokens:
if token in punctuation: continue
if token.lower() in common_words: continue
if token not in named_entities: continue
new_segment += token + ' '
new_segments.append(new_segment)
vec = CountVectorizer()
word_freqs = vec.fit_transform(new_segments).toarray()
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(word_freqs)
print(word_freqs.shape, X_train_tfidf.shape)
labels = np.empty(shape=len(segments))
for i in range(len(segments)):
labels[i] = segment_tags[segments[i]]
return X_train_tfidf, labels, segments
def pca_plot(Xtrain, ytrain):
#binary classification case
X_reduced = Xtrain
pca = PCA(3)
X_pca = pca.fit_transform(X_reduced)
ax = plt.axes(projection='3d')
for i in range(X_pca.shape[0]):
if ytrain[i] == 1:
ax.scatter(X_pca[i, 0], X_pca[i, 2], X_pca[i, 1], 'o', color='blue')
else:
ax.scatter(X_pca[i, 0], X_pca[i, 2], X_pca[i,1], 'x', color='red')
plt.show()
def randomize_order(X, y, segments):
shuffled_segments = []
indices = np.arange(len(segments))
np.random.shuffle(indices)
X, y = X[indices], y[indices]
for i in indices:
shuffled_segments.append(segments[i])
return X, y, segments
def naive_bayes(segment_tags, k=0.5, normalize=False):
X, y, segments = randomize_order(*bag_of_words(segment_tags, tfidf=normalize))
num_examples = len(segments)
Xtest, ytest = X[int(k*num_examples):, :], y[int(k*num_examples):]
Xtrain, ytrain = X[:int(k*num_examples), :], y[:int(k*num_examples)]
nb_classifier = MultinomialNB().fit(Xtrain, ytrain)
nb_predicted_tags = nb_classifier.predict(Xtest)
nb_success_rate = np.mean(nb_predicted_tags == ytest)
return 1-nb_success_rate
def support_vector(segment_tags, k=0.5, normalize=False):
X, y, segments = randomize_order(*bag_of_words(segment_tags, tfidf=normalize))
num_examples = len(segments)
Xtest, ytest = X[int(k*num_examples):, :], y[int(k*num_examples):]
Xtrain, ytrain = X[:int(k*num_examples), :], y[:int(k*num_examples)]
svm_classifier = svm.SVC()
svm_classifier.fit(Xtrain, ytrain)
svm_predicted_tags = svm_classifier.predict(Xtest)
svm_success_rate = np.mean(svm_predicted_tags == ytest)
return 1-svm_success_rate
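# Hypothetical end-to-end usage sketch (mirrors the __main__ block below; the tag file
# argument is made up): parse the tag file, keep segments carrying the tags of
# interest, then estimate a held-out error rate with the naive Bayes pipeline.
def _example_run(tag_file):
    segment_tags = parsetags.parse_tags(tag_file)
    ntags = ['NULL', 'ETA', 'EHDRH', 'AFR']
    filtered = filter_segments(segment_tags, ntags)
    return naive_bayes(filtered, k=0.5, normalize=True)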
if __name__ == "__main__":
orig_segment_tags = parsetags.parse_tags(sys.argv[1])
common_words = get_common_words()
named_entities = get_named_entities()
for sample_size in ['BIG']:
if sample_size == 'BIG':
segment_tags = increase_num_segments(orig_segment_tags, 3000, length=1000)
orig_segment_tags = segment_tags
for text_features in ['REGULAR', 'STEMMED', 'NAMED']:
if text_features == 'STEMMED':
segment_tags = stemming_reduction(orig_segment_tags)
if text_features == 'NAMED':
segment_tags = named_entity_reduction(orig_segment_tags, named_entities, common_words)
for freq_feature in ['COUNT', 'TFIDF']:
# ntags = get_top_tags(segment_tags, 7)
print(sample_size, text_features, freq_feature)
ntags = ['ETA', 'EHDRH', 'AFR']
filtered_segtags = filter_segments(segment_tags, ntags)
with open('Results/' + sample_size + '_' + text_features + '_' + freq_feature + '.txt', 'w') as f:
for i in range(100):
f.write(str(naive_bayes(filtered_segtags, normalize=(freq_feature is 'TFIDF'))) + '\n')
# segment_tags = parsetags.parse_tags(sys.argv[1])
# big_segment_tags = increase_num_segments(segment_tags, 3000, length=1000)
# ntags = get_top_tags(segment_tags, 7)
# for
# # ntags = ['NULL', 'ETA', 'EHDRH', 'AFR']
# common_words = get_common_words()
# named_entities = get_named_entities()
# filtered_segtags = filter_segments(segment_tags, ntags)
# #entity_bow(filtered_segtags, named_entities, common_words)
# naive_bayes(filtered_segtags, named_entities, common_words, features=entity_bow)
# naive_bayes(filtered_segtags)
# support_vector(filtered_segtags)
# predicted_tags = [ntags[int(np.round(nb_predicted_tags[i]))] for i in range(len(svm_predicted_tags))]
# count = 0
# print(ntags)
# for i in range(len(predicted_tags)):
# if predicted_tags[i] == 'NULL':
# if all(tag not in segment_tags[shuffled_segments[i]] for tag in ntags):
# count += 1
# else:
# if predicted_tags[i] in segment_tags[shuffled_segments[i]]:
# count += 1
# print(count/len(predicted_tags))
| mit |
huobaowangxi/scikit-learn | sklearn/decomposition/dict_learning.py | 83 | 44062 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
code: array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, precompute=gram,
max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
raise ValueError('Sparse coding method must be "lasso_lars" '
'"lasso_cd", "lasso", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None:
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
return _sparse_encode(X, dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init, max_iter=max_iter)
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram, cov[:, this_slice], algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
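# Minimal usage sketch for sparse_encode (illustrative only, not part of the module):
# given a dictionary D with unit-norm rows, it returns codes C such that X ~= C * D.
def _sparse_encode_toy_example():
    rng = np.random.RandomState(0)
    D = rng.randn(5, 8)
    D /= np.sqrt((D ** 2).sum(axis=1))[:, np.newaxis]   # normalize the atoms
    X = np.dot(rng.randn(10, 5), D)                     # samples living in span(D)
    code = sparse_encode(X, D, algorithm='omp', n_nonzero_coefs=3)
    return code.shape                                   # (n_samples, n_components) = (10, 5)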
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
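# Illustrative sketch (hypothetical helper, not part of the module) of the per-atom
# block coordinate step performed in _update_dict above: with residual
# R_k = Y - D C + d_k c_k^T (the reconstruction error ignoring atom k), the updated
# atom is d_k = R_k c_k, rescaled to unit norm.
def _example_atom_update(Y, dictionary, code, k):
    R_k = Y - np.dot(dictionary, code) + np.outer(dictionary[:, k], code[k, :])
    new_atom = np.dot(R_k, code[k, :])
    norm = np.sqrt(np.dot(new_atom, new_atom))
    return new_atom / norm if norm > 1e-10 else new_atom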
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
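# A minimal usage sketch for ``dict_learning`` on random data. It is not
# called anywhere in the module; sizes, ``alpha`` and ``max_iter`` are
# arbitrary example values.
def _example_dict_learning():
    """Illustrative only: factor a random matrix into code and dictionary."""
    rng = np.random.RandomState(0)
    X = rng.randn(30, 12)
    code, dictionary, errors = dict_learning(X, n_components=5, alpha=1.0,
                                             max_iter=20, random_state=0)
    # code is (n_samples, n_components); dictionary is (n_components, n_features)
    assert code.shape == (30, 5)
    assert dictionary.shape == (5, 12)
    return errors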
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
        raise ValueError('Coding method %r not supported as a fit algorithm.'
                         % method)
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
dictionary = np.ascontiguousarray(dictionary.T)
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
    # If n_iter is zero, the returned iteration count should be zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
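        # beta acts as a forgetting factor (cf. the Mairal et al. 2009
        # reference cited in the docstring): the accumulated sufficient
        # statistics A and B are down-weighted before the current
        # mini-batch's contribution is added.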
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
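# A minimal usage sketch for ``dict_learning_online`` on random data. It is
# not called anywhere in the module; sizes, ``alpha``, ``n_iter`` and
# ``batch_size`` are arbitrary example values.
def _example_dict_learning_online():
    """Illustrative only: online factorization of a random matrix."""
    rng = np.random.RandomState(0)
    X = rng.randn(40, 16)
    code, dictionary = dict_learning_online(X, n_components=8, alpha=1.0,
                                            n_iter=25, batch_size=5,
                                            random_state=0)
    assert code.shape == (40, 8)
    assert dictionary.shape == (8, 16)
    return code, dictionary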
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
        The dictionary atoms used for sparse coding. Rows are assumed to be
        normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
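# A minimal usage sketch for ``SparseCoder`` with a fixed random dictionary.
# It is not called anywhere in the module; the dictionary rows are normalized
# to unit norm as the class docstring assumes, and all sizes are arbitrary
# example values.
def _example_sparse_coder():
    """Illustrative only: transform random data with a fixed dictionary."""
    rng = np.random.RandomState(0)
    dictionary = rng.randn(7, 15)
    dictionary /= np.sqrt(np.sum(dictionary ** 2, axis=1))[:, np.newaxis]
    X = rng.randn(10, 15)
    coder = SparseCoder(dictionary, transform_algorithm='omp',
                        transform_n_nonzero_coefs=3)
    code = coder.fit(X).transform(X)
    assert code.shape == (10, 7)
    return code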
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
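# A minimal usage sketch for the ``DictionaryLearning`` estimator. It is not
# called anywhere in the module; ``n_components``, ``alpha`` and ``max_iter``
# are arbitrary example values.
def _example_dictionary_learning():
    """Illustrative only: fit the estimator on random data and encode it."""
    rng = np.random.RandomState(0)
    X = rng.randn(25, 9)
    estimator = DictionaryLearning(n_components=4, alpha=1.0, max_iter=10,
                                   random_state=0)
    code = estimator.fit(X).transform(X)
    assert estimator.components_.shape == (4, 9)
    assert code.shape == (25, 4)
    return code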
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
            The number of iterations on data batches that have been
            performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
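# A minimal usage sketch for ``MiniBatchDictionaryLearning.partial_fit``. It
# is not called anywhere in the module; batch sizes, ``n_components`` and
# ``n_iter`` are arbitrary example values.
def _example_minibatch_partial_fit():
    """Illustrative only: feed two random mini-batches one after the other."""
    rng = np.random.RandomState(0)
    estimator = MiniBatchDictionaryLearning(n_components=6, alpha=1.0,
                                            n_iter=5, random_state=0)
    for _ in range(2):
        X_batch = rng.randn(10, 12)
        # The inner statistics kept on the estimator let each call continue
        # from the previous one.
        estimator.partial_fit(X_batch)
    assert estimator.components_.shape == (6, 12)
    return estimator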
| bsd-3-clause |
automl/paramsklearn | tests/test_classification.py | 1 | 31256 | import os
import resource
import sys
import traceback
import unittest
import mock
import numpy as np
import sklearn.datasets
import sklearn.decomposition
import sklearn.cross_validation
import sklearn.ensemble
import sklearn.svm
from sklearn.utils.testing import assert_array_almost_equal
from HPOlibConfigSpace.configuration_space import ConfigurationSpace, \
Configuration
from HPOlibConfigSpace.hyperparameters import CategoricalHyperparameter
from ParamSklearn.classification import ParamSklearnClassifier
from ParamSklearn.components.base import ParamSklearnClassificationAlgorithm
from ParamSklearn.components.base import ParamSklearnPreprocessingAlgorithm
import ParamSklearn.components.classification as classification_components
import ParamSklearn.components.feature_preprocessing as preprocessing_components
from ParamSklearn.util import get_dataset
from ParamSklearn.constants import *
class TestParamSklearnClassifier(unittest.TestCase):
def test_io_dict(self):
classifiers = classification_components._classifiers
for c in classifiers:
if classifiers[c] == classification_components.ClassifierChoice:
continue
props = classifiers[c].get_properties()
self.assertIn('input', props)
self.assertIn('output', props)
inp = props['input']
output = props['output']
self.assertIsInstance(inp, tuple)
self.assertIsInstance(output, tuple)
for i in inp:
self.assertIn(i, (SPARSE, DENSE, SIGNED_DATA, UNSIGNED_DATA))
self.assertEqual(output, (PREDICTIONS,))
self.assertIn('handles_regression', props)
self.assertFalse(props['handles_regression'])
self.assertIn('handles_classification', props)
self.assertIn('handles_multiclass', props)
self.assertIn('handles_multilabel', props)
def test_find_classifiers(self):
classifiers = classification_components._classifiers
self.assertGreaterEqual(len(classifiers), 2)
for key in classifiers:
if hasattr(classifiers[key], 'get_components'):
continue
self.assertIn(ParamSklearnClassificationAlgorithm,
classifiers[key].__bases__)
def test_find_preprocessors(self):
preprocessors = preprocessing_components._preprocessors
self.assertGreaterEqual(len(preprocessors), 1)
for key in preprocessors:
if hasattr(preprocessors[key], 'get_components'):
continue
self.assertIn(ParamSklearnPreprocessingAlgorithm,
preprocessors[key].__bases__)
def test_default_configuration(self):
for i in range(2):
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
default = cs.get_default_configuration()
X_train, Y_train, X_test, Y_test = get_dataset(dataset='iris')
auto = ParamSklearnClassifier(default)
auto = auto.fit(X_train, Y_train)
predictions = auto.predict(X_test)
self.assertAlmostEqual(0.9599999999999995,
sklearn.metrics.accuracy_score(predictions, Y_test))
scores = auto.predict_proba(X_test)
def test_repr(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
default = cs.get_default_configuration()
representation = repr(ParamSklearnClassifier(default))
cls = eval(representation)
self.assertIsInstance(cls, ParamSklearnClassifier)
def test_multilabel(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
dataset_properties = {'multilabel': True}
cs = ParamSklearnClassifier.get_hyperparameter_search_space(dataset_properties=dataset_properties)
print(cs)
cs.seed(5)
for i in range(50):
X, Y = sklearn.datasets.\
make_multilabel_classification(n_samples=150,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=True,
sparse=False,
return_indicator=True,
return_distributions=False,
random_state=1)
X_train = X[:100, :]
Y_train = Y[:100, :]
X_test = X[101:, :]
Y_test = Y[101:, ]
config = cs.sample_configuration()
config._populate_values()
if 'classifier:passive_aggressive:n_iter' in config:
config._values['classifier:passive_aggressive:n_iter'] = 5
if 'classifier:sgd:n_iter' in config:
config._values['classifier:sgd:n_iter'] = 5
cls = ParamSklearnClassifier(config, random_state=1)
print(config)
try:
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
predictions = cls.predict(X_test)
self.assertIsInstance(predictions, np.ndarray)
predicted_probabilities = cls.predict_proba(X_test_)
[self.assertIsInstance(i, np.ndarray) for i in predicted_probabilities]
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except MemoryError as e:
continue
def test_configurations(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
print(cs)
cs.seed(1)
for i in range(10):
config = cs.sample_configuration()
config._populate_values()
if config['classifier:passive_aggressive:n_iter'] is not None:
config._values['classifier:passive_aggressive:n_iter'] = 5
if config['classifier:sgd:n_iter'] is not None:
config._values['classifier:sgd:n_iter'] = 5
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cls = ParamSklearnClassifier(config, random_state=1)
print(config)
try:
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
predictions = cls.predict(X_test)
self.assertIsInstance(predictions, np.ndarray)
                predicted_probabilities = cls.predict_proba(X_test_)
                self.assertIsInstance(predicted_probabilities, np.ndarray)
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except MemoryError as e:
continue
def test_configurations_signed_data(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'signed': True})
print(cs)
for i in range(10):
config = cs.sample_configuration()
config._populate_values()
if config['classifier:passive_aggressive:n_iter'] is not None:
config._values['classifier:passive_aggressive:n_iter'] = 5
if config['classifier:sgd:n_iter'] is not None:
config._values['classifier:sgd:n_iter'] = 5
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cls = ParamSklearnClassifier(config, random_state=1)
print(config)
try:
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
predictions = cls.predict(X_test)
self.assertIsInstance(predictions, np.ndarray)
                predicted_probabilities = cls.predict_proba(X_test_)
                self.assertIsInstance(predicted_probabilities, np.ndarray)
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
print(traceback.format_exc())
raise e
except MemoryError as e:
continue
def test_configurations_sparse(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
print(cs)
for i in range(10):
config = cs.sample_configuration()
config._populate_values()
if config['classifier:passive_aggressive:n_iter'] is not None:
config._values['classifier:passive_aggressive:n_iter'] = 5
if config['classifier:sgd:n_iter'] is not None:
config._values['classifier:sgd:n_iter'] = 5
print(config)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=True)
cls = ParamSklearnClassifier(config, random_state=1)
try:
cls.fit(X_train, Y_train)
predictions = cls.predict(X_test)
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
traceback.print_tb(sys.exc_info()[2])
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
raise e
def test_configurations_categorical_data(self):
# Use a limit of ~4GiB
limit = 4000 * 1024 * 1024
resource.setrlimit(resource.RLIMIT_AS, (limit, limit))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
print(cs)
for i in range(10):
config = cs.sample_configuration()
config._populate_values()
if config['classifier:passive_aggressive:n_iter'] is not None:
config._values['classifier:passive_aggressive:n_iter'] = 5
if config['classifier:sgd:n_iter'] is not None:
config._values['classifier:sgd:n_iter'] = 5
print(config)
categorical = [True, True, True, False, False, True, True, True,
False, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, False,
False, False, True, True, True]
this_directory = os.path.dirname(__file__)
X = np.loadtxt(os.path.join(this_directory, "components",
"data_preprocessing", "dataset.pkl"))
y = X[:, -1].copy()
X = X[:,:-1]
X_train, X_test, Y_train, Y_test = \
sklearn.cross_validation.train_test_split(X, y)
cls = ParamSklearnClassifier(config, random_state=1,)
try:
cls.fit(X_train, Y_train,
init_params={'one_hot_encoding:categorical_features': categorical})
predictions = cls.predict(X_test)
except ValueError as e:
if "Floating-point under-/overflow occurred at epoch" in \
e.args[0] or \
"removed all features" in e.args[0] or \
"all features are discarded" in e.args[0]:
continue
else:
print(config)
traceback.print_tb(sys.exc_info()[2])
raise e
except RuntimeWarning as e:
if "invalid value encountered in sqrt" in e.args[0]:
continue
elif "divide by zero encountered in" in e.args[0]:
continue
elif "invalid value encountered in divide" in e.args[0]:
continue
elif "invalid value encountered in true_divide" in e.args[0]:
continue
else:
print(config)
raise e
except UserWarning as e:
if "FastICA did not converge" in e.args[0]:
continue
else:
print(config)
raise e
def test_get_hyperparameter_search_space(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
self.assertIsInstance(cs, ConfigurationSpace)
conditions = cs.get_conditions()
self.assertEqual(len(cs.get_hyperparameter(
'rescaling:__choice__').choices), 4)
self.assertEqual(len(cs.get_hyperparameter(
'classifier:__choice__').choices), 16)
self.assertEqual(len(cs.get_hyperparameter(
'preprocessor:__choice__').choices), 14)
hyperparameters = cs.get_hyperparameters()
self.assertEqual(145, len(hyperparameters))
#for hp in sorted([str(h) for h in hyperparameters]):
# print hp
# The four parameters which are always active are classifier,
# preprocessor, imputation strategy and scaling strategy
self.assertEqual(len(hyperparameters) - 6, len(conditions))
def test_get_hyperparameter_search_space_include_exclude_models(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'classifier': ['libsvm_svc']})
self.assertEqual(cs.get_hyperparameter('classifier:__choice__'),
CategoricalHyperparameter('classifier:__choice__', ['libsvm_svc']))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
exclude={'classifier': ['libsvm_svc']})
self.assertNotIn('libsvm_svc', str(cs))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'preprocessor': ['select_percentile_classification']})
self.assertEqual(cs.get_hyperparameter('preprocessor:__choice__'),
CategoricalHyperparameter('preprocessor:__choice__',
['select_percentile_classification']))
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
exclude={'preprocessor': ['select_percentile_classification']})
self.assertNotIn('select_percentile_classification', str(cs))
def test_get_hyperparameter_search_space_preprocessor_contradicts_default_classifier(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'preprocessor': ['densifier']},
dataset_properties={'sparse': True})
self.assertEqual(cs.get_hyperparameter('classifier:__choice__').default,
'qda')
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'preprocessor': ['nystroem_sampler']})
self.assertEqual(cs.get_hyperparameter('classifier:__choice__').default,
'sgd')
def test_get_hyperparameter_search_space_only_forbidden_combinations(self):
self.assertRaisesRegexp(AssertionError, "No valid pipeline found.",
ParamSklearnClassifier.get_hyperparameter_search_space,
include={'classifier': ['multinomial_nb'],
'preprocessor': ['pca']},
dataset_properties={'sparse':True})
        # It must also be ensured that no classifiers which can handle sparse
        # data are located behind the densifier
self.assertRaisesRegexp(ValueError, "Cannot find a legal default "
"configuration.",
ParamSklearnClassifier.get_hyperparameter_search_space,
include={'classifier': ['liblinear_svc'],
'preprocessor': ['densifier']},
dataset_properties={'sparse': True})
@unittest.skip("Wait until HPOlibConfigSpace is fixed.")
def test_get_hyperparameter_search_space_dataset_properties(self):
cs_mc = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'multiclass': True})
self.assertNotIn('bernoulli_nb', str(cs_mc))
cs_ml = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'multilabel': True})
self.assertNotIn('k_nearest_neighbors', str(cs_ml))
self.assertNotIn('liblinear', str(cs_ml))
self.assertNotIn('libsvm_svc', str(cs_ml))
self.assertNotIn('sgd', str(cs_ml))
cs_sp = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
self.assertIn('extra_trees', str(cs_sp))
self.assertIn('gradient_boosting', str(cs_sp))
self.assertIn('random_forest', str(cs_sp))
cs_mc_ml = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'multilabel': True, 'multiclass': True})
self.assertEqual(cs_ml, cs_mc_ml)
def test_predict_batched(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
default = cs.get_default_configuration()
cls = ParamSklearnClassifier(default)
# Multiclass
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_)
cls.pipeline_ = cls_predict
prediction = cls.predict(X_test, batch_size=20)
self.assertEqual((1647,), prediction.shape)
self.assertEqual(83, cls_predict.predict.call_count)
assert_array_almost_equal(prediction_, prediction)
# Multilabel
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
Y_train = np.array([(y, 26 - y) for y in Y_train])
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_)
cls.pipeline_ = cls_predict
prediction = cls.predict(X_test, batch_size=20)
self.assertEqual((1647, 2), prediction.shape)
self.assertEqual(83, cls_predict.predict.call_count)
assert_array_almost_equal(prediction_, prediction)
def test_predict_batched_sparse(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
config = Configuration(cs,
values={"balancing:strategy": "none",
"classifier:__choice__": "random_forest",
"imputation:strategy": "mean",
"one_hot_encoding:minimum_fraction": 0.01,
"one_hot_encoding:use_minimum_fraction": "True",
"preprocessor:__choice__": "no_preprocessing",
'classifier:random_forest:bootstrap': 'True',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:min_samples_split': 2,
'classifier:random_forest:min_samples_leaf': 2,
'classifier:random_forest:max_features': 0.5,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:n_estimators': 100,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
"rescaling:__choice__": "min/max"})
cls = ParamSklearnClassifier(config)
# Multiclass
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=True)
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_)
cls.pipeline_ = cls_predict
prediction = cls.predict(X_test, batch_size=20)
self.assertEqual((1647,), prediction.shape)
self.assertEqual(83, cls_predict.predict.call_count)
assert_array_almost_equal(prediction_, prediction)
# Multilabel
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=True)
Y_train = np.array([(y, 26 - y) for y in Y_train])
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_)
cls.pipeline_ = cls_predict
prediction = cls.predict(X_test, batch_size=20)
self.assertEqual((1647, 2), prediction.shape)
self.assertEqual(83, cls_predict.predict.call_count)
assert_array_almost_equal(prediction_, prediction)
def test_predict_proba_batched(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space()
default = cs.get_default_configuration()
# Multiclass
cls = ParamSklearnClassifier(default)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict_proba(X_test_)
# The object behind the last step in the pipeline
cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
cls.pipeline_.steps[-1] = ("estimator", cls_predict)
prediction = cls.predict_proba(X_test, batch_size=20)
self.assertEqual((1647, 10), prediction.shape)
self.assertEqual(84, cls_predict.predict_proba.call_count)
assert_array_almost_equal(prediction_, prediction)
# Multilabel
cls = ParamSklearnClassifier(default)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
Y_train = np.array([(y, 26 - y) for y in Y_train])
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict_proba(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
cls.pipeline_.steps[-1] = ("estimator", cls_predict)
prediction = cls.predict_proba(X_test, batch_size=20)
self.assertIsInstance(prediction, list)
self.assertEqual(2, len(prediction))
self.assertEqual((1647, 10), prediction[0].shape)
self.assertEqual((1647, 10), prediction[1].shape)
self.assertEqual(84, cls_predict.predict_proba.call_count)
assert_array_almost_equal(prediction_, prediction)
def test_predict_proba_batched_sparse(self):
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
dataset_properties={'sparse': True})
config = Configuration(cs,
values={"balancing:strategy": "none",
"classifier:__choice__": "random_forest",
"imputation:strategy": "mean",
"one_hot_encoding:minimum_fraction": 0.01,
"one_hot_encoding:use_minimum_fraction": 'True',
"preprocessor:__choice__": "no_preprocessing",
'classifier:random_forest:bootstrap': 'True',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:min_samples_split': 2,
'classifier:random_forest:min_samples_leaf': 2,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:max_features': 0.5,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:n_estimators': 100,
"rescaling:__choice__": "min/max"})
# Multiclass
cls = ParamSklearnClassifier(config)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=True)
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict_proba(X_test_)
# The object behind the last step in the pipeline
cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
cls.pipeline_.steps[-1] = ("estimator", cls_predict)
prediction = cls.predict_proba(X_test, batch_size=20)
self.assertEqual((1647, 10), prediction.shape)
self.assertEqual(84, cls_predict.predict_proba.call_count)
assert_array_almost_equal(prediction_, prediction)
# Multilabel
cls = ParamSklearnClassifier(config)
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=True)
Y_train = np.array([(y, 26 - y) for y in Y_train])
cls.fit(X_train, Y_train)
X_test_ = X_test.copy()
prediction_ = cls.predict_proba(X_test_)
cls_predict = mock.Mock(wraps=cls.pipeline_.steps[-1][1])
cls.pipeline_.steps[-1] = ("estimator", cls_predict)
prediction = cls.predict_proba(X_test, batch_size=20)
self.assertIsInstance(prediction, list)
self.assertEqual(2, len(prediction))
self.assertEqual((1647, 10), prediction[0].shape)
self.assertEqual((1647, 10), prediction[1].shape)
self.assertEqual(84, cls_predict.predict_proba.call_count)
assert_array_almost_equal(prediction_, prediction)
@unittest.skip("test_check_random_state Not yet Implemented")
def test_check_random_state(self):
raise NotImplementedError()
@unittest.skip("test_validate_input_X Not yet Implemented")
def test_validate_input_X(self):
raise NotImplementedError()
@unittest.skip("test_validate_input_Y Not yet Implemented")
def test_validate_input_Y(self):
raise NotImplementedError()
def test_set_params(self):
pass
def test_get_params(self):
pass
| bsd-3-clause |
dariox2/CADL | test/testyida6b.py | 1 | 4901 |
#
# test shuffle_batch - 6b
#
# generates a pair of files (color + black & white)
# pending: make the tuple match
#
print("Loading tensorflow...")
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
from libs import utils
import datetime
tf.set_random_seed(1)
def create_input_pipeline_yida(files1, files2, batch_size, n_epochs, shape, crop_shape=None,
crop_factor=1.0, n_threads=1, seed=None):
producer1 = tf.train.string_input_producer(
files1, capacity=len(files1), shuffle=False)
producer2 = tf.train.string_input_producer(
files2, capacity=len(files2), shuffle=False)
# We need something which can open the files and read its contents.
reader = tf.WholeFileReader()
# We pass the filenames to this object which can read the file's contents.
# This will create another queue running which dequeues the previous queue.
keys1, vals1 = reader.read(producer1)
keys2, vals2 = reader.read(producer2)
# And then have to decode its contents as we know it is a jpeg image
imgs1 = tf.image.decode_jpeg(vals1, channels=3)
imgs2 = tf.image.decode_jpeg(vals2, channels=3)
# We have to explicitly define the shape of the tensor.
# This is because the decode_jpeg operation is still a node in the graph
# and doesn't yet know the shape of the image. Future operations however
# need explicit knowledge of the image's shape in order to be created.
imgs1.set_shape(shape)
imgs2.set_shape(shape)
    # Next we'll centrally crop the image to the requested crop_shape.
    # This operation requires explicit knowledge of the image's shape.
if shape[0] > shape[1]:
rsz_shape = [int(shape[0] / shape[1] * crop_shape[0] / crop_factor),
int(crop_shape[1] / crop_factor)]
else:
rsz_shape = [int(crop_shape[0] / crop_factor),
int(shape[1] / shape[0] * crop_shape[1] / crop_factor)]
rszs1 = tf.image.resize_images(imgs1, rsz_shape[0], rsz_shape[1])
rszs2 = tf.image.resize_images(imgs2, rsz_shape[0], rsz_shape[1])
crops1 = (tf.image.resize_image_with_crop_or_pad(
rszs1, crop_shape[0], crop_shape[1])
if crop_shape is not None
else imgs1)
crops2 = (tf.image.resize_image_with_crop_or_pad(
rszs2, crop_shape[0], crop_shape[1])
if crop_shape is not None
else imgs2)
# Now we'll create a batch generator that will also shuffle our examples.
# We tell it how many it should have in its buffer when it randomly
# permutes the order.
min_after_dequeue = len(files1) // 5
# The capacity should be larger than min_after_dequeue, and determines how
# many examples are prefetched. TF docs recommend setting this value to:
# min_after_dequeue + (num_threads + a small safety margin) * batch_size
capacity = min_after_dequeue + (n_threads + 1) * batch_size
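    # For example (illustrative numbers matching the usage below): with 100
    # files, the default single reader thread and a batch size of 8,
    # min_after_dequeue = 100 // 5 = 20 and capacity = 20 + (1 + 1) * 8 = 36.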
# Randomize the order and output batches of batch_size.
batch = tf.train.shuffle_batch([crops1, crops2],
enqueue_many=False,
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
num_threads=n_threads,
#seed=seed,
)#shapes=(64,64,3))
# alternatively, we could use shuffle_batch_join to use multiple reader
# instances, or set shuffle_batch's n_threads to higher than 1.
return batch
def CELEByida(path):
fs = [os.path.join(path, f)
for f in os.listdir(path) if f.endswith('.jpg')]
fs=sorted(fs)
return fs
print("Loading celebrities...")
from libs.datasets import CELEB
files1 = CELEByida("../session-1/img_align_celeba/") # only 100
files2 = CELEByida("../session-1/img_align_celeba_n/") # only 100
from libs.dataset_utils import create_input_pipeline
batch_size = 8
n_epochs = 3
input_shape = [218, 178, 3]
crop_shape = [64, 64, 3]
crop_factor = 0.8
seed=15
batch1 = create_input_pipeline_yida(
files1=files1, files2=files2,
batch_size=batch_size,
n_epochs=n_epochs,
crop_shape=crop_shape,
crop_factor=crop_factor,
shape=input_shape,
seed=seed)
mntg=[]
sess = tf.Session()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
batres = sess.run(batch1)
batch_xs1=np.array(batres[0])
batch_xs2=np.array(batres[1])
for i in range(0,len(batch_xs1)):
img=batch_xs1[i] / 255.0
mntg.append(img)
img=batch_xs2[i] / 255.0
mntg.append(img)
TID=datetime.date.today().strftime("%Y%m%d")+"_"+datetime.datetime.now().time().strftime("%H%M%S")
m=utils.montage(mntg, saveto="montage_"+TID+".png")
# mntg[0] = color
# mntg[1] = black & white
plt.figure(figsize=(5, 5))
plt.imshow(m)
plt.show()
# eop
| apache-2.0 |
alphacsc/alphacsc | examples/csc/plot_lfp_data.py | 1 | 3791 | """
==============================
CSC to learn LFP spiking atoms
==============================
Here, we show how CSC can be used to learn spiking
atoms from Local Field Potential (LFP) data [1].
[1] Hitziger, Sebastian, et al.
Adaptive Waveform Learning: A Framework for Modeling Variability in
Neurophysiological Signals. IEEE Transactions on Signal Processing (2017).
"""
###############################################################################
# First, let us fetch the data (~14 MB)
import os
from mne.utils import _fetch_file
url = ('https://github.com/hitziger/AWL/raw/master/Experiments/data/'
'LFP_data_contiguous_1250_Hz.mat')
fname = './LFP_data_contiguous_1250_Hz.mat'
if not os.path.exists(fname):
_fetch_file(url, fname)
###############################################################################
# It is a mat file, so we use scipy to load it
from scipy import io
data = io.loadmat(fname)
X, sfreq = data['X'].T, float(data['sfreq'])
###############################################################################
# And now let us look at the data
import numpy as np
import matplotlib.pyplot as plt
start, stop = 11000, 15000
times = np.arange(start, stop) / sfreq
plt.plot(times, X[0, start:stop], color='b')
plt.xlabel('Time (s)')
plt.ylabel(r'$\mu$ V')
plt.xlim([9., 12.])
###############################################################################
# and filter it using a convenient function from MNE. This will remove low
# frequency drifts, but we keep the high frequencies
from mne.filter import filter_data
X = filter_data(
X.astype(np.float64), sfreq, l_freq=1, h_freq=None, fir_design='firwin')
###############################################################################
# Now, we define the parameters of our model.
reg = 6.0
n_times = 2500
n_times_atom = 350
n_trials = 100
n_atoms = 3
n_iter = 60
###############################################################################
# Let's stick to one random state for now, but if you want to learn how to
# select the random state, consult :ref:`this example
# <sphx_glr_auto_examples_plot_simulate_randomstate.py>`.
random_state = 10
###############################################################################
# Now, we epoch the trials
overlap = 0
starts = np.arange(0, X.shape[1] - n_times, n_times - overlap)
stops = np.arange(n_times, X.shape[1], n_times - overlap)
X_new = []
for idx, (start, stop) in enumerate(zip(starts, stops)):
if idx >= n_trials:
break
X_new.append(X[0, start:stop])
X_new = np.vstack(X_new)
del X
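###############################################################################
# A quick, optional sanity check: each epoch is one row of length ``n_times``
# and at most ``n_trials`` epochs are kept.
assert X_new.shape[1] == n_times
assert X_new.shape[0] <= n_trials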
###############################################################################
# We remove the mean and scale to unit variance.
X_new -= np.mean(X_new)
X_new /= np.std(X_new)
###############################################################################
# The convolutions can result in edge artifacts at the edges of the trials.
# Therefore, we discount the contributions from the edges by windowing the
# trials.
from numpy import hamming
X_new *= hamming(n_times)[None, :]
###############################################################################
# Of course, in a data-limited setting we want to use as much of the data as
# possible. If this is the case, you can set `overlap` to non-zero (for example
# half the epoch length).
#
# Now, we run regular CSC since the trials are not too noisy
from alphacsc import learn_d_z
pobj, times, d_hat, z_hat, reg = learn_d_z(X_new, n_atoms, n_times_atom,
reg=reg, n_iter=n_iter,
random_state=random_state, n_jobs=1)
###############################################################################
# Let's look at the atoms now.
plt.figure()
plt.plot(d_hat.T)
plt.show()
| bsd-3-clause |
rbn920/feebb | feebb/test.py | 1 | 1640 | from feebb import *
import matplotlib.pyplot as plt
pre = Preprocessor()
pre.load_json('ex_json/test2.json')
elems = [Element(elem) for elem in pre.elements]
print(pre.supports)
beam = Beam(elems, pre.supports)
post = Postprocessor(beam, 10)
print(max(post.interp('moment')))
print(min(post.interp('moment')))
plt.plot(post.interp('moment'))
plt.show()
print(max(post.interp('shear')))
print(min(post.interp('shear')))
plt.plot(post.interp('shear'))
plt.show()
pre = Preprocessor()
pre.load_json('ex_json/test2m.json')
elems = [Element(elem) for elem in pre.elements]
beam = Beam(elems, pre.supports)
post = Postprocessor(beam, 10)
print(max(post.interp('moment')))
print(min(post.interp('moment')))
plt.plot(post.interp('moment'))
plt.show()
print(max(post.interp('shear')))
print(min(post.interp('shear')))
plt.plot(post.interp('shear'))
plt.show()
pre = Preprocessor()
pre.load_json('ex_json/test2mm.json')
elems = [Element(elem) for elem in pre.elements]
beam = Beam(elems, pre.supports)
post = Postprocessor(beam, 10)
print(max(post.interp('moment')))
print(min(post.interp('moment')))
plt.plot(post.interp('moment'))
plt.show()
print(max(post.interp('shear')))
print(min(post.interp('shear')))
plt.plot(post.interp('shear'))
plt.show()
pre = Preprocessor()
pre.load_json('ex_json/test2mmm.json')
elems = [Element(elem) for elem in pre.elements]
beam = Beam(elems, pre.supports)
post = Postprocessor(beam, 10)
print(max(post.interp('moment')))
print(min(post.interp('moment')))
plt.plot(post.interp('moment'))
plt.show()
print(max(post.interp('shear')))
print(min(post.interp('shear')))
plt.plot(post.interp('shear'))
plt.show()
| mit |
sradanov/flyingpigeon | setup.py | 1 | 1385 | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
requires = [
'cdo',
'bokeh',
'ocgis',
'pandas',
'nose',
]
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Atmospheric Science',
]
setup(name='flyingpigeon',
version='0.2.0',
      description='Processes for climate data, indices and extreme events',
long_description=README + '\n\n' + CHANGES,
classifiers=classifiers,
author='Nils Hempelmann',
author_email='nils.hempelmann@ipsl.jussieu.fr',
url='http://www.lsce.ipsl.fr/',
license = "http://www.apache.org/licenses/LICENSE-2.0",
keywords='wps flyingpigeon pywps malleefowl ipsl birdhouse conda anaconda',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='nose.collector',
install_requires=requires,
entry_points = {
'console_scripts': [
          ]},
)
| apache-2.0 |
Garrett-R/scikit-learn | examples/decomposition/plot_image_denoising.py | 84 | 5820 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image, first fitting a dictionary with online
:ref:`DictionaryLearning` and then reconstructing with various transform
methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). In addition, it is closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
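# Each entry is (plot title, transform_algorithm passed to set_params, extra
# transform keyword arguments).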
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
yunque/sms-tools | lectures/03-Fourier-properties/plots-code/symmetry-real-even.py | 26 | 1150 | import matplotlib.pyplot as plt
import numpy as np
import sys
import math
from scipy.signal import triang
from scipy.fftpack import fft, fftshift
M = 127
N = 128
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
x = triang(M)
fftbuffer = np.zeros(N)
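# Zero-phase windowing: wrap the two halves of the (real, even) triangle
# around the FFT buffer so it is centred at n = 0; its DFT is then real up
# to numerical error, which is what the plots below illustrate.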
fftbuffer[:hM1] = x[hM2:]
fftbuffer[N-hM2:] = x[:hM2]
X = fftshift(fft(fftbuffer))
mX = abs(X)
pX = np.unwrap(np.angle(X))
plt.figure(1, figsize=(9.5, 4))
plt.subplot(311)
plt.title('x[n]')
plt.plot(np.arange(-hM2, hM1, 1.0), x, 'b', lw=1.5)
plt.axis([-hM2, hM1, 0, 1])
plt.subplot(323)
plt.title('real(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), np.real(X), 'r', lw=1.5)
plt.axis([-N/2, N/2, min(np.real(X)), max(np.real(X))])
plt.subplot(324)
plt.title('im(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), np.imag(X), 'c', lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.subplot(325)
plt.title('abs(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), mX, 'r', lw=1.5)
plt.axis([-N/2, N/2, min(mX), max(mX)])
plt.subplot(326)
plt.title('angle(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), pX, 'c', lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.tight_layout()
plt.savefig('symmetry-real-even.png')
plt.show()
| agpl-3.0 |
nickgentoo/LSTM-timepredictionPMdata | code/nick_evaluate_suffix_and_remaining_time_only_time_OHenc.py | 1 | 15048 | '''
This script takes as input the LSTM or RNN weights found by train.py.
Change the path in line 178 of this script to point to the h5 file
with the LSTM or RNN weights generated by train.py.
Author: Niek Tax
'''
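# Usage (inferred from the sys.argv reads below):
#     python nick_evaluate_suffix_and_remaining_time_only_time_OHenc.py <fileprefix> <eventlog.csv>
# where <eventlog.csv> is looked up under ../data/ and <fileprefix> names the
# model/weights files under output_files/models/.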
from __future__ import division
from keras.models import load_model
import csv
import copy
import numpy as np
import distance
from itertools import izip
from jellyfish._jellyfish import damerau_levenshtein_distance
import unicodecsv
from sklearn import metrics
from math import sqrt
import time
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from collections import Counter
from keras.models import model_from_json
import sys
fileprefix=sys.argv[1]
eventlog = sys.argv[2]
csvfile = open('../data/%s' % eventlog, 'r')
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
next(spamreader, None) # skip the headers
ascii_offset = 161
lastcase = ''
line = ''
firstLine = True
lines = []
timeseqs = []
timeseqs2 = []
timeseqs3 = []
timeseqs4 = []
y_times = []
times = []
times2 = []
times3 = []
times4 = []
# nick
attributes = []
attributes_dict = []
attributes_sizes = []
numlines = 0
casestarttime = None
lasteventtime = None
csvfile = open('../data/%s' % eventlog, 'r')
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
next(spamreader, None) # skip the headers
ascii_offset = 161
y = []
for row in spamreader:
#print(row)
t = time.strptime(row[2], "%Y-%m-%d %H:%M:%S")
#test different format
#t = 0#time.strptime(row[2], "%Y/%m/%d %H:%M:%S")
if row[0]!=lastcase:
casestarttime = t
lasteventtime = t
lastcase = row[0]
if not firstLine:
#print (line)
lines.append(line)
timeseqs.append(times)
timeseqs2.append(times2)
#target
y_times.extend([times2[-1]-k for k in times2])
timeseqs3.append(times3)
timeseqs4.append(times4)
for i in xrange(len(attributes)):
#print(attributesvalues[i])
attributes[i].append(attributesvalues[i])
else:
#if firstline. I have to add te elements to attributes
for a in row[3:]:
attributes.append([])
attributes_dict.append({})
attributes_sizes.append(0)
#print(attributes)
n_events_in_trace=0
line = ''
times = []
times2 = []
times3 = []
times4 = []
attributesvalues = [ ]
numlines+=1
n_events_in_trace+=1
line+=unichr(int(row[1])+ascii_offset)
timesincelastevent = datetime.fromtimestamp(time.mktime(t))-datetime.fromtimestamp(time.mktime(lasteventtime))
timesincecasestart = datetime.fromtimestamp(time.mktime(t))-datetime.fromtimestamp(time.mktime(casestarttime))
midnight = datetime.fromtimestamp(time.mktime(t)).replace(hour=0, minute=0, second=0, microsecond=0)
timesincemidnight = datetime.fromtimestamp(time.mktime(t))-midnight
timediff = 86400 * timesincelastevent.days + timesincelastevent.seconds
timediff2 = 86400 * timesincecasestart.days + timesincecasestart.seconds
timediff3 = timesincemidnight.seconds
timediff4 = datetime.fromtimestamp(time.mktime(t)).weekday()
times.append(timediff)
times2.append(timediff2)
times3.append(timediff3)
times4.append(timediff4)
lasteventtime = t
firstLine = False
indexnick=0
for a in row[3:]:
if len(attributesvalues)<=indexnick:
attributesvalues.append([])
a=a.strip('"')
        # TODO: cast to integer if the value is an integer
if a!="":
try:
attr=float(a)
attributesvalues[indexnick].append(attr)
#print("float attr")
#print(a)
except:
if a not in attributes_dict[indexnick]:
attributes_dict[indexnick][a]=attributes_sizes[indexnick]+1
attributes_sizes[indexnick]=attributes_sizes[indexnick]+1
attributesvalues[indexnick].append(attributes_dict[indexnick][a])
else:
attributesvalues[indexnick].append(-1)
# if a in attributes_dict[indexnick]:
# attributesvalues.append(attributes_dict[indexnick][a])
# else:
# attributes_dict[indexnick][a]=attributes_sizes[indexnick]
# attributes_sizes[indexnick]+=1
# attributesvalues.append(attributes_dict[indexnick][a])
indexnick+=1
# add last case
lines.append(line)
timeseqs.append(times)
timeseqs2.append(times2)
timeseqs3.append(times3)
timeseqs4.append(times4)
y_times.extend([times2[-1] - k for k in times2])
for i in xrange(len(attributes)):
attributes[i].append(attributesvalues[i])
numlines+=1
divisor = np.mean([item for sublist in timeseqs for item in sublist])
print('divisor: {}'.format(divisor))
divisor2 = np.mean([item for sublist in timeseqs2 for item in sublist])
print('divisor2: {}'.format(divisor2))
step = 1
sentences = []
softness = 0
next_chars = []
lines = map(lambda x: x + '!', lines)
maxlen = max(map(lambda x: len(x), lines))
chars = map(lambda x: set(x), lines)
chars = list(set().union(*chars))
chars.sort()
target_chars = copy.copy(chars)
chars.remove('!')
lines = map(lambda x: x[:-2], lines)
print('total chars: {}, target chars: {}'.format(len(chars), len(target_chars)))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
target_char_indices = dict((c, i) for i, c in enumerate(target_chars))
target_indices_char = dict((i, c) for i, c in enumerate(target_chars))
#print(indices_char)
elems_per_fold = int(round(numlines / 3))
fold1 = lines[:elems_per_fold]
fold1_t = timeseqs[:elems_per_fold]
fold1_t2 = timeseqs2[:elems_per_fold]
fold1_t3 = timeseqs3[:elems_per_fold]
fold1_t4 = timeseqs4[:elems_per_fold]
with open('output_files/folds/' + eventlog + 'fold1.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row, timeseq in izip(fold1, fold1_t):
spamwriter.writerow([unicode(s).encode("utf-8") + '#{}'.format(t) for s, t in izip(row, timeseq)])
fold2 = lines[elems_per_fold:2 * elems_per_fold]
fold2_t = timeseqs[elems_per_fold:2 * elems_per_fold]
fold2_t2 = timeseqs2[elems_per_fold:2 * elems_per_fold]
fold2_t3 = timeseqs3[elems_per_fold:2 * elems_per_fold]
fold2_t4 = timeseqs4[elems_per_fold:2 * elems_per_fold]
with open('output_files/folds/' + eventlog + 'fold2.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row, timeseq in izip(fold2, fold2_t):
spamwriter.writerow([unicode(s).encode("utf-8") + '#{}'.format(t) for s, t in izip(row, timeseq)])
fold3 = lines[2 * elems_per_fold:]
fold3_t = timeseqs[2 * elems_per_fold:]
fold3_t2 = timeseqs2[2 * elems_per_fold:]
fold3_t3 = timeseqs3[2 * elems_per_fold:]
fold3_t4 = timeseqs4[2 * elems_per_fold:]
fold3_a=[a[2*elems_per_fold:] for a in attributes]
with open('output_files/folds/' + eventlog + 'fold3.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row, timeseq in izip(fold3, fold3_t):
spamwriter.writerow([unicode(s).encode("utf-8") + '#{}'.format(t) for s, t in izip(row, timeseq)])
y_t_seq=[]
for line in fold1+fold2:
for i in range(0, len(line), 1):
if i == 0:
continue
y_t_seq.append(y_times[0:i])
divisory = np.mean([item for sublist in y_t_seq for item in sublist])
print('divisory: {}'.format(divisory))
lines = fold3
lines_t = fold3_t
lines_t2 = fold3_t2
lines_t3 = fold3_t3
lines_t4 = fold3_t4
attributes=fold3_a
# set parameters
predict_size = maxlen
# load json and create model
json_file = open('output_files/models/'+fileprefix+'_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("output_files/models/"+fileprefix+"_weights_best.h5")
print("Loaded model from disk")
y_t_seq=[]
# load model, set this to the model generated by train.py
#model = load_model('output_files/models/200_model_59-1.50.h5')
# define helper functions
def encode(ex, sentence, times,times2, times3,times4, sentences_attributes,maxlen=maxlen):
#num_features = len(chars)+5+len(sentences_attributes)
num_features = len(chars) + 5
for idx in xrange(len(attributes)):
num_features += attributes_sizes[idx] + 1
#print(num_features)
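    # Feature layout per time step (as built below): one-hot encoded activity
    # over len(chars) slots, the position in the trace, four normalised time
    # features, and then one block per case attribute (a one-hot block of size
    # attributes_sizes[j] + 1 for categorical attributes, a single value for
    # numeric ones).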
X = np.zeros((1, maxlen, num_features), dtype=np.float32)
leftpad = maxlen-len(sentence)
times2 = np.cumsum(times)
#print "sentence",len(sentence)
for t, char in enumerate(sentence):
#print(t)
#midnight = times3[t].replace(hour=0, minute=0, second=0, microsecond=0)
#timesincemidnight = times3[t]-midnight
multiset_abstraction = Counter(sentence[:t+1])
for c in chars:
if c==char:
X[0, t+leftpad, char_indices[c]] = 1
X[0, t+leftpad, len(chars)] = t+1
X[0, t+leftpad, len(chars)+1] = times[t]/divisor
X[0, t+leftpad, len(chars)+2] = times2[t]/divisor2
X[0, t+leftpad, len(chars)+3] = times3[t]/86400
X[0, t+leftpad, len(chars)+4] = times4[t]/7
# for i in xrange(len(sentences_attributes)):
# #print(str(i)+" "+str(t))
# #print(sentences_attributes[i][t])
# #nick check the zero, it is there because it was a list
# X[0, t + leftpad, len(chars) + 5 + i] = sentences_attributes[i][t]
startoh = 0
for j in xrange(len(attributes)):
# X[i, t + leftpad, len(chars) + 5+j]=sentences_attributes[j][i][t]
if attributes_sizes[j] > 0:
X[0, t + leftpad, len(chars) + 5 + startoh + sentences_attributes[j][t]] = 1
else:
X[0, t + leftpad, len(chars) + 5 + startoh] = sentences_attributes[j][t]
startoh += (attributes_sizes[j] + 1)
return X
# # define helper functions
# def encode(sentence, times, times3, sentences_attributes,maxlen=maxlen):
# num_features = len(chars)+5+len(sentences_attributes)
# X = np.zeros((1, maxlen, num_features), dtype=np.float32)
# leftpad = maxlen-len(sentence)
# times2 = np.cumsum(times)
# print "sentence",len(sentence)
# for t, char in enumerate(sentence):
# midnight = times3[t].replace(hour=0, minute=0, second=0, microsecond=0)
# timesincemidnight = times3[t]-midnight
# multiset_abstraction = Counter(sentence[:t+1])
# for c in chars:
# if c==char:
# X[0, t+leftpad, char_indices[c]] = 1
# X[0, t+leftpad, len(chars)] = t+1
# X[0, t+leftpad, len(chars)+1] = times[t]/divisor
# X[0, t+leftpad, len(chars)+2] = times2[t]/divisor2
# X[0, t+leftpad, len(chars)+3] = timesincemidnight.seconds/86400
# X[0, t+leftpad, len(chars)+4] = times3[t].weekday()/7
# for i in xrange(len(sentences_attributes)):
# print(str(i)+" "+str(t))
# print(sentences_attributes[i][t])
# #nick check the zero, it is there because it was a list
# X[0, t + leftpad, len(chars) + 5+i]=sentences_attributes[i][t]
# return X,y
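# Map a vector of predicted class probabilities to the most likely target
# symbol (an argmax over the predictions; ties resolve to the highest index).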
def getSymbol(predictions):
maxPrediction = 0
symbol = ''
    i = 0
for prediction in predictions:
if(prediction>=maxPrediction):
maxPrediction = prediction
symbol = target_indices_char[i]
i += 1
return symbol
one_ahead_gt = []
one_ahead_pred = []
two_ahead_gt = []
two_ahead_pred = []
three_ahead_gt = []
three_ahead_pred = []
y_t_seq=[]
# make predictions
with open('output_files/results/'+fileprefix+'_suffix_and_remaining_time_%s' % eventlog, 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(["Prefix length", "Groud truth", "Ground truth times", "Predicted times", "RMSE", "MAE", "Median AE"])
#considering also size 1 prefixes
#for prefix_size in range(1,maxlen):
#print(prefix_size)
#print(len(lines),len(attributes[0]))
for ex, (line, times, times2, times3, times4) in enumerate(izip(lines, lines_t, lines_t2, lines_t3, lines_t3)):
        for prefix_size in range(1, len(line)):  # added -1 because we do not want 0 in the ground truth
#print(line,ex,len(line), len(attributes[0][ex]))
times.append(0)
cropped_line = ''.join(line[:prefix_size])
cropped_times = times[:prefix_size]
#print "times_len",len(cropped_times)
cropped_times2 = times2[:prefix_size]
cropped_times4 = times4[:prefix_size]
cropped_times3 = times3[:prefix_size]
cropped_attributes = [[] for i in xrange(len(attributes))]
for j in xrange(len(attributes)):
#print(attributes[j][ex])
cropped_attributes[j].extend(attributes[j][ex][0:prefix_size])
#print cropped_attributes
#y_t_seq.append(y_times[0:prefix_size])
#cropped_attributes= [a[:prefix_size] for a in attributes]
#print cropped_attribute
ground_truth = ''.join(line[prefix_size:prefix_size+predict_size])
ground_truth_t = times2[prefix_size-1] # era -1
#print(prefix_size,len(times2)-1)
case_end_time = times2[len(times2)-1]
ground_truth_t = case_end_time-ground_truth_t
predicted = ''
total_predicted_time = 0
#perform single prediction
enc = encode(ex,cropped_line, cropped_times,cropped_times2, cropped_times3,cropped_times4, cropped_attributes)
y = model.predict(enc, verbose=0) # make predictions
            # split predictions into separate activity and time predictions
#print y
y_t = y[0][0]
#prediction = getSymbol(y_char) # undo one-hot encoding
#cropped_line += prediction
if y_t<0:
y_t=0
cropped_times.append(y_t)
y_t = y_t * divisor
#cropped_times3.append(cropped_times3[-1] + timedelta(seconds=y_t))
total_predicted_time = total_predicted_time + y_t
output = []
if len(ground_truth)>0:
output.append(prefix_size)
output.append(unicode(ground_truth).encode("utf-8"))
output.append(ground_truth_t)
output.append(total_predicted_time)
output.append(metrics.mean_squared_error([ground_truth_t], [total_predicted_time]))
output.append(metrics.mean_absolute_error([ground_truth_t], [total_predicted_time]))
output.append(metrics.median_absolute_error([ground_truth_t], [total_predicted_time]))
spamwriter.writerow(output)
| gpl-3.0 |
shoyer/xarray | xarray/tests/test_variable.py | 1 | 87655 | import warnings
from copy import copy, deepcopy
from datetime import datetime, timedelta
from textwrap import dedent
import numpy as np
import pandas as pd
import pytest
import pytz
from xarray import Coordinate, Dataset, IndexVariable, Variable, set_options
from xarray.core import dtypes, duck_array_ops, indexing
from xarray.core.common import full_like, ones_like, zeros_like
from xarray.core.indexing import (
BasicIndexer,
CopyOnWriteArray,
DaskIndexingAdapter,
LazilyOuterIndexedArray,
MemoryCachedArray,
NumpyIndexingAdapter,
OuterIndexer,
PandasIndexAdapter,
VectorizedIndexer,
)
from xarray.core.pycompat import dask_array_type
from xarray.core.utils import NDArrayMixin
from xarray.core.variable import as_compatible_data, as_variable
from xarray.tests import requires_bottleneck
from . import (
assert_allclose,
assert_array_equal,
assert_equal,
assert_identical,
raises_regex,
requires_dask,
requires_sparse,
source_ndarray,
)
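# Pairs of xarray-style ``pad`` keyword arguments and the equivalent
# ``numpy.pad`` pad_width tuples, used to parametrize the pad tests below.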
_PAD_XR_NP_ARGS = [
[{"x": (2, 1)}, ((2, 1), (0, 0), (0, 0))],
[{"x": 1}, ((1, 1), (0, 0), (0, 0))],
[{"y": (0, 3)}, ((0, 0), (0, 3), (0, 0))],
[{"x": (3, 1), "z": (2, 0)}, ((3, 1), (0, 0), (2, 0))],
[{"x": (3, 1), "z": 2}, ((3, 1), (0, 0), (2, 2))],
]
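# Tests shared by Variable and its subclasses; concrete test classes bind
# ``cls`` to the class under test (e.g. ``TestVariable`` below uses
# ``Variable`` itself).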
class VariableSubclassobjects:
def test_properties(self):
data = 0.5 * np.arange(10)
v = self.cls(["time"], data, {"foo": "bar"})
assert v.dims == ("time",)
assert_array_equal(v.values, data)
assert v.dtype == float
assert v.shape == (10,)
assert v.size == 10
assert v.sizes == {"time": 10}
assert v.nbytes == 80
assert v.ndim == 1
assert len(v) == 10
assert v.attrs == {"foo": "bar"}
def test_attrs(self):
v = self.cls(["time"], 0.5 * np.arange(10))
assert v.attrs == {}
attrs = {"foo": "bar"}
v.attrs = attrs
assert v.attrs == attrs
assert isinstance(v.attrs, dict)
v.attrs["foo"] = "baz"
assert v.attrs["foo"] == "baz"
def test_getitem_dict(self):
v = self.cls(["x"], np.random.randn(5))
actual = v[{"x": 0}]
expected = v[0]
assert_identical(expected, actual)
def test_getitem_1d(self):
data = np.array([0, 1, 2])
v = self.cls(["x"], data)
v_new = v[dict(x=[0, 1])]
assert v_new.dims == ("x",)
assert_array_equal(v_new, data[[0, 1]])
v_new = v[dict(x=slice(None))]
assert v_new.dims == ("x",)
assert_array_equal(v_new, data)
v_new = v[dict(x=Variable("a", [0, 1]))]
assert v_new.dims == ("a",)
assert_array_equal(v_new, data[[0, 1]])
v_new = v[dict(x=1)]
assert v_new.dims == ()
assert_array_equal(v_new, data[1])
# tuple argument
v_new = v[slice(None)]
assert v_new.dims == ("x",)
assert_array_equal(v_new, data)
def test_getitem_1d_fancy(self):
v = self.cls(["x"], [0, 1, 2])
# 1d-variable should be indexable by multi-dimensional Variable
ind = Variable(("a", "b"), [[0, 1], [0, 1]])
v_new = v[ind]
assert v_new.dims == ("a", "b")
expected = np.array(v._data)[([0, 1], [0, 1]), ...]
assert_array_equal(v_new, expected)
# boolean indexing
ind = Variable(("x",), [True, False, True])
v_new = v[ind]
assert_identical(v[[0, 2]], v_new)
v_new = v[[True, False, True]]
assert_identical(v[[0, 2]], v_new)
with raises_regex(IndexError, "Boolean indexer should"):
ind = Variable(("a",), [True, False, True])
v[ind]
def test_getitem_with_mask(self):
v = self.cls(["x"], [0, 1, 2])
assert_identical(v._getitem_with_mask(-1), Variable((), np.nan))
assert_identical(
v._getitem_with_mask([0, -1, 1]), self.cls(["x"], [0, np.nan, 1])
)
assert_identical(v._getitem_with_mask(slice(2)), self.cls(["x"], [0, 1]))
assert_identical(
v._getitem_with_mask([0, -1, 1], fill_value=-99),
self.cls(["x"], [0, -99, 1]),
)
def test_getitem_with_mask_size_zero(self):
v = self.cls(["x"], [])
assert_identical(v._getitem_with_mask(-1), Variable((), np.nan))
assert_identical(
v._getitem_with_mask([-1, -1, -1]),
self.cls(["x"], [np.nan, np.nan, np.nan]),
)
def test_getitem_with_mask_nd_indexer(self):
v = self.cls(["x"], [0, 1, 2])
indexer = Variable(("x", "y"), [[0, -1], [-1, 2]])
assert_identical(v._getitem_with_mask(indexer, fill_value=-1), indexer)
def _assertIndexedLikeNDArray(self, variable, expected_value0, expected_dtype=None):
"""Given a 1-dimensional variable, verify that the variable is indexed
like a numpy.ndarray.
"""
assert variable[0].shape == ()
assert variable[0].ndim == 0
assert variable[0].size == 1
# test identity
assert variable.equals(variable.copy())
assert variable.identical(variable.copy())
# check value is equal for both ndarray and Variable
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "In the future, 'NAT == x'")
np.testing.assert_equal(variable.values[0], expected_value0)
np.testing.assert_equal(variable[0].values, expected_value0)
# check type or dtype is consistent for both ndarray and Variable
if expected_dtype is None:
# check output type instead of array dtype
assert type(variable.values[0]) == type(expected_value0)
assert type(variable[0].values) == type(expected_value0)
elif expected_dtype is not False:
assert variable.values[0].dtype == expected_dtype
assert variable[0].values.dtype == expected_dtype
def test_index_0d_int(self):
for value, dtype in [(0, np.int_), (np.int32(0), np.int32)]:
x = self.cls(["x"], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_float(self):
for value, dtype in [(0.5, np.float_), (np.float32(0.5), np.float32)]:
x = self.cls(["x"], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_string(self):
value = "foo"
dtype = np.dtype("U3")
x = self.cls(["x"], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_datetime(self):
d = datetime(2000, 1, 1)
x = self.cls(["x"], [d])
self._assertIndexedLikeNDArray(x, np.datetime64(d))
x = self.cls(["x"], [np.datetime64(d)])
self._assertIndexedLikeNDArray(x, np.datetime64(d), "datetime64[ns]")
x = self.cls(["x"], pd.DatetimeIndex([d]))
self._assertIndexedLikeNDArray(x, np.datetime64(d), "datetime64[ns]")
def test_index_0d_timedelta64(self):
td = timedelta(hours=1)
x = self.cls(["x"], [np.timedelta64(td)])
self._assertIndexedLikeNDArray(x, np.timedelta64(td), "timedelta64[ns]")
x = self.cls(["x"], pd.to_timedelta([td]))
self._assertIndexedLikeNDArray(x, np.timedelta64(td), "timedelta64[ns]")
def test_index_0d_not_a_time(self):
d = np.datetime64("NaT", "ns")
x = self.cls(["x"], [d])
self._assertIndexedLikeNDArray(x, d)
def test_index_0d_object(self):
class HashableItemWrapper:
def __init__(self, item):
self.item = item
def __eq__(self, other):
return self.item == other.item
def __hash__(self):
return hash(self.item)
def __repr__(self):
return "{}(item={!r})".format(type(self).__name__, self.item)
item = HashableItemWrapper((1, 2, 3))
x = self.cls("x", [item])
self._assertIndexedLikeNDArray(x, item, expected_dtype=False)
def test_0d_object_array_with_list(self):
listarray = np.empty((1,), dtype=object)
listarray[0] = [1, 2, 3]
x = self.cls("x", listarray)
assert_array_equal(x.data, listarray)
assert_array_equal(x[0].data, listarray.squeeze())
assert_array_equal(x.squeeze().data, listarray.squeeze())
def test_index_and_concat_datetime(self):
# regression test for #125
date_range = pd.date_range("2011-09-01", periods=10)
for dates in [date_range, date_range.values, date_range.to_pydatetime()]:
expected = self.cls("t", dates)
for times in [
[expected[i] for i in range(10)],
[expected[i : (i + 1)] for i in range(10)],
[expected[[i]] for i in range(10)],
]:
actual = Variable.concat(times, "t")
assert expected.dtype == actual.dtype
assert_array_equal(expected, actual)
def test_0d_time_data(self):
# regression test for #105
x = self.cls("time", pd.date_range("2000-01-01", periods=5))
expected = np.datetime64("2000-01-01", "ns")
assert x[0].values == expected
def test_datetime64_conversion(self):
times = pd.date_range("2000-01-01", periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype("datetime64[s]"), False),
(times.to_pydatetime(), False),
]:
v = self.cls(["t"], values)
assert v.dtype == np.dtype("datetime64[ns]")
assert_array_equal(v.values, times.values)
assert v.values.dtype == np.dtype("datetime64[ns]")
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_timedelta64_conversion(self):
times = pd.timedelta_range(start=0, periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype("timedelta64[s]"), False),
(times.to_pytimedelta(), False),
]:
v = self.cls(["t"], values)
assert v.dtype == np.dtype("timedelta64[ns]")
assert_array_equal(v.values, times.values)
assert v.values.dtype == np.dtype("timedelta64[ns]")
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_object_conversion(self):
data = np.arange(5).astype(str).astype(object)
actual = self.cls("x", data)
assert actual.dtype == data.dtype
def test_pandas_data(self):
v = self.cls(["x"], pd.Series([0, 1, 2], index=[3, 2, 1]))
assert_identical(v, v[[0, 1, 2]])
v = self.cls(["x"], pd.Index([0, 1, 2]))
assert v[0].values == v.values[0]
def test_pandas_period_index(self):
v = self.cls(["x"], pd.period_range(start="2000", periods=20, freq="B"))
v = v.load() # for dask-based Variable
assert v[0] == pd.Period("2000", freq="B")
assert "Period('2000-01-03', 'B')" in repr(v)
def test_1d_math(self):
x = 1.0 * np.arange(5)
y = np.ones(5)
# should we need `.to_base_variable()`?
# probably a break that `+v` changes type?
v = self.cls(["x"], x)
base_v = v.to_base_variable()
# unary ops
assert_identical(base_v, +v)
assert_identical(base_v, abs(v))
assert_array_equal((-v).values, -x)
# binary ops with numbers
assert_identical(base_v, v + 0)
assert_identical(base_v, 0 + v)
assert_identical(base_v, v * 1)
# binary ops with numpy arrays
assert_array_equal((v * x).values, x ** 2)
assert_array_equal((x * v).values, x ** 2)
assert_array_equal(v - y, v - 1)
assert_array_equal(y - v, 1 - v)
# verify attributes are dropped
v2 = self.cls(["x"], x, {"units": "meters"})
assert_identical(base_v, +v2)
# binary ops with all variables
assert_array_equal(v + v, 2 * v)
w = self.cls(["x"], y, {"foo": "bar"})
assert_identical(v + w, self.cls(["x"], x + y).to_base_variable())
assert_array_equal((v * w).values, x * y)
# something complicated
assert_array_equal((v ** 2 * w - 1 + x).values, x ** 2 * y - 1 + x)
# make sure dtype is preserved (for Index objects)
assert float == (+v).dtype
assert float == (+v).values.dtype
assert float == (0 + v).dtype
assert float == (0 + v).values.dtype
# check types of returned data
assert isinstance(+v, Variable)
assert not isinstance(+v, IndexVariable)
assert isinstance(0 + v, Variable)
assert not isinstance(0 + v, IndexVariable)
def test_1d_reduce(self):
x = np.arange(5)
v = self.cls(["x"], x)
actual = v.sum()
expected = Variable((), 10)
assert_identical(expected, actual)
assert type(actual) is Variable
def test_array_interface(self):
x = np.arange(5)
v = self.cls(["x"], x)
assert_array_equal(np.asarray(v), x)
# test patched in methods
assert_array_equal(v.astype(float), x.astype(float))
# think this is a break, that argsort changes the type
assert_identical(v.argsort(), v.to_base_variable())
assert_identical(v.clip(2, 3), self.cls("x", x.clip(2, 3)).to_base_variable())
# test ufuncs
assert_identical(np.sin(v), self.cls(["x"], np.sin(x)).to_base_variable())
assert isinstance(np.sin(v), Variable)
assert not isinstance(np.sin(v), IndexVariable)
def example_1d_objects(self):
for data in [
range(3),
0.5 * np.arange(3),
0.5 * np.arange(3, dtype=np.float32),
pd.date_range("2000-01-01", periods=3),
np.array(["a", "b", "c"], dtype=object),
]:
yield (self.cls("x", data), data)
def test___array__(self):
for v, data in self.example_1d_objects():
assert_array_equal(v.values, np.asarray(data))
assert_array_equal(np.asarray(v), np.asarray(data))
assert v[0].values == np.asarray(data)[0]
assert np.asarray(v[0]) == np.asarray(data)[0]
def test_equals_all_dtypes(self):
for v, _ in self.example_1d_objects():
v2 = v.copy()
assert v.equals(v2)
assert v.identical(v2)
assert v.no_conflicts(v2)
assert v[0].equals(v2[0])
assert v[0].identical(v2[0])
assert v[0].no_conflicts(v2[0])
assert v[:2].equals(v2[:2])
assert v[:2].identical(v2[:2])
assert v[:2].no_conflicts(v2[:2])
def test_eq_all_dtypes(self):
# ensure that we don't choke on comparisons for which numpy returns
# scalars
expected = Variable("x", 3 * [False])
for v, _ in self.example_1d_objects():
actual = "z" == v
assert_identical(expected, actual)
actual = ~("z" != v)
assert_identical(expected, actual)
def test_encoding_preserved(self):
expected = self.cls("x", range(3), {"foo": 1}, {"bar": 2})
for actual in [
expected.T,
expected[...],
expected.squeeze(),
expected.isel(x=slice(None)),
expected.set_dims({"x": 3}),
expected.copy(deep=True),
expected.copy(deep=False),
]:
assert_identical(expected.to_base_variable(), actual.to_base_variable())
assert expected.encoding == actual.encoding
def test_concat(self):
x = np.arange(5)
y = np.arange(5, 10)
v = self.cls(["a"], x)
w = self.cls(["a"], y)
assert_identical(
Variable(["b", "a"], np.array([x, y])), Variable.concat([v, w], "b")
)
assert_identical(
Variable(["b", "a"], np.array([x, y])), Variable.concat((v, w), "b")
)
assert_identical(
Variable(["b", "a"], np.array([x, y])), Variable.concat((v, w), "b")
)
with raises_regex(ValueError, "Variable has dimensions"):
Variable.concat([v, Variable(["c"], y)], "b")
# test indexers
actual = Variable.concat(
[v, w], positions=[np.arange(0, 10, 2), np.arange(1, 10, 2)], dim="a"
)
expected = Variable("a", np.array([x, y]).ravel(order="F"))
assert_identical(expected, actual)
# test concatenating along a dimension
v = Variable(["time", "x"], np.random.random((10, 8)))
assert_identical(v, Variable.concat([v[:5], v[5:]], "time"))
assert_identical(v, Variable.concat([v[:5], v[5:6], v[6:]], "time"))
assert_identical(v, Variable.concat([v[:1], v[1:]], "time"))
# test dimension order
assert_identical(v, Variable.concat([v[:, :5], v[:, 5:]], "x"))
with raises_regex(ValueError, "all input arrays must have"):
Variable.concat([v[:, 0], v[:, 1:]], "x")
def test_concat_attrs(self):
# always keep attrs from first variable
v = self.cls("a", np.arange(5), {"foo": "bar"})
w = self.cls("a", np.ones(5))
expected = self.cls(
"a", np.concatenate([np.arange(5), np.ones(5)])
).to_base_variable()
expected.attrs["foo"] = "bar"
assert_identical(expected, Variable.concat([v, w], "a"))
def test_concat_fixed_len_str(self):
# regression test for #217
for kind in ["S", "U"]:
x = self.cls("animal", np.array(["horse"], dtype=kind))
y = self.cls("animal", np.array(["aardvark"], dtype=kind))
actual = Variable.concat([x, y], "animal")
expected = Variable("animal", np.array(["horse", "aardvark"], dtype=kind))
assert_equal(expected, actual)
def test_concat_number_strings(self):
# regression test for #305
a = self.cls("x", ["0", "1", "2"])
b = self.cls("x", ["3", "4"])
actual = Variable.concat([a, b], dim="x")
expected = Variable("x", np.arange(5).astype(str))
assert_identical(expected, actual)
assert actual.dtype.kind == expected.dtype.kind
def test_concat_mixed_dtypes(self):
a = self.cls("x", [0, 1])
b = self.cls("x", ["two"])
actual = Variable.concat([a, b], dim="x")
expected = Variable("x", np.array([0, 1, "two"], dtype=object))
assert_identical(expected, actual)
assert actual.dtype == object
@pytest.mark.parametrize("deep", [True, False])
@pytest.mark.parametrize("astype", [float, int, str])
def test_copy(self, deep, astype):
v = self.cls("x", (0.5 * np.arange(10)).astype(astype), {"foo": "bar"})
w = v.copy(deep=deep)
assert type(v) is type(w)
assert_identical(v, w)
assert v.dtype == w.dtype
if self.cls is Variable:
if deep:
assert source_ndarray(v.values) is not source_ndarray(w.values)
else:
assert source_ndarray(v.values) is source_ndarray(w.values)
assert_identical(v, copy(v))
def test_copy_index(self):
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three")
)
v = self.cls("x", midx)
for deep in [True, False]:
w = v.copy(deep=deep)
assert isinstance(w._data, PandasIndexAdapter)
assert isinstance(w.to_index(), pd.MultiIndex)
assert_array_equal(v._data.array, w._data.array)
def test_copy_with_data(self):
orig = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"})
new_data = np.array([[2.5, 5.0], [7.1, 43]])
actual = orig.copy(data=new_data)
expected = orig.copy()
expected.data = new_data
assert_identical(expected, actual)
def test_copy_with_data_errors(self):
orig = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"})
new_data = [2.5, 5.0]
with raises_regex(ValueError, "must match shape of object"):
orig.copy(data=new_data)
def test_copy_index_with_data(self):
orig = IndexVariable("x", np.arange(5))
new_data = np.arange(5, 10)
actual = orig.copy(data=new_data)
expected = IndexVariable("x", np.arange(5, 10))
assert_identical(expected, actual)
def test_copy_index_with_data_errors(self):
orig = IndexVariable("x", np.arange(5))
new_data = np.arange(5, 20)
with raises_regex(ValueError, "must match shape of object"):
orig.copy(data=new_data)
with raises_regex(ValueError, "Cannot assign to the .data"):
orig.data = new_data
with raises_regex(ValueError, "Cannot assign to the .values"):
orig.values = new_data
def test_replace(self):
var = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"})
result = var._replace()
assert_identical(result, var)
new_data = np.arange(4).reshape(2, 2)
result = var._replace(data=new_data)
assert_array_equal(result.data, new_data)
def test_real_and_imag(self):
v = self.cls("x", np.arange(3) - 1j * np.arange(3), {"foo": "bar"})
expected_re = self.cls("x", np.arange(3), {"foo": "bar"})
assert_identical(v.real, expected_re)
expected_im = self.cls("x", -np.arange(3), {"foo": "bar"})
assert_identical(v.imag, expected_im)
expected_abs = self.cls("x", np.sqrt(2 * np.arange(3) ** 2)).to_base_variable()
assert_allclose(abs(v), expected_abs)
def test_aggregate_complex(self):
# should skip NaNs
v = self.cls("x", [1, 2j, np.nan])
expected = Variable((), 0.5 + 1j)
assert_allclose(v.mean(), expected)
    def test_pandas_categorical_dtype(self):
data = pd.Categorical(np.arange(10, dtype="int64"))
v = self.cls("x", data)
print(v) # should not error
assert v.dtype == "int64"
def test_pandas_datetime64_with_tz(self):
data = pd.date_range(
start="2000-01-01",
tz=pytz.timezone("America/New_York"),
periods=10,
freq="1h",
)
v = self.cls("x", data)
print(v) # should not error
if "America/New_York" in str(data.dtype):
# pandas is new enough that it has datetime64 with timezone dtype
assert v.dtype == "object"
def test_multiindex(self):
idx = pd.MultiIndex.from_product([list("abc"), [0, 1]])
v = self.cls("x", idx)
assert_identical(Variable((), ("a", 0)), v[0])
assert_identical(v, v[:])
def test_load(self):
array = self.cls("x", np.arange(5))
orig_data = array._data
copied = array.copy(deep=True)
if array.chunks is None:
array.load()
assert type(array._data) is type(orig_data)
assert type(copied._data) is type(orig_data)
assert_identical(array, copied)
def test_getitem_advanced(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
v_data = v.compute().data
# orthogonal indexing
v_new = v[([0, 1], [1, 0])]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v_data[[0, 1]][:, [1, 0]])
v_new = v[[0, 1]]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v_data[[0, 1]])
# with mixed arguments
ind = Variable(["a"], [0, 1])
v_new = v[dict(x=[0, 1], y=ind)]
assert v_new.dims == ("x", "a")
assert_array_equal(v_new, v_data[[0, 1]][:, [0, 1]])
# boolean indexing
v_new = v[dict(x=[True, False], y=[False, True, False])]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v_data[0][1])
# with scalar variable
ind = Variable((), 2)
v_new = v[dict(y=ind)]
expected = v[dict(y=2)]
assert_array_equal(v_new, expected)
# with boolean variable with wrong shape
ind = np.array([True, False])
with raises_regex(IndexError, "Boolean array size 2 is "):
v[Variable(("a", "b"), [[0, 1]]), ind]
# boolean indexing with different dimension
ind = Variable(["a"], [True, False, False])
with raises_regex(IndexError, "Boolean indexer should be"):
v[dict(y=ind)]
def test_getitem_uint_1d(self):
# regression test for #1405
v = self.cls(["x"], [0, 1, 2])
v_data = v.compute().data
v_new = v[np.array([0])]
assert_array_equal(v_new, v_data[0])
v_new = v[np.array([0], dtype="uint64")]
assert_array_equal(v_new, v_data[0])
def test_getitem_uint(self):
# regression test for #1405
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
v_data = v.compute().data
v_new = v[np.array([0])]
assert_array_equal(v_new, v_data[[0], :])
v_new = v[np.array([0], dtype="uint64")]
assert_array_equal(v_new, v_data[[0], :])
v_new = v[np.uint64(0)]
assert_array_equal(v_new, v_data[0, :])
def test_getitem_0d_array(self):
# make sure 0d-np.array can be used as an indexer
v = self.cls(["x"], [0, 1, 2])
v_data = v.compute().data
v_new = v[np.array([0])[0]]
assert_array_equal(v_new, v_data[0])
v_new = v[np.array(0)]
assert_array_equal(v_new, v_data[0])
v_new = v[Variable((), np.array(0))]
assert_array_equal(v_new, v_data[0])
def test_getitem_fancy(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
v_data = v.compute().data
ind = Variable(["a", "b"], [[0, 1, 1], [1, 1, 0]])
v_new = v[ind]
assert v_new.dims == ("a", "b", "y")
assert_array_equal(v_new, v_data[[[0, 1, 1], [1, 1, 0]], :])
# It would be ok if indexed with the multi-dimensional array including
# the same name
ind = Variable(["x", "b"], [[0, 1, 1], [1, 1, 0]])
v_new = v[ind]
assert v_new.dims == ("x", "b", "y")
assert_array_equal(v_new, v_data[[[0, 1, 1], [1, 1, 0]], :])
ind = Variable(["a", "b"], [[0, 1, 2], [2, 1, 0]])
v_new = v[dict(y=ind)]
assert v_new.dims == ("x", "a", "b")
assert_array_equal(v_new, v_data[:, ([0, 1, 2], [2, 1, 0])])
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=[1, 0], y=ind)]
assert v_new.dims == ("x", "a", "b")
assert_array_equal(v_new, v_data[[1, 0]][:, ind])
# along diagonal
ind = Variable(["a"], [0, 1])
v_new = v[ind, ind]
assert v_new.dims == ("a",)
assert_array_equal(v_new, v_data[[0, 1], [0, 1]])
# with integer
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=0, y=ind)]
assert v_new.dims == ("a", "b")
assert_array_equal(v_new[0], v_data[0][[0, 0]])
assert_array_equal(v_new[1], v_data[0][[1, 1]])
# with slice
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=slice(None), y=ind)]
assert v_new.dims == ("x", "a", "b")
assert_array_equal(v_new, v_data[:, [[0, 0], [1, 1]]])
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=ind, y=slice(None))]
assert v_new.dims == ("a", "b", "y")
assert_array_equal(v_new, v_data[[[0, 0], [1, 1]], :])
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=ind, y=slice(None, 1))]
assert v_new.dims == ("a", "b", "y")
assert_array_equal(v_new, v_data[[[0, 0], [1, 1]], slice(None, 1)])
# slice matches explicit dimension
ind = Variable(["y"], [0, 1])
v_new = v[ind, :2]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v_data[[0, 1], [0, 1]])
# with multiple slices
v = self.cls(["x", "y", "z"], [[[1, 2, 3], [4, 5, 6]]])
ind = Variable(["a", "b"], [[0]])
v_new = v[ind, :, :]
expected = Variable(["a", "b", "y", "z"], v.data[np.newaxis, ...])
assert_identical(v_new, expected)
v = Variable(["w", "x", "y", "z"], [[[[1, 2, 3], [4, 5, 6]]]])
ind = Variable(["y"], [0])
v_new = v[ind, :, 1:2, 2]
expected = Variable(["y", "x"], [[6]])
assert_identical(v_new, expected)
# slice and vector mixed indexing resulting in the same dimension
v = Variable(["x", "y", "z"], np.arange(60).reshape(3, 4, 5))
ind = Variable(["x"], [0, 1, 2])
v_new = v[:, ind]
expected = Variable(("x", "z"), np.zeros((3, 5)))
expected[0] = v.data[0, 0]
expected[1] = v.data[1, 1]
expected[2] = v.data[2, 2]
assert_identical(v_new, expected)
v_new = v[:, ind.data]
assert v_new.shape == (3, 3, 5)
def test_getitem_error(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
with raises_regex(IndexError, "labeled multi-"):
v[[[0, 1], [1, 2]]]
ind_x = Variable(["a"], [0, 1, 1])
ind_y = Variable(["a"], [0, 1])
with raises_regex(IndexError, "Dimensions of indexers "):
v[ind_x, ind_y]
ind = Variable(["a", "b"], [[True, False], [False, True]])
with raises_regex(IndexError, "2-dimensional boolean"):
v[dict(x=ind)]
v = Variable(["x", "y", "z"], np.arange(60).reshape(3, 4, 5))
ind = Variable(["x"], [0, 1])
with raises_regex(IndexError, "Dimensions of indexers mis"):
v[:, ind]
@pytest.mark.parametrize(
"mode",
[
"mean",
pytest.param(
"median",
marks=pytest.mark.xfail(reason="median is not implemented by Dask"),
),
pytest.param(
"reflect", marks=pytest.mark.xfail(reason="dask.array.pad bug")
),
"edge",
pytest.param(
"linear_ramp",
marks=pytest.mark.xfail(
reason="pint bug: https://github.com/hgrecco/pint/issues/1026"
),
),
"maximum",
"minimum",
"symmetric",
"wrap",
],
)
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad(self, mode, xr_arg, np_arg):
data = np.arange(4 * 3 * 2).reshape(4, 3, 2)
v = self.cls(["x", "y", "z"], data)
actual = v.pad(mode=mode, **xr_arg)
expected = np.pad(data, np_arg, mode=mode)
assert_array_equal(actual, expected)
assert isinstance(actual._data, type(v._data))
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad_constant_values(self, xr_arg, np_arg):
data = np.arange(4 * 3 * 2).reshape(4, 3, 2)
v = self.cls(["x", "y", "z"], data)
actual = v.pad(**xr_arg)
expected = np.pad(
np.array(v.data.astype(float)),
np_arg,
mode="constant",
constant_values=np.nan,
)
assert_array_equal(actual, expected)
assert isinstance(actual._data, type(v._data))
# for the boolean array, we pad False
data = np.full_like(data, False, dtype=bool).reshape(4, 3, 2)
v = self.cls(["x", "y", "z"], data)
actual = v.pad(mode="constant", constant_values=False, **xr_arg)
expected = np.pad(
np.array(v.data), np_arg, mode="constant", constant_values=False
)
assert_array_equal(actual, expected)
def test_rolling_window(self):
# Just a working test. See test_nputils for the algorithm validation
v = self.cls(["x", "y", "z"], np.arange(40 * 30 * 2).reshape(40, 30, 2))
for (d, w) in [("x", 3), ("y", 5)]:
v_rolling = v.rolling_window(d, w, d + "_window")
assert v_rolling.dims == ("x", "y", "z", d + "_window")
assert v_rolling.shape == v.shape + (w,)
v_rolling = v.rolling_window(d, w, d + "_window", center=True)
assert v_rolling.dims == ("x", "y", "z", d + "_window")
assert v_rolling.shape == v.shape + (w,)
# dask and numpy result should be the same
v_loaded = v.load().rolling_window(d, w, d + "_window", center=True)
assert_array_equal(v_rolling, v_loaded)
# numpy backend should not be over-written
if isinstance(v._data, np.ndarray):
with pytest.raises(ValueError):
v_loaded[0] = 1.0
class TestVariable(VariableSubclassobjects):
cls = staticmethod(Variable)
@pytest.fixture(autouse=True)
def setup(self):
self.d = np.random.random((10, 3)).astype(np.float64)
def test_data_and_values(self):
v = Variable(["time", "x"], self.d)
assert_array_equal(v.data, self.d)
assert_array_equal(v.values, self.d)
assert source_ndarray(v.values) is self.d
with pytest.raises(ValueError):
# wrong size
v.values = np.random.random(5)
d2 = np.random.random((10, 3))
v.values = d2
assert source_ndarray(v.values) is d2
d3 = np.random.random((10, 3))
v.data = d3
assert source_ndarray(v.data) is d3
def test_numpy_same_methods(self):
v = Variable([], np.float32(0.0))
assert v.item() == 0
assert type(v.item()) is float
v = IndexVariable("x", np.arange(5))
assert 2 == v.searchsorted(2)
def test_datetime64_conversion_scalar(self):
expected = np.datetime64("2000-01-01", "ns")
for values in [
np.datetime64("2000-01-01"),
pd.Timestamp("2000-01-01T00"),
datetime(2000, 1, 1),
]:
v = Variable([], values)
assert v.dtype == np.dtype("datetime64[ns]")
assert v.values == expected
assert v.values.dtype == np.dtype("datetime64[ns]")
def test_timedelta64_conversion_scalar(self):
expected = np.timedelta64(24 * 60 * 60 * 10 ** 9, "ns")
for values in [
np.timedelta64(1, "D"),
pd.Timedelta("1 day"),
timedelta(days=1),
]:
v = Variable([], values)
assert v.dtype == np.dtype("timedelta64[ns]")
assert v.values == expected
assert v.values.dtype == np.dtype("timedelta64[ns]")
def test_0d_str(self):
v = Variable([], "foo")
assert v.dtype == np.dtype("U3")
assert v.values == "foo"
v = Variable([], np.string_("foo"))
assert v.dtype == np.dtype("S3")
assert v.values == bytes("foo", "ascii")
def test_0d_datetime(self):
v = Variable([], pd.Timestamp("2000-01-01"))
assert v.dtype == np.dtype("datetime64[ns]")
assert v.values == np.datetime64("2000-01-01", "ns")
def test_0d_timedelta(self):
for td in [pd.to_timedelta("1s"), np.timedelta64(1, "s")]:
v = Variable([], td)
assert v.dtype == np.dtype("timedelta64[ns]")
assert v.values == np.timedelta64(10 ** 9, "ns")
def test_equals_and_identical(self):
d = np.random.rand(10, 3)
d[0, 0] = np.nan
v1 = Variable(("dim1", "dim2"), data=d, attrs={"att1": 3, "att2": [1, 2, 3]})
v2 = Variable(("dim1", "dim2"), data=d, attrs={"att1": 3, "att2": [1, 2, 3]})
assert v1.equals(v2)
assert v1.identical(v2)
v3 = Variable(("dim1", "dim3"), data=d)
assert not v1.equals(v3)
v4 = Variable(("dim1", "dim2"), data=d)
assert v1.equals(v4)
assert not v1.identical(v4)
v5 = deepcopy(v1)
v5.values[:] = np.random.rand(10, 3)
assert not v1.equals(v5)
assert not v1.equals(None)
assert not v1.equals(d)
assert not v1.identical(None)
assert not v1.identical(d)
def test_broadcast_equals(self):
v1 = Variable((), np.nan)
v2 = Variable(("x"), [np.nan, np.nan])
assert v1.broadcast_equals(v2)
assert not v1.equals(v2)
assert not v1.identical(v2)
v3 = Variable(("x"), [np.nan])
assert v1.broadcast_equals(v3)
assert not v1.equals(v3)
assert not v1.identical(v3)
assert not v1.broadcast_equals(None)
v4 = Variable(("x"), [np.nan] * 3)
assert not v2.broadcast_equals(v4)
def test_no_conflicts(self):
v1 = Variable(("x"), [1, 2, np.nan, np.nan])
v2 = Variable(("x"), [np.nan, 2, 3, np.nan])
assert v1.no_conflicts(v2)
assert not v1.equals(v2)
assert not v1.broadcast_equals(v2)
assert not v1.identical(v2)
assert not v1.no_conflicts(None)
v3 = Variable(("y"), [np.nan, 2, 3, np.nan])
assert not v3.no_conflicts(v1)
d = np.array([1, 2, np.nan, np.nan])
assert not v1.no_conflicts(d)
assert not v2.no_conflicts(d)
v4 = Variable(("w", "x"), [d])
assert v1.no_conflicts(v4)
def test_as_variable(self):
data = np.arange(10)
expected = Variable("x", data)
expected_extra = Variable(
"x", data, attrs={"myattr": "val"}, encoding={"scale_factor": 1}
)
assert_identical(expected, as_variable(expected))
ds = Dataset({"x": expected})
var = as_variable(ds["x"]).to_base_variable()
assert_identical(expected, var)
assert not isinstance(ds["x"], Variable)
assert isinstance(as_variable(ds["x"]), Variable)
xarray_tuple = (
expected_extra.dims,
expected_extra.values,
expected_extra.attrs,
expected_extra.encoding,
)
assert_identical(expected_extra, as_variable(xarray_tuple))
with raises_regex(TypeError, "tuple of form"):
as_variable(tuple(data))
with raises_regex(ValueError, "tuple of form"): # GH1016
as_variable(("five", "six", "seven"))
with raises_regex(TypeError, "without an explicit list of dimensions"):
as_variable(data)
actual = as_variable(data, name="x")
assert_identical(expected.to_index_variable(), actual)
actual = as_variable(0)
expected = Variable([], 0)
assert_identical(expected, actual)
data = np.arange(9).reshape((3, 3))
expected = Variable(("x", "y"), data)
with raises_regex(ValueError, "without explicit dimension names"):
as_variable(data, name="x")
with raises_regex(ValueError, "has more than 1-dimension"):
as_variable(expected, name="x")
# test datetime, timedelta conversion
dt = np.array([datetime(1999, 1, 1) + timedelta(days=x) for x in range(10)])
assert as_variable(dt, "time").dtype.kind == "M"
td = np.array([timedelta(days=x) for x in range(10)])
assert as_variable(td, "time").dtype.kind == "m"
def test_repr(self):
v = Variable(["time", "x"], [[1, 2, 3], [4, 5, 6]], {"foo": "bar"})
expected = dedent(
"""
<xarray.Variable (time: 2, x: 3)>
array([[1, 2, 3],
[4, 5, 6]])
Attributes:
foo: bar
"""
).strip()
assert expected == repr(v)
def test_repr_lazy_data(self):
v = Variable("x", LazilyOuterIndexedArray(np.arange(2e5)))
assert "200000 values with dtype" in repr(v)
assert isinstance(v._data, LazilyOuterIndexedArray)
def test_detect_indexer_type(self):
""" Tests indexer type was correctly detected. """
data = np.random.random((10, 11))
v = Variable(["x", "y"], data)
_, ind, _ = v._broadcast_indexes((0, 1))
assert type(ind) == indexing.BasicIndexer
_, ind, _ = v._broadcast_indexes((0, slice(0, 8, 2)))
assert type(ind) == indexing.BasicIndexer
_, ind, _ = v._broadcast_indexes((0, [0, 1]))
assert type(ind) == indexing.OuterIndexer
_, ind, _ = v._broadcast_indexes(([0, 1], 1))
assert type(ind) == indexing.OuterIndexer
_, ind, _ = v._broadcast_indexes(([0, 1], [1, 2]))
assert type(ind) == indexing.OuterIndexer
_, ind, _ = v._broadcast_indexes(([0, 1], slice(0, 8, 2)))
assert type(ind) == indexing.OuterIndexer
vind = Variable(("a",), [0, 1])
_, ind, _ = v._broadcast_indexes((vind, slice(0, 8, 2)))
assert type(ind) == indexing.OuterIndexer
vind = Variable(("y",), [0, 1])
_, ind, _ = v._broadcast_indexes((vind, 3))
assert type(ind) == indexing.OuterIndexer
vind = Variable(("a",), [0, 1])
_, ind, _ = v._broadcast_indexes((vind, vind))
assert type(ind) == indexing.VectorizedIndexer
vind = Variable(("a", "b"), [[0, 2], [1, 3]])
_, ind, _ = v._broadcast_indexes((vind, 3))
assert type(ind) == indexing.VectorizedIndexer
def test_indexer_type(self):
# GH:issue:1688. Wrong indexer type induces NotImplementedError
data = np.random.random((10, 11))
v = Variable(["x", "y"], data)
def assert_indexer_type(key, object_type):
dims, index_tuple, new_order = v._broadcast_indexes(key)
assert isinstance(index_tuple, object_type)
# should return BasicIndexer
assert_indexer_type((0, 1), BasicIndexer)
assert_indexer_type((0, slice(None, None)), BasicIndexer)
assert_indexer_type((Variable([], 3), slice(None, None)), BasicIndexer)
assert_indexer_type((Variable([], 3), (Variable([], 6))), BasicIndexer)
# should return OuterIndexer
assert_indexer_type(([0, 1], 1), OuterIndexer)
assert_indexer_type(([0, 1], [1, 2]), OuterIndexer)
assert_indexer_type((Variable(("x"), [0, 1]), 1), OuterIndexer)
assert_indexer_type((Variable(("x"), [0, 1]), slice(None, None)), OuterIndexer)
assert_indexer_type(
(Variable(("x"), [0, 1]), Variable(("y"), [0, 1])), OuterIndexer
)
# should return VectorizedIndexer
assert_indexer_type((Variable(("y"), [0, 1]), [0, 1]), VectorizedIndexer)
assert_indexer_type(
(Variable(("z"), [0, 1]), Variable(("z"), [0, 1])), VectorizedIndexer
)
assert_indexer_type(
(
Variable(("a", "b"), [[0, 1], [1, 2]]),
Variable(("a", "b"), [[0, 1], [1, 2]]),
),
VectorizedIndexer,
)
def test_items(self):
data = np.random.random((10, 11))
v = Variable(["x", "y"], data)
# test slicing
assert_identical(v, v[:])
assert_identical(v, v[...])
assert_identical(Variable(["y"], data[0]), v[0])
assert_identical(Variable(["x"], data[:, 0]), v[:, 0])
assert_identical(Variable(["x", "y"], data[:3, :2]), v[:3, :2])
# test array indexing
x = Variable(["x"], np.arange(10))
y = Variable(["y"], np.arange(11))
assert_identical(v, v[x.values])
assert_identical(v, v[x])
assert_identical(v[:3], v[x < 3])
assert_identical(v[:, 3:], v[:, y >= 3])
assert_identical(v[:3, 3:], v[x < 3, y >= 3])
assert_identical(v[:3, :2], v[x[:3], y[:2]])
assert_identical(v[:3, :2], v[range(3), range(2)])
# test iteration
for n, item in enumerate(v):
assert_identical(Variable(["y"], data[n]), item)
with raises_regex(TypeError, "iteration over a 0-d"):
iter(Variable([], 0))
# test setting
v.values[:] = 0
assert np.all(v.values == 0)
# test orthogonal setting
v[range(10), range(11)] = 1
assert_array_equal(v.values, np.ones((10, 11)))
def test_getitem_basic(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
# int argument
v_new = v[0]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v._data[0])
# slice argument
v_new = v[:2]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v._data[:2])
# list arguments
v_new = v[[0]]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v._data[[0]])
v_new = v[[]]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v._data[[]])
# dict arguments
v_new = v[dict(x=0)]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v._data[0])
v_new = v[dict(x=0, y=slice(None))]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v._data[0])
v_new = v[dict(x=0, y=1)]
assert v_new.dims == ()
assert_array_equal(v_new, v._data[0, 1])
v_new = v[dict(y=1)]
assert v_new.dims == ("x",)
assert_array_equal(v_new, v._data[:, 1])
# tuple argument
v_new = v[(slice(None), 1)]
assert v_new.dims == ("x",)
assert_array_equal(v_new, v._data[:, 1])
# test that we obtain a modifiable view when taking a 0d slice
v_new = v[0, 0]
v_new[...] += 99
assert_array_equal(v_new, v._data[0, 0])
def test_getitem_with_mask_2d_input(self):
v = Variable(("x", "y"), [[0, 1, 2], [3, 4, 5]])
assert_identical(
v._getitem_with_mask(([-1, 0], [1, -1])),
Variable(("x", "y"), [[np.nan, np.nan], [1, np.nan]]),
)
assert_identical(v._getitem_with_mask((slice(2), [0, 1, 2])), v)
def test_isel(self):
v = Variable(["time", "x"], self.d)
assert_identical(v.isel(time=slice(None)), v)
assert_identical(v.isel(time=0), v[0])
assert_identical(v.isel(time=slice(0, 3)), v[:3])
assert_identical(v.isel(x=0), v[:, 0])
assert_identical(v.isel(x=[0, 2]), v[:, [0, 2]])
assert_identical(v.isel(time=[]), v[[]])
with raises_regex(
ValueError,
r"dimensions {'not_a_dim'} do not exist. Expected one or more of "
r"\('time', 'x'\)",
):
v.isel(not_a_dim=0)
with pytest.warns(
UserWarning,
match=r"dimensions {'not_a_dim'} do not exist. Expected one or more of "
r"\('time', 'x'\)",
):
v.isel(not_a_dim=0, missing_dims="warn")
assert_identical(v, v.isel(not_a_dim=0, missing_dims="ignore"))
def test_index_0d_numpy_string(self):
        # regression test to verify our workaround for indexing 0d strings
v = Variable([], np.string_("asdf"))
assert_identical(v[()], v)
v = Variable([], np.unicode_("asdf"))
assert_identical(v[()], v)
def test_indexing_0d_unicode(self):
# regression test for GH568
actual = Variable(("x"), ["tmax"])[0][()]
expected = Variable((), "tmax")
assert_identical(actual, expected)
@pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0])
def test_shift(self, fill_value):
v = Variable("x", [1, 2, 3, 4, 5])
assert_identical(v, v.shift(x=0))
assert v is not v.shift(x=0)
expected = Variable("x", [np.nan, np.nan, 1, 2, 3])
assert_identical(expected, v.shift(x=2))
if fill_value == dtypes.NA:
# if we supply the default, we expect the missing value for a
# float array
fill_value_exp = np.nan
else:
fill_value_exp = fill_value
expected = Variable("x", [fill_value_exp, 1, 2, 3, 4])
assert_identical(expected, v.shift(x=1, fill_value=fill_value))
expected = Variable("x", [2, 3, 4, 5, fill_value_exp])
assert_identical(expected, v.shift(x=-1, fill_value=fill_value))
expected = Variable("x", [fill_value_exp] * 5)
assert_identical(expected, v.shift(x=5, fill_value=fill_value))
assert_identical(expected, v.shift(x=6, fill_value=fill_value))
with raises_regex(ValueError, "dimension"):
v.shift(z=0)
v = Variable("x", [1, 2, 3, 4, 5], {"foo": "bar"})
assert_identical(v, v.shift(x=0))
expected = Variable("x", [fill_value_exp, 1, 2, 3, 4], {"foo": "bar"})
assert_identical(expected, v.shift(x=1, fill_value=fill_value))
def test_shift2d(self):
v = Variable(("x", "y"), [[1, 2], [3, 4]])
expected = Variable(("x", "y"), [[np.nan, np.nan], [np.nan, 1]])
assert_identical(expected, v.shift(x=1, y=1))
def test_roll(self):
v = Variable("x", [1, 2, 3, 4, 5])
assert_identical(v, v.roll(x=0))
assert v is not v.roll(x=0)
expected = Variable("x", [5, 1, 2, 3, 4])
assert_identical(expected, v.roll(x=1))
assert_identical(expected, v.roll(x=-4))
assert_identical(expected, v.roll(x=6))
expected = Variable("x", [4, 5, 1, 2, 3])
assert_identical(expected, v.roll(x=2))
assert_identical(expected, v.roll(x=-3))
with raises_regex(ValueError, "dimension"):
v.roll(z=0)
def test_roll_consistency(self):
v = Variable(("x", "y"), np.random.randn(5, 6))
for axis, dim in [(0, "x"), (1, "y")]:
for shift in [-3, 0, 1, 7, 11]:
expected = np.roll(v.values, shift, axis=axis)
actual = v.roll(**{dim: shift}).values
assert_array_equal(expected, actual)
def test_transpose(self):
v = Variable(["time", "x"], self.d)
v2 = Variable(["x", "time"], self.d.T)
assert_identical(v, v2.transpose())
assert_identical(v.transpose(), v.T)
x = np.random.randn(2, 3, 4, 5)
w = Variable(["a", "b", "c", "d"], x)
w2 = Variable(["d", "b", "c", "a"], np.einsum("abcd->dbca", x))
assert w2.shape == (5, 3, 4, 2)
assert_identical(w2, w.transpose("d", "b", "c", "a"))
assert_identical(w2, w.transpose("d", ..., "a"))
assert_identical(w2, w.transpose("d", "b", "c", ...))
assert_identical(w2, w.transpose(..., "b", "c", "a"))
assert_identical(w, w2.transpose("a", "b", "c", "d"))
w3 = Variable(["b", "c", "d", "a"], np.einsum("abcd->bcda", x))
assert_identical(w, w3.transpose("a", "b", "c", "d"))
def test_transpose_0d(self):
for value in [
3.5,
("a", 1),
np.datetime64("2000-01-01"),
np.timedelta64(1, "h"),
None,
object(),
]:
variable = Variable([], value)
actual = variable.transpose()
assert actual.identical(variable)
def test_squeeze(self):
v = Variable(["x", "y"], [[1]])
assert_identical(Variable([], 1), v.squeeze())
assert_identical(Variable(["y"], [1]), v.squeeze("x"))
assert_identical(Variable(["y"], [1]), v.squeeze(["x"]))
assert_identical(Variable(["x"], [1]), v.squeeze("y"))
assert_identical(Variable([], 1), v.squeeze(["x", "y"]))
v = Variable(["x", "y"], [[1, 2]])
assert_identical(Variable(["y"], [1, 2]), v.squeeze())
assert_identical(Variable(["y"], [1, 2]), v.squeeze("x"))
with raises_regex(ValueError, "cannot select a dimension"):
v.squeeze("y")
def test_get_axis_num(self):
v = Variable(["x", "y", "z"], np.random.randn(2, 3, 4))
assert v.get_axis_num("x") == 0
assert v.get_axis_num(["x"]) == (0,)
assert v.get_axis_num(["x", "y"]) == (0, 1)
assert v.get_axis_num(["z", "y", "x"]) == (2, 1, 0)
with raises_regex(ValueError, "not found in array dim"):
v.get_axis_num("foobar")
def test_set_dims(self):
v = Variable(["x"], [0, 1])
actual = v.set_dims(["x", "y"])
expected = Variable(["x", "y"], [[0], [1]])
assert_identical(actual, expected)
actual = v.set_dims(["y", "x"])
assert_identical(actual, expected.T)
actual = v.set_dims({"x": 2, "y": 2})
expected = Variable(["x", "y"], [[0, 0], [1, 1]])
assert_identical(actual, expected)
v = Variable(["foo"], [0, 1])
actual = v.set_dims("foo")
expected = v
assert_identical(actual, expected)
with raises_regex(ValueError, "must be a superset"):
v.set_dims(["z"])
def test_set_dims_object_dtype(self):
v = Variable([], ("a", 1))
actual = v.set_dims(("x",), (3,))
exp_values = np.empty((3,), dtype=object)
for i in range(3):
exp_values[i] = ("a", 1)
expected = Variable(["x"], exp_values)
assert actual.identical(expected)
def test_stack(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]], {"foo": "bar"})
actual = v.stack(z=("x", "y"))
expected = Variable("z", [0, 1, 2, 3], v.attrs)
assert_identical(actual, expected)
actual = v.stack(z=("x",))
expected = Variable(("y", "z"), v.data.T, v.attrs)
assert_identical(actual, expected)
actual = v.stack(z=())
assert_identical(actual, v)
actual = v.stack(X=("x",), Y=("y",)).transpose("X", "Y")
expected = Variable(("X", "Y"), v.data, v.attrs)
assert_identical(actual, expected)
def test_stack_errors(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]], {"foo": "bar"})
with raises_regex(ValueError, "invalid existing dim"):
v.stack(z=("x1",))
with raises_regex(ValueError, "cannot create a new dim"):
v.stack(x=("x",))
def test_unstack(self):
v = Variable("z", [0, 1, 2, 3], {"foo": "bar"})
actual = v.unstack(z={"x": 2, "y": 2})
expected = Variable(("x", "y"), [[0, 1], [2, 3]], v.attrs)
assert_identical(actual, expected)
actual = v.unstack(z={"x": 4, "y": 1})
expected = Variable(("x", "y"), [[0], [1], [2], [3]], v.attrs)
assert_identical(actual, expected)
actual = v.unstack(z={"x": 4})
expected = Variable("x", [0, 1, 2, 3], v.attrs)
assert_identical(actual, expected)
def test_unstack_errors(self):
v = Variable("z", [0, 1, 2, 3])
with raises_regex(ValueError, "invalid existing dim"):
v.unstack(foo={"x": 4})
with raises_regex(ValueError, "cannot create a new dim"):
v.stack(z=("z",))
with raises_regex(ValueError, "the product of the new dim"):
v.unstack(z={"x": 5})
def test_unstack_2d(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]])
actual = v.unstack(y={"z": 2})
expected = Variable(["x", "z"], v.data)
assert_identical(actual, expected)
actual = v.unstack(x={"z": 2})
expected = Variable(["y", "z"], v.data.T)
assert_identical(actual, expected)
def test_stack_unstack_consistency(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]])
actual = v.stack(z=("x", "y")).unstack(z={"x": 2, "y": 2})
assert_identical(actual, v)
def test_broadcasting_math(self):
x = np.random.randn(2, 3)
v = Variable(["a", "b"], x)
# 1d to 2d broadcasting
assert_identical(v * v, Variable(["a", "b"], np.einsum("ab,ab->ab", x, x)))
assert_identical(v * v[0], Variable(["a", "b"], np.einsum("ab,b->ab", x, x[0])))
assert_identical(v[0] * v, Variable(["b", "a"], np.einsum("b,ab->ba", x[0], x)))
assert_identical(
v[0] * v[:, 0], Variable(["b", "a"], np.einsum("b,a->ba", x[0], x[:, 0]))
)
# higher dim broadcasting
y = np.random.randn(3, 4, 5)
w = Variable(["b", "c", "d"], y)
assert_identical(
v * w, Variable(["a", "b", "c", "d"], np.einsum("ab,bcd->abcd", x, y))
)
assert_identical(
w * v, Variable(["b", "c", "d", "a"], np.einsum("bcd,ab->bcda", y, x))
)
assert_identical(
v * w[0], Variable(["a", "b", "c", "d"], np.einsum("ab,cd->abcd", x, y[0]))
)
def test_broadcasting_failures(self):
a = Variable(["x"], np.arange(10))
b = Variable(["x"], np.arange(5))
c = Variable(["x", "x"], np.arange(100).reshape(10, 10))
with raises_regex(ValueError, "mismatched lengths"):
a + b
with raises_regex(ValueError, "duplicate dimensions"):
a + c
def test_inplace_math(self):
x = np.arange(5)
v = Variable(["x"], x)
v2 = v
v2 += 1
assert v is v2
# since we provided an ndarray for data, it is also modified in-place
assert source_ndarray(v.values) is x
assert_array_equal(v.values, np.arange(5) + 1)
with raises_regex(ValueError, "dimensions cannot change"):
v += Variable("y", np.arange(5))
def test_reduce(self):
v = Variable(["x", "y"], self.d, {"ignored": "attributes"})
assert_identical(v.reduce(np.std, "x"), Variable(["y"], self.d.std(axis=0)))
assert_identical(v.reduce(np.std, axis=0), v.reduce(np.std, dim="x"))
assert_identical(
v.reduce(np.std, ["y", "x"]), Variable([], self.d.std(axis=(0, 1)))
)
assert_identical(v.reduce(np.std), Variable([], self.d.std()))
assert_identical(
v.reduce(np.mean, "x").reduce(np.std, "y"),
Variable([], self.d.mean(axis=0).std()),
)
assert_allclose(v.mean("x"), v.reduce(np.mean, "x"))
with raises_regex(ValueError, "cannot supply both"):
v.mean(dim="x", axis=0)
with pytest.warns(DeprecationWarning, match="allow_lazy is deprecated"):
v.mean(dim="x", allow_lazy=True)
with pytest.warns(DeprecationWarning, match="allow_lazy is deprecated"):
v.mean(dim="x", allow_lazy=False)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]])
@pytest.mark.parametrize(
"axis, dim", zip([None, 0, [0], [0, 1]], [None, "x", ["x"], ["x", "y"]])
)
def test_quantile(self, q, axis, dim, skipna):
v = Variable(["x", "y"], self.d)
actual = v.quantile(q, dim=dim, skipna=skipna)
_percentile_func = np.nanpercentile if skipna else np.percentile
expected = _percentile_func(self.d, np.array(q) * 100, axis=axis)
np.testing.assert_allclose(actual.values, expected)
@requires_dask
@pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]])
@pytest.mark.parametrize("axis, dim", [[1, "y"], [[1], ["y"]]])
def test_quantile_dask(self, q, axis, dim):
v = Variable(["x", "y"], self.d).chunk({"x": 2})
actual = v.quantile(q, dim=dim)
assert isinstance(actual.data, dask_array_type)
expected = np.nanpercentile(self.d, np.array(q) * 100, axis=axis)
np.testing.assert_allclose(actual.values, expected)
@requires_dask
def test_quantile_chunked_dim_error(self):
v = Variable(["x", "y"], self.d).chunk({"x": 2})
with raises_regex(ValueError, "dimension 'x'"):
v.quantile(0.5, dim="x")
@pytest.mark.parametrize("q", [-0.1, 1.1, [2], [0.25, 2]])
def test_quantile_out_of_bounds(self, q):
v = Variable(["x", "y"], self.d)
# escape special characters
with raises_regex(ValueError, r"Quantiles must be in the range \[0, 1\]"):
v.quantile(q, dim="x")
@requires_dask
@requires_bottleneck
def test_rank_dask_raises(self):
v = Variable(["x"], [3.0, 1.0, np.nan, 2.0, 4.0]).chunk(2)
with raises_regex(TypeError, "arrays stored as dask"):
v.rank("x")
@requires_bottleneck
def test_rank(self):
import bottleneck as bn
# floats
v = Variable(["x", "y"], [[3, 4, np.nan, 1]])
expect_0 = bn.nanrankdata(v.data, axis=0)
expect_1 = bn.nanrankdata(v.data, axis=1)
np.testing.assert_allclose(v.rank("x").values, expect_0)
np.testing.assert_allclose(v.rank("y").values, expect_1)
# int
v = Variable(["x"], [3, 2, 1])
expect = bn.rankdata(v.data, axis=0)
np.testing.assert_allclose(v.rank("x").values, expect)
# str
v = Variable(["x"], ["c", "b", "a"])
expect = bn.rankdata(v.data, axis=0)
np.testing.assert_allclose(v.rank("x").values, expect)
# pct
v = Variable(["x"], [3.0, 1.0, np.nan, 2.0, 4.0])
v_expect = Variable(["x"], [0.75, 0.25, np.nan, 0.5, 1.0])
assert_equal(v.rank("x", pct=True), v_expect)
# invalid dim
with raises_regex(ValueError, "not found"):
v.rank("y")
def test_big_endian_reduce(self):
# regression test for GH489
data = np.ones(5, dtype=">f4")
v = Variable(["x"], data)
expected = Variable([], 5)
assert_identical(expected, v.sum())
def test_reduce_funcs(self):
v = Variable("x", np.array([1, np.nan, 2, 3]))
assert_identical(v.mean(), Variable([], 2))
assert_identical(v.mean(skipna=True), Variable([], 2))
assert_identical(v.mean(skipna=False), Variable([], np.nan))
assert_identical(np.mean(v), Variable([], 2))
assert_identical(v.prod(), Variable([], 6))
assert_identical(v.cumsum(axis=0), Variable("x", np.array([1, 1, 3, 6])))
assert_identical(v.cumprod(axis=0), Variable("x", np.array([1, 1, 2, 6])))
assert_identical(v.var(), Variable([], 2.0 / 3))
assert_identical(v.median(), Variable([], 2))
v = Variable("x", [True, False, False])
assert_identical(v.any(), Variable([], True))
assert_identical(v.all(dim="x"), Variable([], False))
v = Variable("t", pd.date_range("2000-01-01", periods=3))
assert v.argmax(skipna=True) == 2
assert_identical(v.max(), Variable([], pd.Timestamp("2000-01-03")))
def test_reduce_keepdims(self):
v = Variable(["x", "y"], self.d)
assert_identical(
v.mean(keepdims=True), Variable(v.dims, np.mean(self.d, keepdims=True))
)
assert_identical(
v.mean(dim="x", keepdims=True),
Variable(v.dims, np.mean(self.d, axis=0, keepdims=True)),
)
assert_identical(
v.mean(dim="y", keepdims=True),
Variable(v.dims, np.mean(self.d, axis=1, keepdims=True)),
)
assert_identical(
v.mean(dim=["y", "x"], keepdims=True),
Variable(v.dims, np.mean(self.d, axis=(1, 0), keepdims=True)),
)
v = Variable([], 1.0)
assert_identical(
v.mean(keepdims=True), Variable([], np.mean(v.data, keepdims=True))
)
@requires_dask
def test_reduce_keepdims_dask(self):
import dask.array
v = Variable(["x", "y"], self.d).chunk()
actual = v.mean(keepdims=True)
assert isinstance(actual.data, dask.array.Array)
expected = Variable(v.dims, np.mean(self.d, keepdims=True))
assert_identical(actual, expected)
actual = v.mean(dim="y", keepdims=True)
assert isinstance(actual.data, dask.array.Array)
expected = Variable(v.dims, np.mean(self.d, axis=1, keepdims=True))
assert_identical(actual, expected)
def test_reduce_keep_attrs(self):
_attrs = {"units": "test", "long_name": "testing"}
v = Variable(["x", "y"], self.d, _attrs)
# Test dropped attrs
vm = v.mean()
assert len(vm.attrs) == 0
assert vm.attrs == {}
# Test kept attrs
vm = v.mean(keep_attrs=True)
assert len(vm.attrs) == len(_attrs)
assert vm.attrs == _attrs
def test_binary_ops_keep_attrs(self):
_attrs = {"units": "test", "long_name": "testing"}
a = Variable(["x", "y"], np.random.randn(3, 3), _attrs)
b = Variable(["x", "y"], np.random.randn(3, 3), _attrs)
# Test dropped attrs
d = a - b # just one operation
assert d.attrs == {}
# Test kept attrs
with set_options(keep_attrs=True):
d = a - b
assert d.attrs == _attrs
def test_count(self):
expected = Variable([], 3)
actual = Variable(["x"], [1, 2, 3, np.nan]).count()
assert_identical(expected, actual)
v = Variable(["x"], np.array(["1", "2", "3", np.nan], dtype=object))
actual = v.count()
assert_identical(expected, actual)
actual = Variable(["x"], [True, False, True]).count()
assert_identical(expected, actual)
assert actual.dtype == int
expected = Variable(["x"], [2, 3])
actual = Variable(["x", "y"], [[1, 0, np.nan], [1, 1, 1]]).count("y")
assert_identical(expected, actual)
def test_setitem(self):
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[0, 1] = 1
assert v[0, 1] == 1
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[dict(x=[0, 1])] = 1
assert_array_equal(v[[0, 1]], np.ones_like(v[[0, 1]]))
# boolean indexing
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[dict(x=[True, False])] = 1
assert_array_equal(v[0], np.ones_like(v[0]))
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[dict(x=[True, False], y=[False, True, False])] = 1
assert v[0, 1] == 1
def test_setitem_fancy(self):
# assignment which should work as np.ndarray does
def assert_assigned_2d(array, key_x, key_y, values):
expected = array.copy()
expected[key_x, key_y] = values
v = Variable(["x", "y"], array)
v[dict(x=key_x, y=key_y)] = values
assert_array_equal(expected, v)
# 1d vectorized indexing
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a"], [0, 1]),
key_y=Variable(["a"], [0, 1]),
values=0,
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a"], [0, 1]),
key_y=Variable(["a"], [0, 1]),
values=Variable((), 0),
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a"], [0, 1]),
key_y=Variable(["a"], [0, 1]),
values=Variable(("a"), [3, 2]),
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=slice(None),
key_y=Variable(["a"], [0, 1]),
values=Variable(("a"), [3, 2]),
)
# 2d-vectorized indexing
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a", "b"], [[0, 1]]),
key_y=Variable(["a", "b"], [[1, 0]]),
values=0,
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a", "b"], [[0, 1]]),
key_y=Variable(["a", "b"], [[1, 0]]),
values=[0],
)
assert_assigned_2d(
np.random.randn(5, 4),
key_x=Variable(["a", "b"], [[0, 1], [2, 3]]),
key_y=Variable(["a", "b"], [[1, 0], [3, 3]]),
values=[2, 3],
)
# vindex with slice
v = Variable(["x", "y", "z"], np.ones((4, 3, 2)))
ind = Variable(["a"], [0, 1])
v[dict(x=ind, z=ind)] = 0
expected = Variable(["x", "y", "z"], np.ones((4, 3, 2)))
expected[0, :, 0] = 0
expected[1, :, 1] = 0
assert_identical(expected, v)
# dimension broadcast
v = Variable(["x", "y"], np.ones((3, 2)))
ind = Variable(["a", "b"], [[0, 1]])
v[ind, :] = 0
expected = Variable(["x", "y"], [[0, 0], [0, 0], [1, 1]])
assert_identical(expected, v)
with raises_regex(ValueError, "shape mismatch"):
v[ind, ind] = np.zeros((1, 2, 1))
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
ind = Variable(["a"], [0, 1])
v[dict(x=ind)] = Variable(["a", "y"], np.ones((2, 3), dtype=int) * 10)
assert_array_equal(v[0], np.ones_like(v[0]) * 10)
assert_array_equal(v[1], np.ones_like(v[1]) * 10)
assert v.dims == ("x", "y") # dimension should not change
# increment
v = Variable(["x", "y"], np.arange(6).reshape(3, 2))
ind = Variable(["a"], [0, 1])
v[dict(x=ind)] += 1
expected = Variable(["x", "y"], [[1, 2], [3, 4], [4, 5]])
assert_identical(v, expected)
ind = Variable(["a"], [0, 0])
v[dict(x=ind)] += 1
expected = Variable(["x", "y"], [[2, 3], [3, 4], [4, 5]])
assert_identical(v, expected)
def test_coarsen(self):
v = self.cls(["x"], [0, 1, 2, 3, 4])
actual = v.coarsen({"x": 2}, boundary="pad", func="mean")
expected = self.cls(["x"], [0.5, 2.5, 4])
assert_identical(actual, expected)
actual = v.coarsen({"x": 2}, func="mean", boundary="pad", side="right")
expected = self.cls(["x"], [0, 1.5, 3.5])
assert_identical(actual, expected)
actual = v.coarsen({"x": 2}, func=np.mean, side="right", boundary="trim")
expected = self.cls(["x"], [1.5, 3.5])
assert_identical(actual, expected)
        # smoke test: these combinations should run without raising
v = self.cls(["x", "y", "z"], np.arange(40 * 30 * 2).reshape(40, 30, 2))
for windows, func, side, boundary in [
({"x": 2}, np.mean, "left", "trim"),
({"x": 2}, np.median, {"x": "left"}, "pad"),
({"x": 2, "y": 3}, np.max, "left", {"x": "pad", "y": "trim"}),
]:
v.coarsen(windows, func, boundary, side)
def test_coarsen_2d(self):
        # a 2d mean should be the same as successive 1d means
v = self.cls(["x", "y"], np.arange(6 * 12).reshape(6, 12))
actual = v.coarsen({"x": 3, "y": 4}, func="mean")
expected = v.coarsen({"x": 3}, func="mean").coarsen({"y": 4}, func="mean")
assert_equal(actual, expected)
v = self.cls(["x", "y"], np.arange(7 * 12).reshape(7, 12))
actual = v.coarsen({"x": 3, "y": 4}, func="mean", boundary="trim")
expected = v.coarsen({"x": 3}, func="mean", boundary="trim").coarsen(
{"y": 4}, func="mean", boundary="trim"
)
assert_equal(actual, expected)
        # if there is a NaN, the two should be different
v = self.cls(["x", "y"], 1.0 * np.arange(6 * 12).reshape(6, 12))
v[2, 4] = np.nan
v[3, 5] = np.nan
actual = v.coarsen({"x": 3, "y": 4}, func="mean", boundary="trim")
expected = (
v.coarsen({"x": 3}, func="sum", boundary="trim").coarsen(
{"y": 4}, func="sum", boundary="trim"
)
/ 12
)
assert not actual.equals(expected)
# adjusting the nan count
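        # (each 3x4 block has 12 cells; with one NaN the mean divides by
        # 11 values instead of 12, hence the 12/11 factor below)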
expected[0, 1] *= 12 / 11
expected[1, 1] *= 12 / 11
assert_allclose(actual, expected)
v = self.cls(("x", "y"), np.arange(4 * 4, dtype=np.float32).reshape(4, 4))
actual = v.coarsen(dict(x=2, y=2), func="count", boundary="exact")
expected = self.cls(("x", "y"), 4 * np.ones((2, 2)))
assert_equal(actual, expected)
v[0, 0] = np.nan
v[-1, -1] = np.nan
expected[0, 0] = 3
expected[-1, -1] = 3
actual = v.coarsen(dict(x=2, y=2), func="count", boundary="exact")
assert_equal(actual, expected)
actual = v.coarsen(dict(x=2, y=2), func="sum", boundary="exact", skipna=False)
expected = self.cls(("x", "y"), [[np.nan, 18], [42, np.nan]])
assert_equal(actual, expected)
actual = v.coarsen(dict(x=2, y=2), func="sum", boundary="exact", skipna=True)
expected = self.cls(("x", "y"), [[10, 18], [42, 35]])
assert_equal(actual, expected)
# perhaps @pytest.mark.parametrize("operation", [f for f in duck_array_ops])
def test_coarsen_keep_attrs(self, operation="mean"):
_attrs = {"units": "test", "long_name": "testing"}
test_func = getattr(duck_array_ops, operation, None)
# Test dropped attrs
with set_options(keep_attrs=False):
new = Variable(["coord"], np.linspace(1, 10, 100), attrs=_attrs).coarsen(
windows={"coord": 1}, func=test_func, boundary="exact", side="left"
)
assert new.attrs == {}
# Test kept attrs
with set_options(keep_attrs=True):
new = Variable(["coord"], np.linspace(1, 10, 100), attrs=_attrs).coarsen(
windows={"coord": 1}, func=test_func, boundary="exact", side="left"
)
assert new.attrs == _attrs
@requires_dask
class TestVariableWithDask(VariableSubclassobjects):
cls = staticmethod(lambda *args: Variable(*args).chunk())
@pytest.mark.xfail
def test_0d_object_array_with_list(self):
super().test_0d_object_array_with_list()
@pytest.mark.xfail
def test_array_interface(self):
# dask array does not have `argsort`
super().test_array_interface()
@pytest.mark.xfail
def test_copy_index(self):
super().test_copy_index()
@pytest.mark.xfail
def test_eq_all_dtypes(self):
super().test_eq_all_dtypes()
def test_getitem_fancy(self):
super().test_getitem_fancy()
def test_getitem_1d_fancy(self):
super().test_getitem_1d_fancy()
def test_getitem_with_mask_nd_indexer(self):
import dask.array as da
v = Variable(["x"], da.arange(3, chunks=3))
indexer = Variable(("x", "y"), [[0, -1], [-1, 2]])
assert_identical(
v._getitem_with_mask(indexer, fill_value=-1),
self.cls(("x", "y"), [[0, -1], [-1, 2]]),
)
@requires_sparse
class TestVariableWithSparse:
# TODO inherit VariableSubclassobjects to cover more tests
def test_as_sparse(self):
data = np.arange(12).reshape(3, 4)
var = Variable(("x", "y"), data)._as_sparse(fill_value=-1)
actual = var._to_dense()
assert_identical(var, actual)
class TestIndexVariable(VariableSubclassobjects):
cls = staticmethod(IndexVariable)
def test_init(self):
with raises_regex(ValueError, "must be 1-dimensional"):
IndexVariable((), 0)
def test_to_index(self):
data = 0.5 * np.arange(10)
v = IndexVariable(["time"], data, {"foo": "bar"})
assert pd.Index(data, name="time").identical(v.to_index())
def test_multiindex_default_level_names(self):
midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]])
v = IndexVariable(["x"], midx, {"foo": "bar"})
assert v.to_index().names == ("x_level_0", "x_level_1")
def test_data(self):
x = IndexVariable("x", np.arange(3.0))
assert isinstance(x._data, PandasIndexAdapter)
assert isinstance(x.data, np.ndarray)
assert float == x.dtype
assert_array_equal(np.arange(3), x)
assert float == x.values.dtype
with raises_regex(TypeError, "cannot be modified"):
x[:] = 0
def test_name(self):
coord = IndexVariable("x", [10.0])
assert coord.name == "x"
with pytest.raises(AttributeError):
coord.name = "y"
def test_level_names(self):
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=["level_1", "level_2"]
)
x = IndexVariable("x", midx)
assert x.level_names == midx.names
assert IndexVariable("y", [10.0]).level_names is None
def test_get_level_variable(self):
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=["level_1", "level_2"]
)
x = IndexVariable("x", midx)
level_1 = IndexVariable("x", midx.get_level_values("level_1"))
assert_identical(x.get_level_variable("level_1"), level_1)
with raises_regex(ValueError, "has no MultiIndex"):
IndexVariable("y", [10.0]).get_level_variable("level")
def test_concat_periods(self):
periods = pd.period_range("2000-01-01", periods=10)
coords = [IndexVariable("t", periods[:5]), IndexVariable("t", periods[5:])]
expected = IndexVariable("t", periods)
actual = IndexVariable.concat(coords, dim="t")
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.PeriodIndex)
positions = [list(range(5)), list(range(5, 10))]
actual = IndexVariable.concat(coords, dim="t", positions=positions)
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.PeriodIndex)
def test_concat_multiindex(self):
idx = pd.MultiIndex.from_product([[0, 1, 2], ["a", "b"]])
coords = [IndexVariable("x", idx[:2]), IndexVariable("x", idx[2:])]
expected = IndexVariable("x", idx)
actual = IndexVariable.concat(coords, dim="x")
assert actual.identical(expected)
assert isinstance(actual.to_index(), pd.MultiIndex)
def test_coordinate_alias(self):
with pytest.warns(Warning, match="deprecated"):
x = Coordinate("x", [1, 2, 3])
assert isinstance(x, IndexVariable)
def test_datetime64(self):
# GH:1932 Make sure indexing keeps precision
t = np.array([1518418799999986560, 1518418799999996560], dtype="datetime64[ns]")
v = IndexVariable("t", t)
assert v[0].data == t[0]
# These tests make use of multi-dimensional variables, which are not valid
# IndexVariable objects:
@pytest.mark.xfail
def test_getitem_error(self):
super().test_getitem_error()
@pytest.mark.xfail
def test_getitem_advanced(self):
super().test_getitem_advanced()
@pytest.mark.xfail
def test_getitem_fancy(self):
super().test_getitem_fancy()
@pytest.mark.xfail
def test_getitem_uint(self):
super().test_getitem_fancy()
@pytest.mark.xfail
@pytest.mark.parametrize(
"mode",
[
"mean",
"median",
"reflect",
"edge",
"linear_ramp",
"maximum",
"minimum",
"symmetric",
"wrap",
],
)
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad(self, mode, xr_arg, np_arg):
super().test_pad(mode, xr_arg, np_arg)
@pytest.mark.xfail
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad_constant_values(self, xr_arg, np_arg):
super().test_pad_constant_values(xr_arg, np_arg)
@pytest.mark.xfail
def test_rolling_window(self):
super().test_rolling_window()
@pytest.mark.xfail
def test_coarsen_2d(self):
super().test_coarsen_2d()
class TestAsCompatibleData:
def test_unchanged_types(self):
types = (np.asarray, PandasIndexAdapter, LazilyOuterIndexedArray)
for t in types:
for data in [
np.arange(3),
pd.date_range("2000-01-01", periods=3),
pd.date_range("2000-01-01", periods=3).values,
]:
x = t(data)
assert source_ndarray(x) is source_ndarray(as_compatible_data(x))
def test_converted_types(self):
for input_array in [[[0, 1, 2]], pd.DataFrame([[0, 1, 2]])]:
actual = as_compatible_data(input_array)
assert_array_equal(np.asarray(input_array), actual)
assert np.ndarray == type(actual)
assert np.asarray(input_array).dtype == actual.dtype
def test_masked_array(self):
original = np.ma.MaskedArray(np.arange(5))
expected = np.arange(5)
actual = as_compatible_data(original)
assert_array_equal(expected, actual)
assert np.dtype(int) == actual.dtype
original = np.ma.MaskedArray(np.arange(5), mask=4 * [False] + [True])
expected = np.arange(5.0)
expected[-1] = np.nan
actual = as_compatible_data(original)
assert_array_equal(expected, actual)
assert np.dtype(float) == actual.dtype
def test_datetime(self):
expected = np.datetime64("2000-01-01")
actual = as_compatible_data(expected)
assert expected == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
expected = np.array([np.datetime64("2000-01-01")])
actual = as_compatible_data(expected)
assert np.asarray(expected) == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
expected = np.array([np.datetime64("2000-01-01", "ns")])
actual = as_compatible_data(expected)
assert np.asarray(expected) == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
assert expected is source_ndarray(np.asarray(actual))
expected = np.datetime64("2000-01-01", "ns")
actual = as_compatible_data(datetime(2000, 1, 1))
assert np.asarray(expected) == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
def test_full_like(self):
# For more thorough tests, see test_variable.py
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
)
expect = orig.copy(deep=True)
expect.values = [[2.0, 2.0], [2.0, 2.0]]
assert_identical(expect, full_like(orig, 2))
# override dtype
expect.values = [[True, True], [True, True]]
assert expect.dtype == bool
assert_identical(expect, full_like(orig, True, dtype=bool))
# raise error on non-scalar fill_value
with raises_regex(ValueError, "must be scalar"):
full_like(orig, [1.0, 2.0])
@requires_dask
def test_full_like_dask(self):
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
).chunk(((1, 1), (2,)))
def check(actual, expect_dtype, expect_values):
assert actual.dtype == expect_dtype
assert actual.shape == orig.shape
assert actual.dims == orig.dims
assert actual.attrs == orig.attrs
assert actual.chunks == orig.chunks
assert_array_equal(actual.values, expect_values)
check(full_like(orig, 2), orig.dtype, np.full_like(orig.values, 2))
# override dtype
check(
full_like(orig, True, dtype=bool),
bool,
np.full_like(orig.values, True, dtype=bool),
)
# Check that there's no array stored inside dask
# (e.g. we didn't create a numpy array and then we chunked it!)
dsk = full_like(orig, 1).data.dask
for v in dsk.values():
if isinstance(v, tuple):
for vi in v:
assert not isinstance(vi, np.ndarray)
else:
assert not isinstance(v, np.ndarray)
def test_zeros_like(self):
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
)
assert_identical(zeros_like(orig), full_like(orig, 0))
assert_identical(zeros_like(orig, dtype=int), full_like(orig, 0, dtype=int))
def test_ones_like(self):
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
)
assert_identical(ones_like(orig), full_like(orig, 1))
assert_identical(ones_like(orig, dtype=int), full_like(orig, 1, dtype=int))
def test_unsupported_type(self):
        # Non-indexable type
class CustomArray(NDArrayMixin):
def __init__(self, array):
self.array = array
class CustomIndexable(CustomArray, indexing.ExplicitlyIndexed):
pass
array = CustomArray(np.arange(3))
orig = Variable(dims=("x"), data=array, attrs={"foo": "bar"})
assert isinstance(orig._data, np.ndarray) # should not be CustomArray
array = CustomIndexable(np.arange(3))
orig = Variable(dims=("x"), data=array, attrs={"foo": "bar"})
assert isinstance(orig._data, CustomIndexable)
def test_raise_no_warning_for_nan_in_binary_ops():
with pytest.warns(None) as record:
Variable("x", [1, 2, np.NaN]) > 0
assert len(record) == 0
class TestBackendIndexing:
""" Make sure all the array wrappers can be indexed. """
@pytest.fixture(autouse=True)
def setUp(self):
self.d = np.random.random((10, 3)).astype(np.float64)
def check_orthogonal_indexing(self, v):
assert np.allclose(v.isel(x=[8, 3], y=[2, 1]), self.d[[8, 3]][:, [2, 1]])
def check_vectorized_indexing(self, v):
ind_x = Variable("z", [0, 2])
ind_y = Variable("z", [2, 1])
assert np.allclose(v.isel(x=ind_x, y=ind_y), self.d[ind_x, ind_y])
def test_NumpyIndexingAdapter(self):
v = Variable(dims=("x", "y"), data=NumpyIndexingAdapter(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
        # cannot be doubly wrapped
with raises_regex(TypeError, "NumpyIndexingAdapter only wraps "):
v = Variable(
dims=("x", "y"), data=NumpyIndexingAdapter(NumpyIndexingAdapter(self.d))
)
def test_LazilyOuterIndexedArray(self):
v = Variable(dims=("x", "y"), data=LazilyOuterIndexedArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(
dims=("x", "y"),
data=LazilyOuterIndexedArray(LazilyOuterIndexedArray(self.d)),
)
self.check_orthogonal_indexing(v)
# hierarchical wrapping
v = Variable(
dims=("x", "y"), data=LazilyOuterIndexedArray(NumpyIndexingAdapter(self.d))
)
self.check_orthogonal_indexing(v)
def test_CopyOnWriteArray(self):
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(
dims=("x", "y"), data=CopyOnWriteArray(LazilyOuterIndexedArray(self.d))
)
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
def test_MemoryCachedArray(self):
v = Variable(dims=("x", "y"), data=MemoryCachedArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(MemoryCachedArray(self.d)))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
@requires_dask
def test_DaskIndexingAdapter(self):
import dask.array as da
da = da.asarray(self.d)
v = Variable(dims=("x", "y"), data=DaskIndexingAdapter(da))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(DaskIndexingAdapter(da)))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
| apache-2.0 |
mph-/lcapy | lcapy/nexpr.py | 1 | 7914 | """This module provides the DiscreteTimeDomainExpression class to
represent discrete-time expressions.
Copyright 2020--2021 Michael Hayes, UCECE
"""
from __future__ import division
from .domains import DiscreteTimeDomain
from .sequence import Sequence
from .functions import exp
from .sym import j, oo, pi, fsym, oo
from .dsym import nsym, ksym, zsym, dt
from .ztransform import ztransform
from .dft import DFT
from .seqexpr import SequenceExpression
from .nseq import DiscreteTimeDomainSequence, nseq
from sympy import Sum, summation, limit, DiracDelta
__all__ = ('nexpr', )
class DiscreteTimeDomainExpression(DiscreteTimeDomain, SequenceExpression):
"""Discrete-time expression or symbol."""
var = nsym
seqcls = DiscreteTimeDomainSequence
def __init__(self, val, **assumptions):
check = assumptions.pop('check', True)
if 'integer' not in assumptions:
assumptions['real'] = True
super(DiscreteTimeDomainExpression, self).__init__(val, **assumptions)
expr = self.expr
if check and expr.has(zsym) and not expr.has(Sum):
raise ValueError(
'n-domain expression %s cannot depend on z' % expr)
if check and expr.has(ksym) and not expr.has(Sum):
raise ValueError(
'n-domain expression %s cannot depend on k' % expr)
def _mul_compatible_domains(self, x):
if self.domain == x.domain:
return True
return x.is_constant_domain
def _div_compatible_domains(self, x):
if self.domain == x.domain:
return True
return x.is_constant_domain
def as_expr(self):
return DiscreteTimeDomainExpression(self)
def differentiate(self):
"""First order difference."""
result = (self.expr - self.subs(n - 1).expr) / dt
return self.__class__(result, **self.assumptions)
def integrate(self):
"""First order integration."""
from .sym import symsymbol
from .utils import factor_const
from .extrafunctions import UnitImpulse
from .functions import u
# TODO, get SymPy to optimize this case.
expr = self.expr
const, expr = factor_const(expr, nsym)
if expr.is_Function and expr.func == UnitImpulse:
return dt * u(expr.args[0]) * const
msym = symsymbol('m', integer=True)
result = dt * summation(self.subs(msym).expr, (msym, -oo, nsym))
return self.__class__(result, **self.assumptions)
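    # Note: for a unit impulse the special case above returns dt * u(...)
    # directly; other expressions fall back to the symbolic summation over m.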
def ztransform(self, evaluate=True, **assumptions):
"""Determine one-sided z-transform."""
assumptions = self.assumptions.merge_and_infer(self, **assumptions)
result = ztransform(self.expr, self.var, zsym, evaluate)
return self.change(result, domain='Z', **assumptions)
def ZT(self, **assumptions):
return self.ztransform(**assumptions)
def plot(self, ni=None, **kwargs):
"""Plot the sequence. If `ni` is not specified, it defaults to the
range (-20, 20). `ni` can be a vector of specified sequence
        indices, a tuple specifying the range, or a constant specifying
the maximum value with the minimum value set to 0.
kwargs include:
axes - the plot axes to use otherwise a new figure is created
xlabel - the x-axis label
ylabel - the y-axis label
xscale - the x-axis scaling, say for plotting as ms
yscale - the y-axis scaling, say for plotting mV
in addition to those supported by the matplotlib plot command.
The plot axes are returned.
"""
if ni is None:
ni = (-20, 20)
from .plot import plot_sequence
return plot_sequence(self, ni, **kwargs)
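    # A hypothetical usage sketch (the sample values and range are made up):
    #
    #     x = nexpr([1, 2, 3])   # list converted to a discrete-time signal
    #     ax = x.plot((-5, 5))   # plot the samples for n in -5..5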
def initial_value(self):
"""Determine value at n = 0."""
return self.subs(0)
def final_value(self):
"""Determine value at n = oo."""
return self.__class__(limit(self.expr, self.var, oo))
def DFT(self, N=None, evaluate=True):
if N is None:
from .sym import symsymbol
N = symsymbol('N', integer=True, positive=True)
result = DFT(self.expr, nsym, ksym, N, evaluate=evaluate)
return self.change(result, domain='discrete fourier')
    def delay(self, m):
"""Delay signal by m samples."""
return self.subs(n - m)
def extent(self, n1=-100, n2=100):
"""Determine extent of the signal.
For example, nexpr([1, 1]).extent() = 2
nexpr([1, 0, 1]).extent() = 3
nexpr([0, 1, 0, 1]).extent() = 3
This performs a search between n=n1 and n=n2."""
return self.seq((n1, n2)).extent()
def discrete_time_fourier_transform(self, var=None,
images=oo, **assumptions):
"""Convert to Fourier domain using discrete time Fourier transform.
Use `images = 0` to avoid the infinite number of spectral images.
"""
return self.DTFT(var, images, **assumptions)
def DTFT(self, var=None, images=oo, **assumptions):
"""Convert to Fourier domain using discrete time Fourier transform.
By default this returns the DTFT in terms of `f`. Use
`.DTFT(w)` to get the angular frequency form, `.DTFT(F)` to
get the normalised frequency form, or `.DTFT(W)` to get the
normalised angular frequency form.
Use `images = 0` to avoid the infinite number of spectral images.
"""
from .extrafunctions import UnitStep
from .symbols import f, omega, Omega, F
from .fexpr import fexpr
from .dtft import DTFT
if var is None:
var = f
if id(var) not in (id(f), id(F), id(omega), id(Omega)):
            raise ValueError('DTFT requires var to be f, F, omega, or Omega, not %s' % var)
dtft = DTFT(self.expr, self.var, fsym, images=images)
result = fexpr(dtft)(var)
result = result.simplify_dirac_delta()
result = result.simplify_heaviside()
result = result.simplify_rect()
# There is a bug in SymPy when simplifying Sum('X(n - m)', (m, -oo, oo))
# result = result.simplify()
result = result.cancel_terms()
return result
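    # A hypothetical usage sketch (symbol names follow the docstring above):
    #
    #     X = x.DTFT()          # spectrum in terms of f (the default)
    #     X_W = x.DTFT(Omega)   # normalised angular frequency form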
def norm_angular_fourier(self, **assumptions):
from .normomegaexpr import Omega
return self.DTFT()(Omega)
def difference_equation(self, inputsym='x', outputsym='y', form='iir'):
"""Create difference equation from impulse response.
`form` can be 'fir' or 'iir' ('direct form I').
"""
H = self.ZT()
return H.difference_equation(inputsym, outputsym, form)
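    # A hypothetical usage sketch (h is assumed to be an impulse response
    # held in a DiscreteTimeDomainExpression):
    #
    #     eq = h.difference_equation('x', 'y', form='fir')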
def remove_condition(self):
"""Remove the piecewise condition from the expression."""
if not self.is_conditional:
return self
expr = self.expr
expr = expr.args[0].args[0]
return self.__class__(expr)
def nexpr(arg, **assumptions):
"""Create nExpr object. If `arg` is nsym return n"""
from .expr import Expr
from .seq import seq
if arg is nsym:
return n
if isinstance(arg, Expr):
if assumptions == {}:
return arg
return arg.__class__(arg, **assumptions)
if isinstance(arg, str) and arg.startswith('{'):
return nseq(arg)
from numpy import ndarray
if isinstance(arg, (list, ndarray)):
return DiscreteTimeDomainSequence(arg, var=n).as_impulses()
return DiscreteTimeDomainExpression(arg, **assumptions)
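# A few hypothetical usage sketches (argument values are illustrative only):
#
#     nexpr('a**n')       # symbolic discrete-time expression in n
#     nexpr([1, 2, 3])    # list converted to impulses via as_impulses()
#     nexpr('{1, 2, 3}')  # brace notation parsed as a sequence by nseq()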
from .expressionclasses import expressionclasses
expressionclasses.register('discrete time', DiscreteTimeDomainExpression)
n = DiscreteTimeDomainExpression('n', integer=True)
| lgpl-2.1 |
fja05680/pinkfish | examples/310.cryptocurrencies/strategy.py | 1 | 6833 | """
The SMA-ROC-portfolio strategy.
This is the SMA-ROC strategy applied to a portfolio.
SMA-ROC is a rate-of-change calculation smoothed by
a moving average.
This module lets us examine this strategy and try different period,
stop loss percent, and margin settings, and whether or not to use a
regime filter. We split up the total capital between the symbols in the
portfolio and allocate based on either equal weight or volatility
parity weight (inverse volatility).
"""
import datetime
import matplotlib.pyplot as plt
import pandas as pd
from talib.abstract import *
import pinkfish as pf
# A custom indicator to use in this strategy.
def SMA_ROC(ts, mom_lookback=1, sma_timeperiod=20, price='close'):
""" Returns a series which is an SMA with of a daily MOM. """
mom = pf.MOMENTUM(ts, lookback=mom_lookback, time_frame='daily', price=price)
sma_mom = SMA(mom, timeperiod=sma_timeperiod)
return sma_mom
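# A minimal usage sketch (hypothetical call; `ts` is assumed to be a pinkfish
# timeseries with a 'close' column, as produced in Strategy.run below):
#
#     sma_roc = SMA_ROC(ts, mom_lookback=1, sma_timeperiod=20, price='close')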
default_options = {
'use_adj' : False,
'use_cache' : True,
'stock_market_calendar' : False,
'stop_loss_pct' : 1.0,
'margin' : 1,
'lookback' : 1,
'sma_timeperiod': 20,
'sma_pct_band': 0,
'use_regime_filter' : True,
'use_vola_weight' : False
}
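# A hypothetical usage sketch (symbols, dates, and capital are made up; the
# constructor signature matches the Strategy class below):
#
#     options = default_options.copy()
#     options['use_vola_weight'] = True
#     s = Strategy(['BTC-USD', 'ETH-USD'], capital=10000,
#                  start=datetime.datetime(2019, 1, 1),
#                  end=datetime.datetime(2020, 1, 1), options=options)
#     s.run()
#     print(s.stats)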
class Strategy:
def __init__(self, symbols, capital, start, end, options=default_options):
self.symbols = symbols
self.capital = capital
self.start = start
self.end = end
self.options = options.copy()
self.ts = None
self.rlog = None
self.tlog = None
self.dbal = None
self.stats = None
def _algo(self):
pf.TradeLog.cash = self.capital
pf.TradeLog.margin = self.options['margin']
# Create a stop_loss dict for each symbol.
stop_loss = {symbol:0 for symbol in self.portfolio.symbols}
        # stop_loss_pct should be in the range 0-1; the user may have
        # expressed it as a percentage (0-100), so rescale if needed
if self.options['stop_loss_pct'] > 1:
self.options['stop_loss_pct'] /= 100
upper_band = self.options['sma_pct_band']/1000
lower_band = -self.options['sma_pct_band']/1000
# Loop though timeseries.
for i, row in enumerate(self.ts.itertuples()):
date = row.Index.to_pydatetime()
end_flag = pf.is_last_row(self.ts, i)
# Get the prices for this row, put in dict p.
p = self.portfolio.get_prices(row,
fields=['close', 'regime', 'sma_roc', 'vola'])
# Sum the inverse volatility for each row.
inverse_vola_sum = 0
for symbol in self.portfolio.symbols:
inverse_vola_sum += 1 / p[symbol]['vola']
# Loop though each symbol in portfolio.
for symbol in self.portfolio.symbols:
# Use variables to make code cleaner.
close = p[symbol]['close']
regime = p[symbol]['regime']
sma_roc = p[symbol]['sma_roc']
inverse_vola = 1 / p[symbol]['vola']
# Sell Logic
# First we check if an existing position in symbol should be sold
                # - sell if sma_roc drops below the lower band
                # - sell if price closes below the stop loss
                # - sell at the end of data by adjusting the percent to zero
if symbol in self.portfolio.positions:
if sma_roc < lower_band or close < stop_loss[symbol] or end_flag:
if close < stop_loss[symbol]: print('STOP LOSS!!!')
self.portfolio.adjust_percent(date, close, 0, symbol, row)
# Buy Logic
# First we check to see if there is an existing position, if so do nothing
                # - Buy if (regime > 0 or not use_regime_filter) and sma_roc > upper_band
else:
if (regime > 0 or not self.options['use_regime_filter']) and sma_roc > upper_band:
# Use volatility weight.
if self.options['use_vola_weight']:
weight = inverse_vola / inverse_vola_sum
# Use equal weight.
else:
weight = 1 / len(self.portfolio.symbols)
self.portfolio.adjust_percent(date, close, weight, symbol, row)
# Set stop loss
stop_loss[symbol] = (1-self.options['stop_loss_pct'])*close
# record daily balance
self.portfolio.record_daily_balance(date, row)
def run(self):
self.portfolio = pf.Portfolio()
self.ts = self.portfolio.fetch_timeseries(self.symbols, self.start, self.end,
fields=['close'], use_cache=self.options['use_cache'],
use_adj=self.options['use_adj'],
dir_name='cryptocurrencies',
stock_market_calendar=self.options['stock_market_calendar'])
# Add technical indicator: 200 sma regime filter for each symbol.
def _crossover(ts, ta_param, input_column):
return pf.CROSSOVER(ts, timeperiod_fast=1, timeperiod_slow=200,
price=input_column, prevday=False)
self.ts = self.portfolio.add_technical_indicator(
self.ts, ta_func=_crossover, ta_param=None,
output_column_suffix='regime', input_column_suffix='close')
# Add technical indicator: volatility.
def _volatility(ts, ta_param, input_column):
return pf.VOLATILITY(ts, price=input_column)
self.ts = self.portfolio.add_technical_indicator(
self.ts, ta_func=_volatility, ta_param=None,
output_column_suffix='vola', input_column_suffix='close')
        # Add technical indicator: X day SMA_ROC.
def _sma_roc(ts, ta_param, input_column):
return SMA_ROC(ts, mom_lookback=self.options['lookback'],
sma_timeperiod=self.options['sma_timeperiod'],
price=input_column)
self.ts = self.portfolio.add_technical_indicator(
self.ts, ta_func=_sma_roc, ta_param=None,
output_column_suffix='sma_roc', input_column_suffix='close')
# Finalize timeseries.
self.ts, self.start = self.portfolio.finalize_timeseries(self.ts, self.start)
# Init trade log objects.
self.portfolio.init_trade_logs(self.ts)
self._algo()
self._get_logs()
self._get_stats()
def _get_logs(self):
self.rlog, self.tlog, self.dbal = self.portfolio.get_logs()
def _get_stats(self):
self.stats = pf.stats(self.ts, self.tlog, self.dbal, self.capital)
| mit |
tasoc/photometry | notes/halo_shift.py | 1 | 2629 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Rasmus Handberg <rasmush@phys.au.dk>
"""
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import sqlite3
import os.path
#------------------------------------------------------------------------------
def mag2flux(mag):
"""
Convert from magnitude to flux using scaling relation from
aperture photometry. This is an estimate.
Parameters:
mag (float): Magnitude in TESS band.
Returns:
float: Corresponding flux value
"""
return 10**(-0.4*(mag - 20.54))
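# Quick sanity checks of the relation above: a magnitude of 20.54 maps to unit
# flux by construction, and 5 magnitudes brighter is a factor of 100:
#
#     mag2flux(20.54)  # -> 1.0
#     mag2flux(15.54)  # -> 100.0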
if __name__ == '__main__':
pass
folder = r'C:\Users\au195407\Documents\tess_data_local\S01_DR01-2114872'
conn = sqlite3.connect(os.path.join(folder, 'todo.sqlite'))
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute("SELECT todolist.starid,tmag,onedge,edgeflux FROM todolist INNER JOIN diagnostics ON todolist.priority=diagnostics.priority;")
results = cursor.fetchall()
starid = np.array([row['starid'] for row in results], dtype='int64')
tmag = np.array([row['tmag'] for row in results])
OnEdge = np.array([np.NaN if row['onedge'] is None else row['onedge'] for row in results])
EdgeFlux = np.array([np.NaN if row['edgeflux'] is None else row['edgeflux'] for row in results])
cursor.close()
conn.close()
print(tmag)
print(OnEdge)
print(EdgeFlux)
tmag_limit = 3.0
flux_limit = 1e-3
indx = (OnEdge > 0)
indx_halo = (tmag <= tmag_limit) & (OnEdge > 0) & (EdgeFlux/mag2flux(tmag) > flux_limit)
indx_spec = (starid == 382420379)
print(starid[indx_halo])
fig = plt.figure()
ax = fig.add_subplot(111)
plt.scatter(tmag[indx], OnEdge[indx], alpha=0.5)
plt.scatter(tmag[indx_halo], OnEdge[indx_halo], marker='x', c='r')
plt.xlim(xmax=tmag_limit)
plt.ylim(ymin=0)
ax.set_xlabel('Tmag')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(tmag[indx], EdgeFlux[indx], alpha=0.5)
ax.set_xlim(xmax=5.0)
#ax.set_ylim(ymin=0.0)
ax.set_yscale('log')
ax.set_xlabel('Tmag')
fig = plt.figure()
ax = fig.add_subplot(111)
plt.scatter(tmag[indx], EdgeFlux[indx]/mag2flux(tmag[indx]), alpha=0.5)
plt.scatter(tmag[indx_halo], EdgeFlux[indx_halo]/mag2flux(tmag[indx_halo]), alpha=0.3, marker='x', c='r')
plt.scatter(tmag[indx_spec], EdgeFlux[indx_spec]/mag2flux(tmag[indx_spec]), alpha=0.3, marker='o', c='g', lw=2)
plt.plot([2.0, 6.0], [1e-3, 2e-2], 'r--')
plt.axhline(flux_limit, c='r', ls='--')
plt.axvline(tmag_limit, c='r', ls='--')
#plt.xlim(xmax=tmag_limit)
ax.set_ylim(ymin=1e-5, ymax=1)
ax.set_yscale('log')
ax.set_ylabel('Edge Flux / Expected Total Flux')
ax.set_xlabel('Tmag')
plt.show()
| gpl-3.0 |
pythonvietnam/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
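# The helper under test draws n_samples distinct integers from
# range(n_population). A hedged usage sketch (values are illustrative):
#
#     sample_without_replacement(10, 3, random_state=0)
#     # -> array of 3 unique integers drawn from [0, 10)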
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
    # a large number of trials prevents false negatives without slowing the
    # normal case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with a sampling algorithm
        # that does not provide a random permutation of the subset of integers.
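        # For example, with n_population = 10 and n_samples = 3 there are
        # C(10, 3) = 120 distinct subsets the sampler should eventually produce.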
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # One-class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given proabilites don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
Barmaley-exe/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 43 | 1791 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with a decision tree.
The :ref:`decision tree <tree>`
is used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data and learns from the noise, i.e. it overfits.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_3 = DecisionTreeRegressor(max_depth=8)
clf_1.fit(X, y)
clf_2.fit(X, y)
clf_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
y_3 = clf_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
JosmanPS/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
    plt.contour(label == l,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/cluster/dbscan_.py | 18 | 12859 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_array, check_consistent_length
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski', metric_params=None,
algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
metric_params : dict, optional
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
For an example, see :ref:`examples/cluster/plot_dbscan.py
<sphx_glr_auto_examples_cluster_plot_dbscan.py>`.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
    # Calculate neighborhood for all samples. This leaves the original point
    # in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While true, it is useless information.)
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = X.indices.astype(np.intp, copy=False)[X_mask]
masked_indptr = np.concatenate(([0], np.cumsum(X_mask)))[X.indptr[1:]]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
metric_params=metric_params, p=p,
n_jobs=n_jobs)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
.. versionadded:: 0.17
metric *precomputed* to accept precomputed sparse matrix.
metric_params : dict, optional
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
For an example, see :ref:`examples/cluster/plot_dbscan.py
<sphx_glr_auto_examples_cluster_plot_dbscan.py>`.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
metric_params=None, algorithm='auto', leaf_size=30, p=None,
n_jobs=1):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.metric_params = metric_params
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.n_jobs = n_jobs
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
y : Ignored
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight,
**self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
y : Ignored
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
| mit |
dschien/PyExcelModelingHelper | excel_helper/__init__.py | 1 | 33092 | import csv
import datetime
import importlib
import sys
from abc import abstractmethod
from collections import defaultdict
from typing import Dict, List, Set
import numpy as np
import pandas as pd
from dateutil import relativedelta as rdelta
import logging
from functools import partial
from xlrd import xldate_as_tuple
import calendar
from scipy.interpolate import interp1d
import json
__author__ = 'schien'
import pkg_resources # part of setuptools
version = pkg_resources.require("excel-modelling-helper")[0].version
param_name_map_v1 = {'variable': 'name', 'scenario': 'source_scenarios_string', 'module': 'module_name',
'distribution': 'distribution_name', 'param 1': 'param_a', 'param 2': 'param_b',
'param 3': 'param_c',
'unit': '', 'CAGR': 'cagr', 'ref date': 'ref_date', 'label': '', 'tags': '', 'comment': '',
'source': ''}
param_name_map_v2 = {'CAGR': 'cagr',
'comment': '',
'label': '',
'mean growth': 'growth_factor',
'param': '',
'ref date': 'ref_date',
'ref value': '',
'scenario': 'source_scenarios_string',
'source': '',
'tags': '',
'type': '',
'unit': '',
'variability growth': 'ef_growth_factor',
'initial_value_proportional_variation': '',
'variable': 'name'}
param_name_maps = {1: param_name_map_v1, 2: param_name_map_v2}
# logger.basicConfig(level=logger.DEBUG)
logger = logging.getLogger(__name__)
class DistributionFunctionGenerator(object):
module: str
distribution: str
param_a: str
param_b: str
param_c: str
def __init__(self, module_name=None, distribution_name=None, param_a: float = None,
param_b: float = None, param_c: float = None, size=None, **kwargs):
"""
Instantiate a new object.
:param module_name:
:param distribution_name:
:param param_a:
:param param_b:
:param param_c:
:param size:
:param kwargs: can contain key "sample_mean_value" with bool value
"""
self.kwargs = kwargs
self.size = size
self.module_name = module_name
self.distribution_name = distribution_name
self.sample_mean_value = kwargs.get('sample_mean_value', False)
# prepare function arguments
if distribution_name == 'choice':
if type(param_a) == str:
tokens = param_a.split(',')
params = [float(token.strip()) for token in tokens]
self.random_function_params = [np.array(params, dtype=np.float)]
else:
self.random_function_params = [np.array([i for i in [param_a, param_b, param_c] if i], dtype=np.float)]
logger.debug(f'setting function params for choice distribution {self.random_function_params}')
else:
self.random_function_params = [i for i in [param_a, param_b, param_c] if i not in [None, ""]]
def get_mean(self, distribution_function):
"""Get the mean value for a distribution.
        If the distribution function is [normal, uniform, choice, triangular] the analytic value is calculated.
        Otherwise, the distribution is instantiated and then the mean is calculated.
:param distribution_function:
:return: the mean as a scalar
"""
name = self.distribution_name
params = self.random_function_params
if name == 'normal':
return params[0]
if name == 'uniform':
return (params[0] + params[1]) / 2.
if name == 'choice':
return params[0].mean()
if name == 'triangular':
return (params[0] + params[1] + params[2]) / 3.
return distribution_function().mean()
def generate_values(self, *args, **kwargs):
"""
        Generate a sample of values by sampling from a distribution. The size of the sample can be overridden with the 'size' kwarg.
If `self.sample_mean_value == True` the sample will contain "size" times the mean value.
:param args:
:param kwargs:
:return: sample as vector of given size
"""
sample_size = kwargs.get('size', self.size)
f = self.instantiate_distribution_function(self.module_name, self.distribution_name)
distribution_function = partial(f, *self.random_function_params, size=sample_size)
if self.sample_mean_value:
sample = np.full(sample_size, self.get_mean(distribution_function))
else:
sample = distribution_function()
return sample
@staticmethod
def instantiate_distribution_function(module_name, distribution_name):
module = importlib.import_module(module_name)
func = getattr(module, distribution_name)
return func
class Parameter(object):
"""
A single parameter
"""
version: int
name: str
unit: str
comment: str
source: str
scenario: str
processes: Dict[str, List]
"optional comma-separated list of tags"
tags: str
def __init__(self, name, tags=None, source_scenarios_string: str = None, unit: str = None,
comment: str = None, source: str = None, version=None,
**kwargs):
# The source definition of scenarios. A comma-separated list
self.version = version
self.source = source
self.comment = comment
self.unit = unit
self.source_scenarios_string = source_scenarios_string
self.tags = tags
self.name = name
self.scenario = None
self.cache = None
# track the usages of this parameter per process as a list of process-specific variable names that are backed by this parameter
self.processes = defaultdict(list)
self.kwargs = kwargs
def __call__(self, settings=None, *args, **kwargs):
"""
        Samples from a parameter. Values are cached and the same values are returned every time it is called.
@todo confusing interface that accepts 'settings' and kwargs at the same time.
worse- 'use_time_series' must be present in the settings dict
:param args:
:param kwargs:
:return:
"""
if self.cache is None:
kwargs['name'] = self.name
kwargs['unit'] = self.unit
kwargs['tags'] = self.tags
kwargs['scenario'] = self.scenario
if not settings:
settings = {}
common_args = {'size': settings.get('sample_size', 1),
'sample_mean_value': settings.get('sample_mean_value', False)}
common_args.update(**self.kwargs)
if settings.get('use_time_series', False):
if self.version == 2:
generator = GrowthTimeSeriesGenerator(**common_args, times=settings['times'])
else:
generator = ConstantUncertaintyExponentialGrowthTimeSeriesGenerator(**common_args,
times=settings['times'])
else:
generator = DistributionFunctionGenerator(**common_args)
self.cache = generator.generate_values(*args, **kwargs)
return self.cache
def add_usage(self, process_name, variable_name):
# add the name of a variable of a process model that is backed by this parameter
self.processes[process_name].append(variable_name)
class GrowthTimeSeriesGenerator(DistributionFunctionGenerator):
ref_date: str
# of the mean values
# the type of growth ['exp']
# growth_function_type: str
# of the error function
variance: str
# error function growth rate
ef_growth_factor: str
def __init__(self, times=None, size=None, index_names=None, ref_date=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ref_date = ref_date if ref_date else None
self.times = times
self.size = size
iterables = [times, range(0, size)]
self._multi_index = pd.MultiIndex.from_product(iterables, names=index_names)
assert type(times.freq) == pd.tseries.offsets.MonthBegin, 'Time index must have monthly frequency'
def generate_values(self, *args, **kwargs):
"""
Instantiate a random variable and apply annual growth factors.
:return:
"""
assert 'ref value' in self.kwargs
# 1. Generate $\mu$
start_date = self.times[0].to_pydatetime()
end_date = self.times[-1].to_pydatetime()
ref_date = self.ref_date
if not ref_date:
raise Exception(f"Ref date not set for variable {kwargs['name']}")
mu = self.generate_mu(end_date, ref_date, start_date)
# 3. Generate $\sigma$
## Prepare array with growth values $\sigma$
if self.sample_mean_value:
sigma = np.zeros((len(self.times), self.size))
else:
if self.kwargs['type'] == 'interp':
def get_date(record):
return datetime.datetime.strptime(record[0], "%Y-%m-%d")
ref_value_ = sorted(json.loads(self.kwargs['ref value'].strip()).items(), key=get_date)
intial_value = ref_value_[0][1]
else:
intial_value = float(self.kwargs['ref value'])
variability_ = intial_value * self.kwargs['initial_value_proportional_variation']
logger.debug(f'sampling random distribution with parameters -{variability_}, 0, {variability_}')
sigma = np.random.triangular(-1 * variability_, 0, variability_, (len(self.times), self.size))
# logger.debug(ref_date.strftime("%b %d %Y"))
## 4. Prepare growth array for $\alpha_{sigma}$
alpha_sigma = growth_coefficients(start_date,
end_date,
ref_date,
self.kwargs['ef_growth_factor'], 1)
### 5. Prepare DataFrame
iterables = [self.times, range(self.size)]
index_names = ['time', 'samples']
_multi_index = pd.MultiIndex.from_product(iterables, names=index_names)
# logger.debug(start_date)
# logger.debug(end_date)
from dateutil import relativedelta
r = relativedelta.relativedelta(end_date, start_date)
months = r.years * 12 + r.months + 1
name = kwargs['name']
## Apply growth to $\sigma$ and add $\sigma$ to $\mu$
# logger.debug(sigma.size)
# logger.debug(alpha_sigma.shape)
# logger.debug(months)
unit_ = kwargs["unit"]
if not unit_:
unit_ = 'dimensionless'
series = pd.Series(((sigma * alpha_sigma) + mu.reshape(months, 1)).ravel(), index=_multi_index,
dtype=f'pint[{unit_}]')
## test if df has sub-zero values
df_sigma__dropna = series.where(series < 0).dropna()
if not df_sigma__dropna.pint.m.empty:
logger.warning(f"Negative values for parameter {name} from {df_sigma__dropna.index[0][0]}")
return series
def generate_mu(self, end_date, ref_date, start_date):
if self.kwargs['type'] == 'exp':
mu_bar = np.full(len(self.times), float(self.kwargs['ref value']))
# 2. Apply Growth to Mean Values $\alpha_{mu}$
alpha_mu = growth_coefficients(start_date,
end_date,
ref_date,
self.kwargs['growth_factor'], 1)
mu = mu_bar * alpha_mu.ravel()
mu = mu.reshape(len(self.times), 1)
return mu
if self.kwargs['type'] == 'interp':
def toTimestamp(d):
return calendar.timegm(d.timetuple())
def interpolate(growth_config: Dict[str, float], date_range, kind='linear'):
arr1 = np.array([toTimestamp(datetime.datetime.strptime(date_val, '%Y-%m-%d')) for date_val in
growth_config.keys()])
arr2 = np.array([val for val in growth_config.values()])
f = interp1d(arr1, arr2, kind=kind, fill_value='extrapolate')
return f([toTimestamp(date_val) for date_val in date_range])
ref_value_ = json.loads(self.kwargs['ref value'].strip())
return interpolate(ref_value_, self.times, self.kwargs['param'])
class ConstantUncertaintyExponentialGrowthTimeSeriesGenerator(DistributionFunctionGenerator):
cagr: str
ref_date: str
def __init__(self, cagr=None, times=None, size=None, index_names=None, ref_date=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cagr = cagr if cagr else 0
self.ref_date = ref_date if ref_date else None
self.times = times
self.size = size
iterables = [times, range(0, size)]
self._multi_index = pd.MultiIndex.from_product(iterables, names=index_names)
assert type(times.freq) == pd.tseries.offsets.MonthBegin, 'Time index must have monthly frequency'
def generate_values(self, *args, **kwargs):
"""
Instantiate a random variable and apply annual growth factors.
:return:
"""
values = super().generate_values(*args, **kwargs, size=(len(self.times) * self.size,))
alpha = self.cagr
# @todo - fill to cover the entire time: define rules for filling first
ref_date = self.ref_date if self.ref_date else self.times[0].to_pydatetime()
# assert ref_date >= self.times[0].to_pydatetime(), 'Ref date must be within variable time span.'
# assert ref_date <= self.times[-1].to_pydatetime(), 'Ref date must be within variable time span.'
start_date = self.times[0].to_pydatetime()
end_date = self.times[-1].to_pydatetime()
a = growth_coefficients(start_date, end_date, ref_date, alpha, self.size)
values *= a.ravel()
# df = pd.DataFrame(values)
# df.columns = [kwargs['name']]
# df.set_index(self._multi_index, inplace=True)
# # @todo this is a hack to return a series with index as I don't know how to set an index and rename a series
# data_series = df.iloc[:, 0]
# data_series._metadata = kwargs
# data_series.index.rename(['time', 'samples'], inplace=True)
#
if not kwargs["unit"]:
series = pd.Series(values, index=self._multi_index, dtype='pint[dimensionless]')
else:
series = pd.Series(values, index=self._multi_index, dtype=f'pint[{kwargs["unit"]}]')
return series
def growth_coefficients(start_date, end_date, ref_date, alpha, samples):
"""
Build a matrix of growth factors according to the CAGR formula y'=y0 (1+a)^(t'-t0).
    a  - growth rate alpha
    t0 - start date
    t' - end date
    y' - output
    y0 - start value
"""
start_offset = 0
if ref_date < start_date:
offset_delta = rdelta.relativedelta(start_date, ref_date)
start_offset = offset_delta.months + 12 * offset_delta.years
start_date = ref_date
end_offset = 0
if ref_date > end_date:
offset_delta = rdelta.relativedelta(ref_date, end_date)
end_offset = offset_delta.months + 12 * offset_delta.years
end_date = ref_date
delta_ar = rdelta.relativedelta(ref_date, start_date)
ar = delta_ar.months + 12 * delta_ar.years
delta_br = rdelta.relativedelta(end_date, ref_date)
br = delta_br.months + 12 * delta_br.years
# we place the ref point on the lower interval (delta_ar + 1) but let it start from 0
# in turn we let the upper interval start from 1
g = np.fromfunction(lambda i, j: np.power(1 - alpha, np.abs(i) / 12), (ar + 1, samples), dtype=float)
h = np.fromfunction(lambda i, j: np.power(1 + alpha, np.abs(i + 1) / 12), (br, samples), dtype=float)
g = np.flipud(g)
# now join the two arrays
a = np.vstack((g, h))
if start_offset > 0:
a = a[start_offset:]
if end_offset > 0:
a = a[:-end_offset]
return a
class ParameterScenarioSet(object):
"""
    The set of all versions of a parameter across all the scenarios.
"""
default_scenario = 'default'
"the name of the parameters in this set"
parameter_name: str
scenarios: Dict[str, Parameter]
def __init__(self):
self.scenarios = {}
def add_scenario(self, parameter: 'Parameter', scenario_name: str = default_scenario):
"""
Add a scenario for this parameter.
:param scenario_name:
:param parameter:
:return:
"""
self.scenarios[scenario_name] = parameter
def __getitem__(self, item):
return self.scenarios.__getitem__(item)
def __setitem__(self, key, value):
return self.scenarios.__setitem__(key, value)
class ParameterRepository(object):
"""
Contains all known parameter definitions (so that it is not necessary to re-read the excel file for repeat param accesses).
    The param definitions are independent from the sampling (the Parameter.__call__ method). Repeated access to
    __call__ returns the cached sample until the cache is cleared (e.g. via clear_cache).
Internally, parameters are organised together with all the scenario variants in a single ParameterScenarioSet.
"""
parameter_sets: Dict[str, ParameterScenarioSet]
tags: Dict[str, Dict[str, Set[Parameter]]]
def __init__(self):
self.parameter_sets = defaultdict(ParameterScenarioSet)
self.tags = defaultdict(lambda: defaultdict(set))
def add_all(self, parameters: List[Parameter]):
for p in parameters:
self.add_parameter(p)
def clear_cache(self):
for p_sets in self.parameter_sets.values():
for param_name, param in p_sets.scenarios.items():
param.cache = None
def add_parameter(self, parameter: Parameter):
"""
A parameter can have several scenarios. They are specified as a comma-separated list in a string.
:param parameter:
:return:
"""
# try reading the scenarios from the function arg or from the parameter attribute
scenario_string = parameter.source_scenarios_string
if scenario_string:
_scenarios = [i.strip() for i in scenario_string.split(',')]
self.fill_missing_attributes_from_default_parameter(parameter)
else:
_scenarios = [ParameterScenarioSet.default_scenario]
for scenario in _scenarios:
parameter.scenario = scenario
self.parameter_sets[parameter.name][scenario] = parameter
# record all tags for this parameter
if parameter.tags:
_tags = [i.strip() for i in parameter.tags.split(',')]
for tag in _tags:
self.tags[tag][parameter.name].add(parameter)
def fill_missing_attributes_from_default_parameter(self, param):
"""
Empty fields in Parameter definitions in scenarios are populated with default values.
E.g. in the example below, the source for the Power_TV variable in the 8K scenario would also be EnergyStar.
| name | scenario | val | tags | source |
|----------|----------|-----|--------|------------|
| Power_TV | | 60 | UD, TV | EnergyStar |
| Power_TV | 8K | 85 | new_tag| |
        **Note** tags must not differ. In the example above, the 8K scenario variable's tags value would be overwritten
        with the default value.
:param param:
:return:
"""
if not self.exists(param.name) or not ParameterScenarioSet.default_scenario in self.parameter_sets[
param.name].scenarios.keys():
logger.warning(
f'No default value for param {param.name} found.')
return
default = self.parameter_sets[param.name][ParameterScenarioSet.default_scenario]
for att_name, att_value in default.__dict__.items():
if att_name in ['unit', 'label', 'comment', 'source', 'tags']:
if att_name == 'tags' and default.tags != param.tags:
logger.warning(
f'For param {param.name} for scenarios {param.source_scenarios_string}, tags is different from default parameter tags. Overwriting with default values.')
setattr(param, att_name, att_value)
if not getattr(param, att_name):
logger.debug(
f'For param {param.name} for scenarios {param.source_scenarios_string}, populating attribute {att_name} with value {att_value} from default parameter.')
setattr(param, att_name, att_value)
def __getitem__(self, item) -> Parameter:
"""
Return the default scenario parameter for a given variable name
:param item: the name of the variable
:return:
"""
return self.get_parameter(item, scenario_name=ParameterScenarioSet.default_scenario)
def get_parameter(self, param_name, scenario_name=ParameterScenarioSet.default_scenario) -> Parameter:
if self.exists(param_name, scenario=scenario_name):
return self.parameter_sets[param_name][scenario_name]
try:
return self.parameter_sets[param_name][ParameterScenarioSet.default_scenario]
except KeyError:
raise KeyError(f"{param_name} not found")
def find_by_tag(self, tag) -> Dict[str, Set[Parameter]]:
"""
        Get all parameters that are registered for a given tag
:param tag: str - single tag
:return: a dict of {param name: set[Parameter]} that contains all ParameterScenarioSets for
all parameter names with a given tag
"""
return self.tags[tag]
def exists(self, param, scenario=None) -> bool:
# if scenario is not None:
# return
present = param in self.parameter_sets.keys()
if not present:
return False
scenario = scenario if scenario else ParameterScenarioSet.default_scenario
return scenario in self.parameter_sets[param].scenarios.keys()
def list_scenarios(self, param):
if param in self.parameter_sets.keys():
return self.parameter_sets[param].scenarios.keys()
class ExcelHandler(object):
version: int
def __init__(self):
self.version = 1
@abstractmethod
def load_definitions(self, sheet_name, filename=None):
raise NotImplementedError()
class OpenpyxlExcelHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
definitions = []
from openpyxl import load_workbook
wb = load_workbook(filename=filename, data_only=True)
_sheet_names = [sheet_name] if sheet_name else wb.sheetnames
for _sheet_name in _sheet_names:
sheet = wb.get_sheet_by_name(_sheet_name)
rows = list(sheet.rows)
header = [cell.value for cell in rows[0]]
if header[0] != 'variable':
continue
for row in rows[1:]:
values = {}
for key, cell in zip(header, row):
values[key] = cell.value
definitions.append(values)
return definitions
class Xlsx2CsvHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
from xlsx2csv import Xlsx2csv
data = Xlsx2csv(filename, inmemory=True).convert(None, sheetid=0)
definitions = []
        _sheet_names = [sheet_name] if sheet_name else list(data.keys())
for _sheet_name in _sheet_names:
sheet = data[_sheet_name]
header = sheet.header
if header[0] != 'variable':
continue
for row in sheet.rows:
values = {}
for key, cell in zip(header, row):
values[key] = cell
definitions.append(values)
return definitions
class CSVHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
return csv.DictReader(open(filename), delimiter=',')
class PandasCSVHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
self.version = 2
import pandas as pd
df = pd.read_csv(filename, usecols=range(15), index_col=False, parse_dates=['ref date'],
dtype={'initial_value_proportional_variation': 'float64'},
dayfirst=True
# date_parser=lambda x: pd.datetime.strptime(x, '%d-%m-%Y')
)
df = df.dropna(subset=['variable', 'ref value'])
df.fillna("", inplace=True)
return df.to_dict(orient='records')
class XLRDExcelHandler(ExcelHandler):
version: int
@staticmethod
def get_sheet_range_bounds(filename, sheet_name):
import xlrd
wb = xlrd.open_workbook(filename)
sheet = wb.sheet_by_name(sheet_name)
rows = list(sheet.get_rows())
return len(rows)
def load_definitions(self, sheet_name, filename=None):
import xlrd
wb = xlrd.open_workbook(filename)
sh = None
definitions = []
_definition_tracking = defaultdict(dict)
_sheet_names = [sheet_name] if sheet_name else [sh.name for sh in wb.sheets()]
version = 1
try:
sheet = wb.sheet_by_name('metadata')
rows = list(sheet.get_rows())
for row in rows:
if row[0].value == 'version':
version = row[1].value
self.version = version
except:
            logger.info(f'could not find a sheet with name "metadata" in workbook. defaulting to v1')
for _sheet_name in _sheet_names:
if _sheet_name == 'metadata':
continue
sheet = wb.sheet_by_name(_sheet_name)
rows = list(sheet.get_rows())
header = [cell.value for cell in rows[0]]
if header[0] != 'variable':
continue
for i, row in enumerate(rows[1:]):
values = {}
for key, cell in zip(header, row):
values[key] = cell.value
if not values['variable']:
# logger.debug(f'ignoring row {i}: {row}')
continue
if 'ref date' in values and values['ref date']:
if isinstance(values['ref date'], float):
values['ref date'] = datetime.datetime(*xldate_as_tuple(values['ref date'], wb.datemode))
if values['ref date'].day != 1:
logger.warning(f'ref date truncated to first of month for variable {values["variable"]}')
values['ref date'] = values['ref date'].replace(day=1)
else:
raise Exception(
f"{values['ref date']} for variable {values['variable']} is not a date - "
f"check spreadsheet value is a valid day of a month")
logger.debug(f'values for {values["variable"]}: {values}')
definitions.append(values)
scenario = values['scenario'] if values['scenario'] else "n/a"
if scenario in _definition_tracking[values['variable']]:
logger.error(
f"Duplicate entry for parameter "
f"with name <{values['variable']}> and <{scenario}> scenario in sheet {_sheet_name}")
raise ValueError(
f"Duplicate entry for parameter "
f"with name <{values['variable']}> and <{scenario}> scenario in sheet {_sheet_name}")
else:
_definition_tracking[values['variable']][scenario] = 1
return definitions
class XLWingsExcelHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
import xlwings as xw
definitions = []
wb = xw.Book(fullname=filename)
_sheet_names = [sheet_name] if sheet_name else wb.sheets
for _sheet_name in _sheet_names:
sheet = wb.sheets[_sheet_name]
range = sheet.range('A1').expand()
rows = range.rows
header = [cell.value for cell in rows[0]]
            # check if this sheet contains parameters or if it is documentation
if header[0] != 'variable':
continue
total_rows = XLRDExcelHandler.get_sheet_range_bounds(filename, _sheet_name)
range = sheet.range((1, 1), (total_rows, len(header)))
rows = range.rows
for row in rows[1:]:
values = {}
for key, cell in zip(header, row):
values[key] = cell.value
definitions.append(values)
return definitions
class ExcelParameterLoader(object):
definition_version: int
"""Utility to populate ParameterRepository from spreadsheets.
The structure of the spreadsheets is:
| variable | ... |
|----------|-----|
| ... | ... |
    If the first row in a spreadsheet does not contain the keyword 'variable' the sheet is ignored.
"""
def __init__(self, filename, excel_handler='xlrd', **kwargs):
self.filename = filename
self.definition_version = 2
logger.info(f'Using {excel_handler} excel handler')
excel_handler_instance = None
if excel_handler == 'csv':
excel_handler_instance = CSVHandler()
if excel_handler == 'pandas':
excel_handler_instance = PandasCSVHandler()
if excel_handler == 'openpyxl':
excel_handler_instance = OpenpyxlExcelHandler()
if excel_handler == 'xlsx2csv':
excel_handler_instance = Xlsx2CsvHandler()
if excel_handler == 'xlwings':
excel_handler_instance = XLWingsExcelHandler()
if excel_handler == 'xlrd':
excel_handler_instance = XLRDExcelHandler()
self.excel_handler: ExcelHandler = excel_handler_instance
def load_parameter_definitions(self, sheet_name: str = None):
"""
Load variable text from rows in excel file.
If no spreadsheet arg is given, all spreadsheets are loaded.
The first cell in the first row in a spreadsheet must contain the keyword 'variable' or the sheet is ignored.
Any cells used as titles (with no associated value) are also added to the returned dictionary. However, the
        values associated with each header will be None. For example, given the spreadsheet:
| variable | A | B |
|----------|---|---|
| Title | | |
| Entry | 1 | 2 |
The following list of definitions would be returned:
[ { variable: 'Title', A: None, B: None }
, { variable: 'Entry', A: 1 , B: 2 }
]
:param sheet_name:
:return: list of dicts with {header col name : cell value} pairs
"""
definitions = self.excel_handler.load_definitions(sheet_name, filename=self.filename)
self.definition_version = self.excel_handler.version
return definitions
def load_into_repo(self, repository: ParameterRepository = None, sheet_name: str = None):
"""
Create a Repo from an excel file.
:param repository: the repository to load into
:param sheet_name:
:return:
"""
repository.add_all(self.load_parameters(sheet_name))
def load_parameters(self, sheet_name):
parameter_definitions = self.load_parameter_definitions(sheet_name=sheet_name)
params = []
param_name_map = param_name_maps[int(self.definition_version)]
for _def in parameter_definitions:
# substitute names from the headers with the kwargs names in the Parameter and Distributions classes
# e.g. 'variable' -> 'name', 'module' -> 'module_name', etc
parameter_kwargs_def = {}
for k, v in _def.items():
if k in param_name_map:
if param_name_map[k]:
parameter_kwargs_def[param_name_map[k]] = v
else:
parameter_kwargs_def[k] = v
name_ = parameter_kwargs_def['name']
del parameter_kwargs_def['name']
p = Parameter(name_, version=self.definition_version, **parameter_kwargs_def)
params.append(p)
return params
| mit |
Habasari/sms-tools | lectures/08-Sound-transformations/plots-code/stftFiltering-orchestra.py | 18 | 1677 | import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import utilFunctions as UF
import stftTransformations as STFTT
import stft as STFT
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
w = np.hamming(2048)
N = 2048
H = 512
# design a band stop filter using a hanning window
startBin = int(N*500.0/fs)
nBins = int(N*2000.0/fs)
bandpass = (np.hanning(nBins) * 65.0) - 60
filt = np.zeros(N//2+1)-60
filt[startBin:startBin+nBins] = bandpass
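# Illustrative mapping (assuming fs = 44100 Hz): bin k corresponds to k*fs/N Hz, so
# startBin = int(2048*500.0/44100) = 23 and the shaped band spans roughly 500-2500 Hz.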
y = STFTT.stftFiltering(x, fs, w, N, H, filt)
mX,pX = STFT.stftAnal(x, fs, w, N, H)
mY,pY = STFT.stftAnal(y, fs, w, N, H)
plt.figure(1, figsize=(12, 9))
plt.subplot(311)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mX[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX))
plt.title('mX (orchestra.wav)')
plt.autoscale(tight=True)
plt.subplot(312)
plt.plot(fs*np.arange(mX[0,:].size)/float(N), filt, 'k', lw=1.3)
plt.axis([0, fs/2, -60, 7])
plt.title('filter shape')
plt.subplot(313)
numFrames = int(mY[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mY[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mY))
plt.title('mY')
plt.autoscale(tight=True)
plt.tight_layout()
UF.wavwrite(y, fs, 'orchestra-stft-filtering.wav')
plt.savefig('stftFiltering-orchestra.png')
plt.show()
| agpl-3.0 |
adammenges/statsmodels | statsmodels/tools/tests/test_tools.py | 26 | 18818 | """
Test functions for models.tools
"""
from statsmodels.compat.python import lrange, range
import numpy as np
from numpy.random import standard_normal
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_string_equal, TestCase)
from nose.tools import (assert_true, assert_false, assert_raises)
from statsmodels.datasets import longley
from statsmodels.tools import tools
from statsmodels.tools.tools import pinv_extended
from statsmodels.compat.numpy import np_matrix_rank
class TestTools(TestCase):
def test_add_constant_list(self):
x = lrange(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_1d(self):
x = np.arange(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_has_constant1d(self):
x = np.ones(5)
x = tools.add_constant(x, has_constant='skip')
assert_equal(x, np.ones(5))
assert_raises(ValueError, tools.add_constant, x, has_constant='raise')
assert_equal(tools.add_constant(x, has_constant='add'),
np.ones((5, 2)))
def test_add_constant_has_constant2d(self):
x = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
y = tools.add_constant(x, has_constant='skip')
assert_equal(x, y)
assert_raises(ValueError, tools.add_constant, x, has_constant='raise')
assert_equal(tools.add_constant(x, has_constant='add'),
np.column_stack((np.ones(4), x)))
def test_recipr(self):
X = np.array([[2,1],[-1,0]])
Y = tools.recipr(X)
assert_almost_equal(Y, np.array([[0.5,1],[0,0]]))
def test_recipr0(self):
X = np.array([[2,1],[-4,0]])
Y = tools.recipr0(X)
assert_almost_equal(Y, np.array([[0.5,1],[-0.25,0]]))
def test_rank(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = standard_normal((40,10))
self.assertEquals(tools.rank(X), np_matrix_rank(X))
X[:,0] = X[:,1] + X[:,2]
self.assertEquals(tools.rank(X), np_matrix_rank(X))
def test_extendedpinv(self):
X = standard_normal((40, 10))
np_inv = np.linalg.pinv(X)
np_sing_vals = np.linalg.svd(X, 0, 0)
sm_inv, sing_vals = pinv_extended(X)
assert_almost_equal(np_inv, sm_inv)
assert_almost_equal(np_sing_vals, sing_vals)
def test_extendedpinv_singular(self):
X = standard_normal((40, 10))
X[:, 5] = X[:, 1] + X[:, 3]
np_inv = np.linalg.pinv(X)
np_sing_vals = np.linalg.svd(X, 0, 0)
sm_inv, sing_vals = pinv_extended(X)
assert_almost_equal(np_inv, sm_inv)
assert_almost_equal(np_sing_vals, sing_vals)
def test_fullrank(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X = standard_normal((40,10))
X[:,0] = X[:,1] + X[:,2]
Y = tools.fullrank(X)
self.assertEquals(Y.shape, (40,9))
self.assertEquals(tools.rank(Y), 9)
X[:,5] = X[:,3] + X[:,4]
Y = tools.fullrank(X)
self.assertEquals(Y.shape, (40,8))
warnings.simplefilter("ignore")
self.assertEquals(tools.rank(Y), 8)
def test_estimable():
rng = np.random.RandomState(20120713)
N, P = (40, 10)
X = rng.normal(size=(N, P))
C = rng.normal(size=(1, P))
isestimable = tools.isestimable
assert_true(isestimable(C, X))
assert_true(isestimable(np.eye(P), X))
for row in np.eye(P):
assert_true(isestimable(row, X))
X = np.ones((40, 2))
assert_true(isestimable([1, 1], X))
assert_false(isestimable([1, 0], X))
assert_false(isestimable([0, 1], X))
assert_false(isestimable(np.eye(2), X))
halfX = rng.normal(size=(N, 5))
X = np.hstack([halfX, halfX])
assert_false(isestimable(np.hstack([np.eye(5), np.zeros((5, 5))]), X))
assert_false(isestimable(np.hstack([np.zeros((5, 5)), np.eye(5)]), X))
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), X))
# Test array-like for design
XL = X.tolist()
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), XL))
# Test ValueError for incorrect number of columns
X = rng.normal(size=(N, 5))
for n in range(1, 4):
assert_raises(ValueError, isestimable, np.ones((n,)), X)
assert_raises(ValueError, isestimable, np.eye(4), X)
class TestCategoricalNumerical(object):
#TODO: use assert_raises to check that bad inputs are taken care of
def __init__(self):
#import string
stringabc = 'abcdefghijklmnopqrstuvwxy'
self.des = np.random.randn(25,2)
self.instr = np.floor(np.arange(10,60, step=2)/10)
x=np.zeros((25,5))
x[:5,0]=1
x[5:10,1]=1
x[10:15,2]=1
x[15:20,3]=1
x[20:25,4]=1
self.dummy = x
structdes = np.zeros((25,1),dtype=[('var1', 'f4'),('var2', 'f4'),
('instrument','f4'),('str_instr','a10')])
structdes['var1'] = self.des[:,0][:,None]
structdes['var2'] = self.des[:,1][:,None]
structdes['instrument'] = self.instr[:,None]
string_var = [stringabc[0:5], stringabc[5:10],
stringabc[10:15], stringabc[15:20],
stringabc[20:25]]
string_var *= 5
self.string_var = np.array(sorted(string_var))
structdes['str_instr'] = self.string_var[:,None]
self.structdes = structdes
self.recdes = structdes.view(np.recarray)
def test_array2d(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],10)
def test_array1d(self):
des = tools.categorical(self.instr)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],6)
def test_array2d_drop(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2, drop=True)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.instr, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='instrument')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='instrument')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
# def test_arraylike2d(self):
# des = tools.categorical(self.structdes.tolist(), col=2)
# test_des = des[:,-5:]
# assert_array_equal(test_des, self.dummy)
# assert_equal(des.shape[1], 9)
# def test_arraylike1d(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr)
# test_dum = dum[:,-5:]
# assert_array_equal(test_dum, self.dummy)
# assert_equal(dum.shape[1], 6)
# def test_arraylike2d_drop(self):
# des = tools.categorical(self.structdes.tolist(), col=2, drop=True)
# test_des = des[:,-5:]
# assert_array_equal(test__des, self.dummy)
# assert_equal(des.shape[1], 8)
# def test_arraylike1d_drop(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr, drop=True)
# assert_array_equal(dum, self.dummy)
# assert_equal(dum.shape[1], 5)
class TestCategoricalString(TestCategoricalNumerical):
# comment out until we have type coercion
# def test_array2d(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],10)
# def test_array1d(self):
# des = tools.categorical(self.instr)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],6)
# def test_array2d_drop(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2, drop=True)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.string_var, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='str_instr')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='str_instr')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_arraylike2d(self):
pass
def test_arraylike1d(self):
pass
def test_arraylike2d_drop(self):
pass
def test_arraylike1d_drop(self):
pass
def test_rec_issue302():
arr = np.rec.fromrecords([[10], [11]], names='group')
actual = tools.categorical(arr)
expected = np.rec.array([(10, 1.0, 0.0), (11, 0.0, 1.0)],
dtype=[('group', int), ('group_10', float), ('group_11', float)])
assert_array_equal(actual, expected)
def test_issue302():
arr = np.rec.fromrecords([[10, 12], [11, 13]], names=['group', 'whatever'])
actual = tools.categorical(arr, col=['group'])
expected = np.rec.array([(10, 12, 1.0, 0.0), (11, 13, 0.0, 1.0)],
dtype=[('group', int), ('whatever', int), ('group_10', float),
('group_11', float)])
assert_array_equal(actual, expected)
def test_pandas_const_series():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=False)
assert_string_equal('const', series.columns[1])
assert_equal(series.var(0)[1], 0)
def test_pandas_const_series_prepend():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=True)
assert_string_equal('const', series.columns[0])
assert_equal(series.var(0)[0], 0)
def test_pandas_const_df():
dta = longley.load_pandas().exog
dta = tools.add_constant(dta, prepend=False)
assert_string_equal('const', dta.columns[-1])
assert_equal(dta.var(0)[-1], 0)
def test_pandas_const_df_prepend():
dta = longley.load_pandas().exog
# regression test for #1025
dta['UNEMP'] /= dta['UNEMP'].std()
dta = tools.add_constant(dta, prepend=True)
assert_string_equal('const', dta.columns[0])
assert_equal(dta.var(0)[0], 0)
def test_chain_dot():
A = np.arange(1,13).reshape(3,4)
B = np.arange(3,15).reshape(4,3)
C = np.arange(5,8).reshape(3,1)
assert_equal(tools.chain_dot(A,B,C), np.array([[1820],[4300],[6780]]))
class TestNanDot(object):
@classmethod
def setupClass(cls):
nan = np.nan
cls.mx_1 = np.array([[nan, 1.], [2., 3.]])
cls.mx_2 = np.array([[nan, nan], [2., 3.]])
cls.mx_3 = np.array([[0., 0.], [0., 0.]])
cls.mx_4 = np.array([[1., 0.], [1., 0.]])
cls.mx_5 = np.array([[0., 1.], [0., 1.]])
cls.mx_6 = np.array([[1., 2.], [3., 4.]])
def test_11(self):
test_res = tools.nan_dot(self.mx_1, self.mx_1)
expected_res = np.array([[ np.nan, np.nan], [ np.nan, 11.]])
assert_array_equal(test_res, expected_res)
def test_12(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_2)
expected_res = np.array([[ nan, nan], [ nan, nan]])
assert_array_equal(test_res, expected_res)
def test_13(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_14(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_41(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_4, self.mx_1)
expected_res = np.array([[ nan, 1.], [ nan, 1.]])
assert_array_equal(test_res, expected_res)
def test_23(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_32(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_3, self.mx_2)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_24(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_25(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_5)
expected_res = np.array([[ 0., nan], [ 0., 5.]])
assert_array_equal(test_res, expected_res)
def test_66(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_6, self.mx_6)
expected_res = np.array([[ 7., 10.], [ 15., 22.]])
assert_array_equal(test_res, expected_res)
| bsd-3-clause |
heli522/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index size (full scan). The LSHForest
index has a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher dimensional datasets tend to
benefit more from LSHForest indexing.
The break-even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
    # Initialize LSHForest index for 10-neighbors queries
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/tslibs/test_libfrequencies.py | 2 | 2889 | import pytest
from pandas._libs.tslibs.frequencies import (
INVALID_FREQ_ERR_MSG,
_period_str_to_code,
get_rule_month,
is_subperiod,
is_superperiod,
)
from pandas.tseries import offsets
@pytest.mark.parametrize(
"obj,expected",
[
("W", "DEC"),
(offsets.Week(), "DEC"),
("D", "DEC"),
(offsets.Day(), "DEC"),
("Q", "DEC"),
(offsets.QuarterEnd(startingMonth=12), "DEC"),
("Q-JAN", "JAN"),
(offsets.QuarterEnd(startingMonth=1), "JAN"),
("A-DEC", "DEC"),
("Y-DEC", "DEC"),
(offsets.YearEnd(), "DEC"),
("A-MAY", "MAY"),
("Y-MAY", "MAY"),
(offsets.YearEnd(month=5), "MAY"),
],
)
def test_get_rule_month(obj, expected):
result = get_rule_month(obj)
assert result == expected
@pytest.mark.parametrize(
"obj,expected",
[
("A", 1000),
("A-DEC", 1000),
("A-JAN", 1001),
("Y", 1000),
("Y-DEC", 1000),
("Y-JAN", 1001),
("Q", 2000),
("Q-DEC", 2000),
("Q-FEB", 2002),
("W", 4000),
("W-SUN", 4000),
("W-FRI", 4005),
("Min", 8000),
("ms", 10000),
("US", 11000),
("NS", 12000),
],
)
def test_period_str_to_code(obj, expected):
assert _period_str_to_code(obj) == expected
@pytest.mark.parametrize(
"p1,p2,expected",
[
# Input validation.
(offsets.MonthEnd(), None, False),
(offsets.YearEnd(), None, False),
(None, offsets.YearEnd(), False),
(None, offsets.MonthEnd(), False),
(None, None, False),
(offsets.YearEnd(), offsets.MonthEnd(), True),
(offsets.Hour(), offsets.Minute(), True),
(offsets.Second(), offsets.Milli(), True),
(offsets.Milli(), offsets.Micro(), True),
(offsets.Micro(), offsets.Nano(), True),
],
)
def test_super_sub_symmetry(p1, p2, expected):
assert is_superperiod(p1, p2) is expected
assert is_subperiod(p2, p1) is expected
@pytest.mark.parametrize(
"freq,expected,aliases",
[
("D", 6000, ["DAY", "DLY", "DAILY"]),
("M", 3000, ["MTH", "MONTH", "MONTHLY"]),
("N", 12000, ["NANOSECOND", "NANOSECONDLY"]),
("H", 7000, ["HR", "HOUR", "HRLY", "HOURLY"]),
("T", 8000, ["minute", "MINUTE", "MINUTELY"]),
("L", 10000, ["MILLISECOND", "MILLISECONDLY"]),
("U", 11000, ["MICROSECOND", "MICROSECONDLY"]),
("S", 9000, ["sec", "SEC", "SECOND", "SECONDLY"]),
("B", 5000, ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY"]),
],
)
def test_assert_aliases_deprecated(freq, expected, aliases):
assert isinstance(aliases, list)
assert _period_str_to_code(freq) == expected
for alias in aliases:
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
_period_str_to_code(alias)
| apache-2.0 |
kaku289/paparazzi | sw/airborne/test/ahrs/ahrs_utils.py | 86 | 4923 | #! /usr/bin/env python
# Copyright (C) 2011 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function
import subprocess
import numpy as np
import matplotlib.pyplot as plt
def run_simulation(ahrs_type, build_opt, traj_nb):
print("\nBuilding ahrs")
args = ["make", "clean", "run_ahrs_on_synth", "AHRS_TYPE=AHRS_TYPE_" + ahrs_type] + build_opt
#print(args)
p = subprocess.Popen(args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
outputlines = p.stdout.readlines()
p.wait()
for i in outputlines:
print(" # " + i, end=' ')
print()
print("Running simulation")
print(" using traj " + str(traj_nb))
p = subprocess.Popen(args=["./run_ahrs_on_synth", str(traj_nb)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=False)
outputlines = p.stdout.readlines()
p.wait()
# for i in outputlines:
# print(" "+i, end=' ')
# print("\n")
ahrs_data_type = [('time', 'float32'),
('phi_true', 'float32'), ('theta_true', 'float32'), ('psi_true', 'float32'),
('p_true', 'float32'), ('q_true', 'float32'), ('r_true', 'float32'),
('bp_true', 'float32'), ('bq_true', 'float32'), ('br_true', 'float32'),
('phi_ahrs', 'float32'), ('theta_ahrs', 'float32'), ('psi_ahrs', 'float32'),
('p_ahrs', 'float32'), ('q_ahrs', 'float32'), ('r_ahrs', 'float32'),
('bp_ahrs', 'float32'), ('bq_ahrs', 'float32'), ('br_ahrs', 'float32')]
mydescr = np.dtype(ahrs_data_type)
data = [[] for dummy in xrange(len(mydescr))]
# import code; code.interact(local=locals())
for line in outputlines:
if line.startswith("#"):
print(" " + line, end=' ')
else:
fields = line.strip().split(' ')
#print(fields)
for i, number in enumerate(fields):
data[i].append(number)
print()
for i in xrange(len(mydescr)):
data[i] = np.cast[mydescr[i]](data[i])
return np.rec.array(data, dtype=mydescr)
def plot_simulation_results(plot_true_state, lsty, label, sim_res):
print("Plotting Results")
# f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_ahrs, lsty, label=label)
plt.ylabel('degres')
plt.title('phi')
plt.legend()
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_ahrs, lsty)
plt.title('theta')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_ahrs, lsty)
plt.title('psi')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_ahrs, lsty)
plt.ylabel('degres/s')
plt.title('p')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_ahrs, lsty)
plt.title('q')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_ahrs, lsty)
plt.title('r')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_ahrs, lsty)
plt.ylabel('degres/s')
plt.xlabel('time in s')
plt.title('bp')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_ahrs, lsty)
plt.xlabel('time in s')
plt.title('bq')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_ahrs, lsty)
plt.xlabel('time in s')
plt.title('br')
if plot_true_state:
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_true, 'r--')
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_true, 'r--')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_true, 'r--')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_true, 'r--')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_true, 'r--')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_true, 'r--')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_true, 'r--')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_true, 'r--')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_true, 'r--')
def show_plot():
plt.show()
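# A minimal driver sketch for this module (the AHRS type and trajectory number
# below are illustrative assumptions, not values taken from this file):
#
#     sim_res = run_simulation("ICQ", [], 1)
#     plot_simulation_results(True, "b-", "ICQ", sim_res)
#     show_plot()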
| gpl-2.0 |
df8oe/UHSDR | mchf-eclipse/drivers/ui/lcd/edit-8x8-font.py | 4 | 2343 | # Tool to extract 8x8 font data, save to bitmap file, and apply modifications
# to source code after editing the bitmap.
from __future__ import print_function
from matplotlib.pyplot import imread, imsave, imshow, show
import numpy as np
import sys
# Where to find the font data - may need updated if code has changed.
source_file = 'ui_lcd_hy28_fonts.c'
start_marker = 'const uint8_t GL_ASCII8x8_Table [] ='
start_offset = 2 # Data starts this number of lines after start marker.
end_marker = '};' # Indicates end of font data.
# Image filename used in extract and insert modes.
image_file = 'font-8x8.png'
mode = None
if len(sys.argv) > 1:
mode = sys.argv[1]
if mode not in ('show', 'extract', 'insert'):
print("Usage: %s { show | extract | insert }" % sys.argv[0])
sys.exit(1)
# Get the literals used to populate the font array in the source file
lines = [line.rstrip() for line in open(source_file).readlines()]
start = lines.index(start_marker) + start_offset
end = start + lines[start:].index(end_marker)
data = str.join("", lines[start:end])
# Eval the literals to get the values into a numpy array
packed = eval("np.array([%s], np.uint8)" % data)
# Reorganise into a monochrome image, with the 96 8 x 8 characters
# laid out in 8 rows by 12 columns for easier viewing/editing
unpacked = np.unpackbits(packed)
bitmaps = unpacked.reshape(96, 8, 8)
indices = np.arange(96).reshape(8, 12)
image = np.block([[bitmaps[idx] for idx in row] for row in indices])
if mode == 'show':
# Display font image
imshow(image, cmap='binary')
show()
elif mode == 'extract':
# Save font image
imsave(image_file, image, format='png', cmap='binary')
elif mode == 'insert':
# Read in modified font image
image = imread(image_file)[:,:,0].astype(bool)
# Reorganise back to original order
bitmaps = np.vstack([np.vstack(np.split(row, 12, 1))
for row in np.split(image, 8)])
unpacked = bitmaps.reshape(-1)
packed = ~np.packbits(unpacked)
# Replace lines of file in same format as used before
grouped = packed.reshape(-1, 8)
for i, group in enumerate(grouped):
line = (" " + " 0x%02x," * 8) % tuple(group)
lines[start + i] = line
# Write out modified source file
open(source_file, 'w').writelines([line + "\n" for line in lines])
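# Typical round-trip, inferred from the three modes above (a sketch, not part
# of the original tool's documentation):
#
#     python edit-8x8-font.py extract   # dump the C font table to font-8x8.png
#     ...edit font-8x8.png in an image editor (keep it monochrome)...
#     python edit-8x8-font.py insert    # patch ui_lcd_hy28_fonts.c in place
#
# 'show' only displays the current font and touches no files.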
| gpl-3.0 |
amacd31/bom_data_parser | tests/test_hrs.py | 1 | 2066 | import os
import numpy as np
import pandas as pd
import unittest
from datetime import datetime
from bom_data_parser import read_hrs_csv
class HRSTest(unittest.TestCase):
def setUp(self):
self.test_cdo_file = os.path.join(os.path.dirname(__file__), 'data', 'HRS', '410730_daily_ts.csv')
def test_hrs(self):
data, attributes = read_hrs_csv(self.test_cdo_file)
self.assertTrue('Q' in data.columns)
self.assertTrue('QCode' in data.columns)
self.assertEqual(attributes['station_name'], 'Cotter River at Gingera (410730)')
self.assertEqual(attributes['catchment_area'], 130.0)
self.assertEqual(attributes['latitude'], 148.8212)
self.assertEqual(attributes['longitude'], -35.5917)
self.assertEqual(data.index[0], datetime(1963,7,5))
self.assertEqual(data.index[-1], datetime(2012,10,4))
self.assertAlmostEqual(data.Q.values[0], 127.312,3)
self.assertAlmostEqual(data.Q.values[-1], 186.238,3)
self.assertEqual(data.QCode.values[0], 10)
self.assertEqual(data.QCode.values[-1], 10)
def test_hrs_201510_format(self):
test_file = os.path.join(os.path.dirname(__file__), 'data', 'HRS', '410730_daily_ts_201510.csv')
data, attributes = read_hrs_csv(test_file)
self.assertTrue('Flow (ML)' in data.columns)
self.assertTrue('Bureau QCode' in data.columns)
self.assertEqual(attributes['station_name'], 'Cotter River at Gingera (410730)')
self.assertEqual(attributes['catchment_area'], 130.0)
self.assertEqual(attributes['latitude'], 148.8212)
self.assertEqual(attributes['longitude'], -35.5917)
self.assertEqual(data.index[0], datetime(1963,7,5))
self.assertEqual(data.index[-1], datetime(2014,12,31))
self.assertAlmostEqual(data['Flow (ML)'].values[0], 127.322,3)
self.assertAlmostEqual(data['Flow (ML)'].values[-1], 16.1915,4)
self.assertEqual(data['Bureau QCode'].values[0], 'A')
self.assertEqual(data['Bureau QCode'].values[-1], 'A')
| bsd-3-clause |
spel-uchile/SUCHAI-Flight-Software | sandbox/log_parser.py | 1 | 1956 | import re
import argparse
import pandas as pd
# General expressions
re_error = re.compile(r'\[ERROR\]\[(\d+)\]\[(\w+)\](.+)')
re_warning = re.compile(r'\[WARN \]\[(\d+)\]\[(\w+)\](.+)')
re_info = re.compile(r'\[INFO \]\[(\d+)\]\[(\w+)\](.+)')
re_debug = re.compile(r'\[DEBUG\]\[(\d+)\]\[(\w+)\](.+)')
re_verbose = re.compile(r'\[VERB \]\[(\d+)\]\[(\w+)\](.+)')
# Specific expressions
re_cmd_run = re.compile(r'\[INFO \]\[(\d+)]\[Executer\] Running the command: (.+)')
re_cmd_result = re.compile(r'\[INFO \]\[(\d+)]\[Executer\] Command result: (\d+)')
def get_parameters():
"""
Parse script arguments
"""
parser = argparse.ArgumentParser()
# General expressions
parser.add_argument('file', type=str, help="Log file")
parser.add_argument('--error', action="store_const", const=re_error)
parser.add_argument('--warning', action="store_const", const=re_warning)
parser.add_argument('--info', action="store_const", const=re_info)
parser.add_argument('--debug', action="store_const", const=re_debug)
parser.add_argument('--verbose', action="store_const", const=re_verbose)
# Specific expressions
parser.add_argument('--cmd-run', action="store_const", const=re_cmd_run)
parser.add_argument('--cmd-result', action="store_const", const=re_cmd_result)
return parser.parse_args()
def parse_text(text, regexp):
return regexp.findall(text)
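# Illustrative example of the log format these expressions expect (the line
# below is made up, not taken from a real flight log):
#
#     [INFO ][1024][Executer] Running the command: com_ping
#
# parse_text(text, re_cmd_run) would return [('1024', 'com_ping')] for that
# line, i.e. one (timestamp, command) tuple per match.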
def save_parsed(logs, file, format=None):
df = pd.DataFrame(logs)
# print(df)
df.to_csv(file)
if __name__ == "__main__":
args = get_parameters()
print("Reading file {}...".format(args.file))
with open(args.file) as logfile:
text = logfile.read()
args = vars(args)
print(args)
for type, regexp in args.items():
if type is not "file" and regexp is not None:
print("Parsing {}...", type)
logs = parse_text(text, regexp)
save_parsed(logs, args["file"]+type+".csv")
| gpl-3.0 |
kezilu/pextant | pextant/api.py | 2 | 3350 | import csv
import json
import logging
import re
from pextant.solvers.astarMesh import astarSolver
from pextant.analysis.loadWaypoints import JSONloader
import matplotlib.pyplot as plt
logger = logging.getLogger()
class Pathfinder:
"""
This class performs the A* path finding algorithm and contains the Cost Functions. Also includes
capabilities for analysis of a path.
This class still needs performance testing for maps of larger sizes. I don't believe that
we will be doing anything extremely computationally intensive though.
Current cost functions are Time, Distance, and (Metabolic) Energy. It would be useful to be able to
optimize on other resources like battery power or water sublimated, but those are significantly more
    difficult because they depend on shadowing and were not implemented by Aaron.
"""
def __init__(self, explorer_model, environmental_model):
cheating = 1
self.solver = astarSolver(environmental_model, explorer_model,
optimize_on = 'Energy', heuristic_accelerate = cheating)
    def aStarCompletePath(self, optimize_on, waypoints, returnType="JSON", dh=None, fileName=None):
pass
def completeSearch(self, optimize_on, waypoints, filepath=None ):
"""
Returns a tuple representing the path and the total cost of the path.
The path will be a list. All activity points will be duplicated in
the returned path.
        waypoints is a list of activityPoint objects, in the correct order. filepath is
        used when we would like to write the result to a file and is currently required
        for csv output.
"""
segmentsout, rawpoints, items = self.solver.solvemultipoint(waypoints)
if filepath:
extension = re.search('^(.+\/[^/]+)\.(\w+)$', filepath).group(2)
else:
extension = None
if extension == "json":
json.dump(segmentsout.tojson(), filepath)
elif extension == "csv":
header = [['isStation', 'x', 'y', 'z', 'distanceMeters', 'energyJoules', 'timeSeconds']]
rows = header + segmentsout.tocsv()
with open(filepath, 'wb') as csvfile:
writer = csv.writer(csvfile)
for row in rows:
writer.writerow(row)
return rows
return segmentsout, rawpoints, items
def completeSearchFromJSON(self, optimize_on, jsonInput, filepath=None, algorithm="A*",
numTestPoints=0):
jloader = JSONloader.from_string(jsonInput)
waypoints = jloader.get_waypoints()
#if algorithm == "A*":
segmentsout,_,_ = self.completeSearch(optimize_on, waypoints, filepath)
updatedjson = jloader.add_search_sol(segmentsout.list)
return updatedjson
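# A minimal sketch of driving the JSON entry point (assumes a Pathfinder built
# as in the __main__ block below; the file name is the one used there):
#
#     with open('waypoints/HI_13Nov16_MD7_A.json') as fp:
#         updated = pathfinder.completeSearchFromJSON('Energy', fp.read())
#
# The returned string is the input waypoint JSON with the solved path added by
# JSONloader.add_search_sol.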
if __name__ == '__main__':
from pextant.analysis.loadWaypoints import loadPoints
from explorers import Astronaut
from EnvironmentalModel import GDALMesh
hi_low = GDALMesh('maps/HI_lowqual_DEM.tif')
waypoints = loadPoints('waypoints/HI_13Nov16_MD7_A.json')
env_model = hi_low.loadSubSection(waypoints.geoEnvelope())
astronaut = Astronaut(80)
pathfinder = Pathfinder(astronaut, env_model)
out = pathfinder.aStarCompletePath('Energy', waypoints)
    print(out) | mit |
qrsforever/workspace | python/learn/thinkstats/rankit.py | 1 | 1807 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import random
import thinkstats
import myplot
import matplotlib.pyplot as pyplot
def Sample(n=6):
"""Generates a sample from a standard normal variate.
n: sample size
Returns: list of n floats
"""
t = [random.normalvariate(0.0, 1.0) for i in range(n)]
t.sort()
return t
def Samples(n=6, m=1000):
"""Generates m samples with size n each.
n: sample size
m: number of samples
Returns: list of m samples
"""
t = [Sample(n) for i in range(m)]
return t
def EstimateRankits(n=6, m=1000):
"""Estimates the expected values of sorted random samples.
n: sample size
m: number of iterations
Returns: list of n rankits
"""
t = Samples(n, m)
t = zip(*t)
means = [thinkstats.Mean(x) for x in t]
return means
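# A small usage sketch (illustrative; `data` stands for any six observations
# whose normality you want to check):
#
#     rankits = EstimateRankits(n=6, m=1000)
#     pyplot.plot(rankits, sorted(data), 'b.')
#
# Roughly normal data fall close to a straight line in this plot.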
def MakeNormalPlot(ys, root=None, line_options={}, **options):
"""Makes a normal probability plot.
Args:
ys: sequence of values
line_options: dictionary of options for pyplot.plot
options: dictionary of options for myplot.Save
"""
# TODO: when n is small, generate a larger sample and desample
n = len(ys)
xs = [random.normalvariate(0.0, 1.0) for i in range(n)]
pyplot.clf()
pyplot.plot(sorted(xs), sorted(ys), 'b.', markersize=3, **line_options)
myplot.Save(root,
                xlabel='Standard normal values',
legend=False,
**options)
def main():
means = EstimateRankits(84)
print(means)
if __name__ == "__main__":
main()
| mit |
rrohan/scikit-learn | sklearn/ensemble/voting_classifier.py | 178 | 8006 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <se.raschka@gmail.com>,
# Gilles Louppe <g.louppe@gmail.com>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the `fit` method on the `VotingClassifier` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
        the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
        Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
            Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
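    # The flattened names produced above ('<step>__<param>') make the ensemble
    # usable inside a grid search; a sketch, with illustrative import path and
    # grid values for this scikit-learn version:
    #
    #     from sklearn.grid_search import GridSearchCV
    #     params = {'lr__C': [1.0, 100.0], 'voting': ['soft', 'hard']}
    #     grid = GridSearchCV(estimator=eclf1, param_grid=params, cv=5)
    #     grid.fit(X, y)
    #
    # where 'lr' is the name given to the LogisticRegression step in the
    # `estimators` list, as in the class docstring examples.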
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
| bsd-3-clause |
Midnighter/pyorganism | setup.py | 1 | 2511 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
==================
PyOrganism Package
==================
:Authors:
Moritz Emanuel Beber
:Date:
2012-05-22
:Copyright:
Copyright(c) 2012 Jacobs University of Bremen. All rights reserved.
:File:
setup.py
"""
import sys
from os.path import join
from setuptools import (setup, Extension)
try:
from Cython.Distutils import build_ext
except ImportError as err:
sys.exit("Apologies, you need 'Cython' to install 'pyorganism'.")
if __name__ == "__main__":
# continuous
sources = ["continuous_wrapper.pyx", "continuous.c"]
c_path = join("pyorganism", "regulation", "src")
continuous = Extension("pyorganism.regulation.continuous_wrapper",
sources=[join(c_path, src) for src in sources],
include_dirs=[c_path]
)
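    # The Cython extension above is compiled through the build_ext command
    # wired into setup() below; for in-place development builds the standard
    # invocation would be:
    #
    #     python setup.py build_ext --inplace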
setup(
name="pyorganism",
version="0.2.5",
license="BSD",
description="analyze organisational principles in living organisms",
author="Moritz Emanuel Beber",
author_email="moritz (dot) beber (at) gmail (dot) com",
url="http://github.com/Midnighter/pyorganism",
zip_safe=False,
install_requires=[
"future",
"networkx",
"numpy",
"pandas"
],
packages=["pyorganism",
"pyorganism.io",
"pyorganism.metabolism",
"pyorganism.regulation",
],
# package_data = {"pyorganism": ["data/*.xml", "data/*.txt", "data/*.tsv"]},
ext_modules=[continuous],
cmdclass={"build_ext": build_ext},
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
)
| bsd-3-clause |
chrismamil/chowda | test/test_chowda.py | 1 | 2201 | import unittest
import os
import chowda.parsing as parse
import datetime
import pandas as pd
from chowda.load import load_file
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
TEST_FILE = "CTL1 wk3 exp1 RAW data.txt"
TEST_1 = os.path.join(DATA_DIR, TEST_FILE)
class TestChowda(unittest.TestCase):
    def setUp(self):
test_file = os.path.join(DATA_DIR, TEST_FILE)
with open(test_file) as in_handle:
self.in_data = in_handle.readlines()
def test_parse_experiment_time(self):
result = parse.parse_experiment_time(self.in_data[0])
self.assertEquals(result.keys()[0], "Experiment Started")
def test_parse_subject(self):
result = parse.parse_subject(self.in_data[1])
self.assertEquals(result["Subject"], "CNS1")
def test_parse_mass(self):
result = parse.parse_subject_mass(self.in_data[2])
self.assertEquals(result["Subject Mass"], 34.26)
def test_load_file(self):
from chowda.load import load_file
result = load_file(TEST_1)
self.assertEquals(result[0].strip(),
'"Oxymax Windows V 2.30 Data File"')
def test_get_header(self):
from chowda.load import get_header
result = get_header(TEST_1)
self.assertEquals(result[0].strip(),
'"Oxymax Windows V 2.30 Data File"')
self.assertEquals(result[-1].split(",")[0].strip(), '"========"')
def test_get_data(self):
from chowda.load import get_data
result = get_data(TEST_1)
self.assertEquals(result[0].split(",", 1)[0], "Interval")
def test_partition_file(self):
from chowda.load import partition_file
header, data = partition_file(TEST_1)
self.assertEquals(header[0].strip(),
'"Oxymax Windows V 2.30 Data File"')
self.assertEquals(header[-1].split(",")[0].strip(), '"========"')
self.assertEquals(data[0].split(",", 1)[0], "Interval")
def test_load_dataframe(self):
from chowda.load import load_dataframe
result = load_dataframe(parse.get_data(self.in_data))
self.assertEquals(result["Interval"].ix[0], "001")
| mit |
cainiaocome/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity,
            # which [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
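    # The leaf ids returned by apply() can serve as a leaf-based encoding of
    # the samples; a sketch (the encoder usage is illustrative, not part of
    # this estimator):
    #
    #     leaves = clf.apply(X)                          # shape (n_samples,)
    #     from sklearn.preprocessing import OneHotEncoder
    #     leaf_features = OneHotEncoder().fit_transform(leaves.reshape(-1, 1))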
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
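    # For intuition: with a single output, a test sample falling into a leaf
    # that holds 3 training samples of class 0 and 1 of class 1 yields the
    # predict_proba row [0.75, 0.25] -- the per-class fractions of that leaf
    # (weighted fractions when sample_weight was passed to fit).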
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
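# A minimal illustrative sketch (never called in this module): the docstring above warns
# that extra-trees are intended to be used inside ensemble methods. One common pattern,
# sketched here under that assumption, is to bag many totally randomized trees; the
# dataset, parameters and helper name are illustrative only.
def _extra_tree_in_ensemble_demo():
    from sklearn.datasets import load_iris
    from sklearn.ensemble import BaggingClassifier
    iris = load_iris()
    # max_features=1 makes each tree totally random, as described in the docstring.
    ensemble = BaggingClassifier(ExtraTreeClassifier(max_features=1, random_state=0),
                                 n_estimators=10, random_state=0)
    return ensemble.fit(iris.data, iris.target).score(iris.data, iris.target)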
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
ralbayaty/KaggleRetina | testing/censureHistCalc.py | 1 | 4517 | from skimage.feature import CENSURE
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
import numpy as np
import cv2
import sys
from PIL import Image, ImageDraw
def draw_keypoints(img, kp, scale):
draw = ImageDraw.Draw(img)
# Draw a maximum of 300 keypoints
for i in range(min(len(scale),300)):
x1 = kp[i,1]
y1 = kp[i,0]
x2 = kp[i,1]+2**scale[i]
y2 = kp[i,0]+2**scale[i]
coords = (x1, y1, x2, y2)
draw.ellipse(coords, fill = None, outline ='white')
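# A minimal illustrative sketch (not executed by this script): how draw_keypoints above
# is meant to be combined with a CENSURE detector on a single grayscale image. The file
# name and detector parameters are placeholders.
def _censure_draw_demo(path="Lenna.png"):
    gray_img = Image.open(path).convert("L")      # PIL image, as draw_keypoints expects
    detector = CENSURE(min_scale=1, max_scale=7, mode="STAR")
    detector.detect(np.asarray(gray_img))         # CENSURE works on a 2-D array
    draw_keypoints(gray_img, detector.keypoints, detector.scales)
    return gray_img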
if __name__ == '__main__':
try:
file_name = sys.argv[1]
    except IndexError:
print("Didn't give me a file...")
file_name = "Lenna.png"
def nothing(*arg):
pass
# Create sliderbars to change the values of CENSURE parameters online
# Defaults: min_scale=1, max_scale=7, mode='DoB', non_max_threshold=0.15, line_threshold=10
cv2.namedWindow('censure')
cv2.createTrackbar('min_scale', 'censure', 1, 10, nothing)
cv2.createTrackbar('max_scale', 'censure', 7, 20, nothing)
cv2.createTrackbar('mode', 'censure', 2, 2, nothing)
cv2.createTrackbar('non_max_threshold', 'censure', 6, 1000, nothing)
cv2.createTrackbar('line_threshold', 'censure', 10, 100, nothing)
# Read image from file, then inspect the image dimensions
    img = cv2.imread(file_name, 1)
    img1 = img.copy()  # keep an unmodified copy of the frame; the display loop below resets img from it
    height, width, channels = img.shape
# Pull the different color channels from the image
blue = img[:,:,0]
green = img[:,:,1]
red = img[:,:,2]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Make a PIL image from each channel so we can use PIL.Image.thumbnail to resize if needed
blue1 = Image.fromarray(blue)
green1 = Image.fromarray(green)
red1 = Image.fromarray(red)
gray1 = Image.fromarray(gray)
    # Check if the dimensions exceed the desired size; if so, resize keeping the aspect ratio
m, n = 512, 512
if height > m or width > n:
blue1.thumbnail((m,n), Image.ANTIALIAS)
green1.thumbnail((m,n), Image.ANTIALIAS)
red1.thumbnail((m,n), Image.ANTIALIAS)
gray1.thumbnail((m,n), Image.ANTIALIAS)
# CENSURE related
mode_dict = {"0": "DoB", "1": "Octagon", "2": "STAR"}
last_num_kp = 0
while True:
vis = gray.copy()
img = img1.copy()
# Read the values of the sliderbars and save them to variables
min_scale = cv2.getTrackbarPos('min_scale', 'censure')
max_scale = cv2.getTrackbarPos('max_scale', 'censure')
        if min_scale == 0:
min_scale = 1
if min_scale + max_scale < 3:
max_scale = min_scale + 2
mode = mode_dict[str(cv2.getTrackbarPos('mode', 'censure'))]
non_max_threshold = float(cv2.getTrackbarPos('non_max_threshold', 'censure'))/1000
line_threshold = cv2.getTrackbarPos('line_threshold', 'censure')
# Create a CENSURE feature detector
censure = CENSURE(min_scale=min_scale, max_scale=max_scale, mode=mode,
non_max_threshold=non_max_threshold, line_threshold=line_threshold)
# Obtain the CENSURE features
censure.detect(blue1)
kp_blue, scale_blue = censure.keypoints, censure.scales
censure.detect(green1)
kp_green, scale_green = censure.keypoints, censure.scales
censure.detect(red1)
kp_red, scale_red = censure.keypoints, censure.scales
censure.detect(gray1)
kp_gray, scale_gray = censure.keypoints, censure.scales
# Print the # of features if it has changed between iterations
num_kp = len(censure.keypoints)
if last_num_kp != num_kp:
print("Number of keypoints: " + str(len(censure.keypoints)))
last_num_kp = num_kp
# Draw the feature points on the images
draw_keypoints(blue1, kp_blue, scale_blue)
draw_keypoints(green1, kp_green, scale_green)
draw_keypoints(red1, kp_red, scale_red)
draw_keypoints(gray1, kp_gray, scale_gray)
# Obtain the histogram of scale values
plt.clf() # clear the figure from any previous plot
scale_hist, bin_edges = np.histogram(censure.scales,max_scale-min_scale, (min_scale,max_scale+1))
plt.bar(bin_edges[:-1]-0.5, scale_hist, width = 1)
plt.show(block=False)
plt.draw()
# Show the image with keypoints drawn over
image = cv2.cvtColor(np.asarray(img),cv2.COLOR_BGR2RGB)
cv2.imshow('censure', image)
if 0xFF & cv2.waitKey(500) == 27:
break
cv2.destroyAllWindows() | gpl-2.0 |
xuewei4d/scikit-learn | sklearn/inspection/tests/test_permutation_importance.py | 7 | 17760 | import pytest
import numpy as np
from numpy.testing import assert_allclose
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_iris
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.impute import SimpleImputer
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.utils import parallel_backend
from sklearn.utils._testing import _convert_container
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression(n_jobs):
# Make sure that feature highly correlated to the target have a higher
# importance
rng = np.random.RandomState(42)
n_repeats = 5
X, y = load_diabetes(return_X_y=True)
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
X = np.hstack([X, y_with_little_noise])
clf = RandomForestRegressor(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] >
result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression_pandas(n_jobs):
pd = pytest.importorskip("pandas")
# Make sure that feature highly correlated to the target have a higher
# importance
rng = np.random.RandomState(42)
n_repeats = 5
dataset = load_iris()
X, y = dataset.data, dataset.target
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
# Adds feature correlated with y as the last column
X = pd.DataFrame(X, columns=dataset.feature_names)
X['correlated_feature'] = y_with_little_noise
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_robustness_to_high_cardinality_noisy_feature(n_jobs, seed=42):
# Permutation variable importance should not be affected by the high
# cardinality bias of traditional feature importances, especially when
# computed on a held-out test set:
rng = np.random.RandomState(seed)
n_repeats = 5
n_samples = 1000
n_classes = 5
n_informative_features = 2
n_noise_features = 1
n_features = n_informative_features + n_noise_features
# Generate a multiclass classification dataset and a set of informative
# binary features that can be used to predict some classes of y exactly
# while leaving some classes unexplained to make the problem harder.
classes = np.arange(n_classes)
y = rng.choice(classes, size=n_samples)
X = np.hstack([(y == c).reshape(-1, 1)
for c in classes[:n_informative_features]])
X = X.astype(np.float32)
# Not all target classes are explained by the binary class indicator
# features:
assert n_informative_features < n_classes
    # Add n_noise_features other noisy features with high cardinality
    # (numerical) values that can be used to overfit the training data.
X = np.concatenate([X, rng.randn(n_samples, n_noise_features)], axis=1)
assert X.shape == (n_samples, n_features)
# Split the dataset to be able to evaluate on a held-out test set. The
    # test size should be large enough for importance measurements to be
# stable:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=rng)
clf = RandomForestClassifier(n_estimators=5, random_state=rng)
clf.fit(X_train, y_train)
# Variable importances computed by impurity decrease on the tree node
# splits often use the noisy features in splits. This can give misleading
# impression that high cardinality noisy variables are the most important:
tree_importances = clf.feature_importances_
informative_tree_importances = tree_importances[:n_informative_features]
noisy_tree_importances = tree_importances[n_informative_features:]
assert informative_tree_importances.max() < noisy_tree_importances.min()
# Let's check that permutation-based feature importances do not have this
# problem.
r = permutation_importance(clf, X_test, y_test, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert r.importances.shape == (X.shape[1], n_repeats)
# Split the importances between informative and noisy features
informative_importances = r.importances_mean[:n_informative_features]
noisy_importances = r.importances_mean[n_informative_features:]
# Because we do not have a binary variable explaining each target classes,
# the RF model will have to use the random variable to make some
# (overfitting) splits (as max_depth is not set). Therefore the noisy
# variables will be non-zero but with small values oscillating around
# zero:
assert max(np.abs(noisy_importances)) > 1e-7
assert noisy_importances.max() < 0.05
# The binary features correlated with y should have a higher importance
# than the high cardinality noisy features.
# The maximum test accuracy is 2 / 5 == 0.4, each informative feature
# contributing approximately a bit more than 0.2 of accuracy.
assert informative_importances.min() > 0.15
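# A minimal illustrative sketch (not part of the test suite): what permutation importance
# measures for a single feature, written out by hand. It assumes an already-fitted
# estimator with a ``score`` method and a 2-D numpy array ``X``; the real implementation
# in sklearn.inspection additionally handles scorers, sample weights and parallelism.
def _manual_permutation_importance_sketch(estimator, X, y, column, n_repeats=5,
                                          random_state=0):
    rng = np.random.RandomState(random_state)
    baseline = estimator.score(X, y)
    drops = []
    for _ in range(n_repeats):
        X_permuted = X.copy()
        rng.shuffle(X_permuted[:, column])   # break the link between this feature and y
        drops.append(baseline - estimator.score(X_permuted, y))
    return np.mean(drops)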
def test_permutation_importance_mixed_types():
rng = np.random.RandomState(42)
n_repeats = 4
# Last column is correlated with y
X = np.array([[1.0, 2.0, 3.0, np.nan], [2, 1, 2, 1]]).T
y = np.array([0, 1, 0, 1])
clf = make_pipeline(SimpleImputer(), LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
# use another random state
rng = np.random.RandomState(0)
result2 = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result2.importances.shape == (X.shape[1], n_repeats)
assert not np.allclose(result.importances, result2.importances)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result2.importances_mean[-1] > result2.importances_mean[:-1])
def test_permutation_importance_mixed_types_pandas():
pd = pytest.importorskip("pandas")
rng = np.random.RandomState(42)
n_repeats = 5
# Last column is correlated with y
X = pd.DataFrame({'col1': [1.0, 2.0, 3.0, np.nan],
'col2': ['a', 'b', 'a', 'b']})
y = np.array([0, 1, 0, 1])
num_preprocess = make_pipeline(SimpleImputer(), StandardScaler())
preprocess = ColumnTransformer([
('num', num_preprocess, ['col1']),
('cat', OneHotEncoder(), ['col2'])
])
clf = make_pipeline(preprocess, LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_linear_regression():
X, y = make_regression(n_samples=500, n_features=10, random_state=0)
X = scale(X)
y = scale(y)
lr = LinearRegression().fit(X, y)
# this relationship can be computed in closed form
expected_importances = 2 * lr.coef_**2
results = permutation_importance(lr, X, y,
n_repeats=50,
scoring='neg_mean_squared_error')
assert_allclose(expected_importances, results.importances_mean,
rtol=1e-1, atol=1e-6)
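# A minimal illustrative sketch (not part of the test suite) of where the closed form
# ``2 * lr.coef_**2`` used above comes from: for a standardized feature, permuting it
# roughly doubles its variance contribution to the squared error, since
# Var(x - x_permuted) ~= 2 * Var(x) when the permutation is independent of x.
# All names below are illustrative.
def _closed_form_importance_sketch(coef=1.5, n_samples=100000, seed=0):
    rng = np.random.RandomState(seed)
    x = scale(rng.normal(size=n_samples))
    y = coef * x                                   # noise-free linear relation
    lr = LinearRegression().fit(x.reshape(-1, 1), y)
    x_perm = rng.permutation(x)
    mse_increase = np.mean((y - lr.predict(x_perm.reshape(-1, 1))) ** 2)
    return mse_increase, 2 * coef ** 2             # these agree up to sampling noise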
def test_permutation_importance_equivalence_sequential_parallel():
# regression test to make sure that sequential and parallel calls will
# output the same results.
X, y = make_regression(n_samples=500, n_features=10, random_state=0)
lr = LinearRegression().fit(X, y)
importance_sequential = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=1
)
# First check that the problem is structured enough and that the model is
# complex enough to not yield trivial, constant importances:
imp_min = importance_sequential['importances'].min()
imp_max = importance_sequential['importances'].max()
assert imp_max - imp_min > 0.3
    # Then actually check that parallelism does not impact the results
# either with shared memory (threading) or without isolated memory
# via process-based parallelism using the default backend
# ('loky' or 'multiprocessing') depending on the joblib version:
# process-based parallelism (by default):
importance_processes = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=2)
assert_allclose(
importance_processes['importances'],
importance_sequential['importances']
)
# thread-based parallelism:
with parallel_backend("threading"):
importance_threading = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=2
)
assert_allclose(
importance_threading['importances'],
importance_sequential['importances']
)
@pytest.mark.parametrize("n_jobs", [None, 1, 2])
def test_permutation_importance_equivalence_array_dataframe(n_jobs):
    # This test checks that the column shuffling logic has the same behavior
    # on both a dataframe and a simple numpy array.
    pd = pytest.importorskip('pandas')
    # Regression test to make sure that array and dataframe inputs will
    # output the same results.
X, y = make_regression(n_samples=100, n_features=5, random_state=0)
X_df = pd.DataFrame(X)
# Add a categorical feature that is statistically linked to y:
binner = KBinsDiscretizer(n_bins=3, encode="ordinal")
cat_column = binner.fit_transform(y.reshape(-1, 1))
# Concatenate the extra column to the numpy array: integers will be
# cast to float values
X = np.hstack([X, cat_column])
assert X.dtype.kind == "f"
# Insert extra column as a non-numpy-native dtype (while keeping backward
# compat for old pandas versions):
if hasattr(pd, "Categorical"):
cat_column = pd.Categorical(cat_column.ravel())
else:
cat_column = cat_column.ravel()
new_col_idx = len(X_df.columns)
X_df[new_col_idx] = cat_column
assert X_df[new_col_idx].dtype == cat_column.dtype
    # Stitch an arbitrary index onto the dataframe:
X_df.index = np.arange(len(X_df)).astype(str)
rf = RandomForestRegressor(n_estimators=5, max_depth=3, random_state=0)
rf.fit(X, y)
n_repeats = 3
importance_array = permutation_importance(
rf, X, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
)
# First check that the problem is structured enough and that the model is
# complex enough to not yield trivial, constant importances:
imp_min = importance_array['importances'].min()
imp_max = importance_array['importances'].max()
assert imp_max - imp_min > 0.3
    # Now check that importances computed on the dataframe match the values
    # of those computed on the array with the same data.
importance_dataframe = permutation_importance(
rf, X_df, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
)
assert_allclose(
importance_array['importances'],
importance_dataframe['importances']
)
@pytest.mark.parametrize("input_type", ["array", "dataframe"])
def test_permutation_importance_large_memmaped_data(input_type):
# Smoke, non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/15810
n_samples, n_features = int(5e4), 4
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
assert X.nbytes > 1e6 # trigger joblib memmaping
X = _convert_container(X, input_type)
clf = DummyClassifier(strategy='prior').fit(X, y)
# Actual smoke test: should not raise any error:
n_repeats = 5
r = permutation_importance(clf, X, y, n_repeats=n_repeats, n_jobs=2)
# Auxiliary check: DummyClassifier is feature independent:
    # permuting a feature should not change the predictions
expected_importances = np.zeros((n_features, n_repeats))
assert_allclose(expected_importances, r.importances)
def test_permutation_importance_sample_weight():
# Creating data with 2 features and 1000 samples, where the target
# variable is a linear combination of the two features, such that
# in half of the samples the impact of feature 1 is twice the impact of
# feature 2, and vice versa on the other half of the samples.
rng = np.random.RandomState(1)
n_samples = 1000
n_features = 2
n_half_samples = n_samples // 2
x = rng.normal(0.0, 0.001, (n_samples, n_features))
y = np.zeros(n_samples)
y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1]
y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1]
# Fitting linear regression with perfect prediction
lr = LinearRegression(fit_intercept=False)
lr.fit(x, y)
    # When all samples are weighted with the same weights, the ratio of
    # the two feature importances should equal 1 in expectation (when using
    # mean absolute error as the loss function).
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=200)
x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w_none == pytest.approx(1, 0.01)
# When passing a vector of ones as the sample_weight, results should be
# the same as in the case that sample_weight=None.
w = np.ones(n_samples)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=200, sample_weight=w)
x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w_ones == pytest.approx(
x1_x2_imp_ratio_w_none, 0.01)
    # When the ratio between the weights of the first half of the samples and
    # the second half of the samples approaches infinity, the ratio of
    # the two feature importances should equal 2 in expectation (when using
    # mean absolute error as the loss function).
w = np.hstack([np.repeat(10.0 ** 10, n_half_samples),
np.repeat(1.0, n_half_samples)])
lr.fit(x, y, w)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=200,
sample_weight=w)
x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none == pytest.approx(2, 0.01)
def test_permutation_importance_no_weights_scoring_function():
    # Creating a scorer function that does not take sample_weight
def my_scorer(estimator, X, y):
return 1
# Creating some data and estimator for the permutation test
x = np.array([[1, 2], [3, 4]])
y = np.array([1, 2])
w = np.array([1, 1])
lr = LinearRegression()
lr.fit(x, y)
    # test that permutation_importance does not raise an error when
    # sample_weight is None
try:
permutation_importance(lr, x, y, random_state=1,
scoring=my_scorer,
n_repeats=1)
except TypeError:
pytest.fail("permutation_test raised an error when using a scorer "
"function that does not accept sample_weight even though "
"sample_weight was None")
# test that permutation_importance raise exception when sample_weight is
# not None
with pytest.raises(TypeError):
permutation_importance(lr, x, y, random_state=1,
scoring=my_scorer,
n_repeats=1,
sample_weight=w)
| bsd-3-clause |
gfyoung/numpy | numpy/lib/twodim_base.py | 2 | 27180 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
import functools
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
from numpy.core import overrides
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def _flip_dispatcher(m):
return (m,)
@array_function_dispatch(_flip_dispatcher)
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
@array_function_dispatch(_flip_dispatcher)
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def eye(N, M=None, k=0, dtype=float, order='C'):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
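# A minimal illustrative sketch (not used anywhere in this module): the flat-index
# assignment in ``eye`` above works because, in a C-ordered (N, M) array, moving one row
# down and one column right advances the flat index by M + 1. The helper name and
# defaults are illustrative only.
def _eye_flat_index_demo(N=4, M=5, k=1):
    m = zeros((N, M), dtype=int)
    start = k if k >= 0 else (-k) * M    # flat offset of the first entry of the diagonal
    m[:M - k].flat[start::M + 1] = 1     # the same stride trick used in ``eye``
    return m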
def _diag_dispatcher(v, k=None):
return (v,)
@array_function_dispatch(_diag_dispatcher)
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
@array_function_dispatch(_diag_dispatcher)
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def _trilu_dispatcher(m, k=None):
return (m,)
@array_function_dispatch(_trilu_dispatcher)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
@array_function_dispatch(_trilu_dispatcher)
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
def _vander_dispatcher(x, N=None, increasing=None):
return (x,)
# Originally borrowed from John Hunter and matplotlib
@array_function_dispatch(_vander_dispatcher)
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
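# A minimal illustrative sketch (not used anywhere in this module): ``vander`` above
# builds all powers of ``x`` with a single ``multiply.accumulate`` over the columns
# instead of evaluating ``x**p`` column by column. Written here for the increasing
# order only; the helper name is illustrative.
def _vander_accumulate_demo(x, N):
    x = asarray(x)
    v = empty((len(x), N), dtype=promote_types(x.dtype, int))
    if N > 0:
        v[:, 0] = 1                    # x**0
    if N > 1:
        v[:, 1:] = x[:, None]          # seed every remaining column with x
        multiply.accumulate(v[:, 1:], out=v[:, 1:], axis=1)   # column j becomes x**j
    return v                           # columns are x**0, x**1, ..., x**(N-1)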
def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None,
weights=None, density=None):
return (x, y, bins, weights)
@array_function_dispatch(_histogram2d_dispatcher)
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
density=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
density : bool, optional
If False, the default, returns the number of samples in each bin.
If True, returns the probability *density* function at the bin,
``bin_count / sample_count / bin_area``.
normed : bool, optional
An alias for the density argument that behaves identically. To avoid
confusion with the broken normed argument to `histogram`, `density`
should be preferred.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
    >>> plt.imshow(H, interpolation='nearest', origin='lower',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights, density)
return hist, edges[0], edges[1]
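# A minimal illustrative sketch (not used anywhere in this module) of the orientation
# note in the docstring above: ``x`` is binned along the first axis (rows) of ``H`` and
# ``y`` along the second (columns). The sample values below are arbitrary.
def _histogram2d_orientation_demo():
    x = [0.5, 0.5, 0.5]                # all samples share a single x bin ...
    y = [0.5, 1.5, 2.5]                # ... but fall into three different y bins
    H, _, _ = histogram2d(x, y, bins=(1, 3))
    return H                           # shape (1, 3): one x row, three y columns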
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return nonzero(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return nonzero(tri(n, m, k=k, dtype=bool))
def _trilu_indices_form_dispatcher(arr, k=None):
return (arr,)
@array_function_dispatch(_trilu_indices_form_dispatcher)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return nonzero(~tri(n, m, k=k-1, dtype=bool))
@array_function_dispatch(_trilu_indices_form_dispatcher)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| bsd-3-clause |
deepmind/open_spiel | open_spiel/python/egt/alpharank_visualizer_test.py | 1 | 2447 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.egt.alpharank_visualizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
# pylint: disable=g-import-not-at-top
import matplotlib
matplotlib.use("agg") # switch backend for testing
import mock
import numpy as np
from open_spiel.python.egt import alpharank
from open_spiel.python.egt import alpharank_visualizer
from open_spiel.python.egt import utils
import pyspiel
class AlpharankVisualizerTest(absltest.TestCase):
@mock.patch("%s.alpharank_visualizer.plt" % __name__)
def test_plot_pi_vs_alpha(self, mock_plt):
# Construct game
game = pyspiel.load_matrix_game("matrix_rps")
payoff_tables = utils.game_payoffs_array(game)
_, payoff_tables = utils.is_symmetric_matrix_game(payoff_tables)
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
# Compute alpharank
alpha = 1e2
_, _, pi, num_profiles, num_strats_per_population =\
alpharank.compute(payoff_tables, alpha=alpha)
strat_labels = utils.get_strat_profile_labels(payoff_tables,
payoffs_are_hpt_format)
num_populations = len(payoff_tables)
# Construct synthetic pi-vs-alpha history
pi_list = np.empty((num_profiles, 0))
alpha_list = []
for _ in range(2):
pi_list = np.append(pi_list, np.reshape(pi, (-1, 1)), axis=1)
alpha_list.append(alpha)
# Test plotting code (via pyplot mocking to prevent plot pop-up)
alpharank_visualizer.plot_pi_vs_alpha(
pi_list.T,
alpha_list,
num_populations,
num_strats_per_population,
strat_labels,
num_strats_to_label=0)
self.assertTrue(mock_plt.show.called)
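# A minimal illustrative sketch (not part of the test suite): the synthetic pi-vs-alpha
# history built above mimics what an actual alpha sweep would produce. A hypothetical
# sweep helper is outlined below; the project may provide its own sweep utilities, so
# this is illustrative only.
def _alpha_sweep_sketch(payoff_tables, alphas):
    pi_list, used_alphas = [], []
    for a in alphas:
        _, _, pi, _, _ = alpharank.compute(payoff_tables, alpha=a)
        pi_list.append(pi)
        used_alphas.append(a)
    return np.stack(pi_list), used_alphas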
if __name__ == "__main__":
absltest.main()
| apache-2.0 |
quantopian/zipline | zipline/data/in_memory_daily_bars.py | 1 | 5363 | from six import iteritems
import numpy as np
import pandas as pd
from pandas import NaT
from trading_calendars import TradingCalendar
from zipline.data.bar_reader import OHLCV, NoDataOnDate, NoDataForSid
from zipline.data.session_bars import CurrencyAwareSessionBarReader
from zipline.utils.input_validation import expect_types, validate_keys
from zipline.utils.pandas_utils import check_indexes_all_same
class InMemoryDailyBarReader(CurrencyAwareSessionBarReader):
"""
A SessionBarReader backed by a dictionary of in-memory DataFrames.
Parameters
----------
frames : dict[str -> pd.DataFrame]
Dictionary from field name ("open", "high", "low", "close", or
"volume") to DataFrame containing data for that field.
calendar : str or trading_calendars.TradingCalendar
Calendar (or name of calendar) to which data is aligned.
currency_codes : pd.Series
Map from sid -> listing currency for that sid.
verify_indices : bool, optional
Whether or not to verify that input data is correctly aligned to the
given calendar. Default is True.
"""
@expect_types(
frames=dict,
calendar=TradingCalendar,
verify_indices=bool,
currency_codes=pd.Series,
)
def __init__(self,
frames,
calendar,
currency_codes,
verify_indices=True):
self._frames = frames
self._values = {key: frame.values for key, frame in iteritems(frames)}
self._calendar = calendar
self._currency_codes = currency_codes
validate_keys(frames, set(OHLCV), type(self).__name__)
if verify_indices:
verify_frames_aligned(list(frames.values()), calendar)
self._sessions = frames['close'].index
self._sids = frames['close'].columns
@classmethod
def from_panel(cls, panel, calendar, currency_codes):
"""Helper for construction from a pandas.Panel.
"""
return cls(dict(panel.iteritems()), calendar, currency_codes)
@property
def last_available_dt(self):
return self._calendar[-1]
@property
def trading_calendar(self):
return self._calendar
@property
def sessions(self):
return self._sessions
def load_raw_arrays(self, columns, start_dt, end_dt, assets):
if start_dt not in self._sessions:
raise NoDataOnDate(start_dt)
if end_dt not in self._sessions:
raise NoDataOnDate(end_dt)
asset_indexer = self._sids.get_indexer(assets)
if -1 in asset_indexer:
bad_assets = assets[asset_indexer == -1]
raise NoDataForSid(bad_assets)
date_indexer = self._sessions.slice_indexer(start_dt, end_dt)
out = []
for c in columns:
out.append(self._values[c][date_indexer, asset_indexer])
return out
def get_value(self, sid, dt, field):
"""
Parameters
----------
sid : int
The asset identifier.
        dt : datetime64-like
Midnight of the day for which data is requested.
field : string
The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
Returns
-------
float
            The spot price for ``field`` of the given sid on the given day.
Raises a NoDataOnDate exception if the given day and sid is before
or after the date range of the equity.
"""
        return self._frames[field].loc[dt, sid]
def get_last_traded_dt(self, asset, dt):
"""
Parameters
----------
asset : zipline.asset.Asset
The asset identifier.
dt : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
        pd.Timestamp : The last known dt for the asset and dt;
NaT if no trade is found before the given dt.
"""
try:
            return self._frames['close'].loc[:, asset.sid].last_valid_index()
except IndexError:
return NaT
@property
def first_trading_day(self):
return self._sessions[0]
def currency_codes(self, sids):
codes = self._currency_codes
return np.array([codes[sid] for sid in sids])
def verify_frames_aligned(frames, calendar):
"""
Verify that DataFrames in ``frames`` have the same indexing scheme and are
aligned to ``calendar``.
Parameters
----------
frames : list[pd.DataFrame]
calendar : trading_calendars.TradingCalendar
Raises
------
ValueError
If frames have different indexes/columns, or if frame indexes do not
match a contiguous region of ``calendar``.
"""
indexes = [f.index for f in frames]
check_indexes_all_same(indexes, message="DataFrame indexes don't match:")
columns = [f.columns for f in frames]
check_indexes_all_same(columns, message="DataFrame columns don't match:")
start, end = indexes[0][[0, -1]]
cal_sessions = calendar.sessions_in_range(start, end)
check_indexes_all_same(
[indexes[0], cal_sessions],
"DataFrame index doesn't match {} calendar:".format(calendar.name),
)
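# A minimal illustrative sketch (not used anywhere in this module): one hypothetical way
# to assemble an InMemoryDailyBarReader from raw per-field data. ``sessions``, ``sids``
# and ``data`` are placeholders, and ``calendar`` is assumed to already be a
# TradingCalendar instance (as required by the ``expect_types`` check above).
def _in_memory_reader_sketch(sessions, sids, calendar, data):
    # ``data`` maps each OHLCV field name to a (session x sid) array or DataFrame.
    frames = {
        field: pd.DataFrame(data[field], index=sessions, columns=sids)
        for field in OHLCV
    }
    currency_codes = pd.Series(['USD'] * len(sids), index=sids)
    return InMemoryDailyBarReader(frames, calendar, currency_codes)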
| apache-2.0 |
jakobworldpeace/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 55 | 9939 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <florian.wilhelm@gmail.com>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import (
assert_almost_equal, assert_greater, assert_less, raises,
)
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
with open(os.devnull, 'w') as devnull:
sys.stdout = devnull
sys.stderr = devnull
yield
devnull.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
    # Check that the start value is an element of X and the solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
    # Check that the start value is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
    # Check that the start value is not the solution but an element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
    # Check that with a single sample the step returns that sample (identity)
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
    # Test a larger problem and check the exact solution in the 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
    # Check that a warning is emitted when the maximum number of iterations is exceeded
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1 / (np.sqrt(2))), 1.e-6)
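# Note on the expected value above: for n_subsamples=2 and very large
# n_samples, the approximate breakdown point tends to 1 - 0.5**(1.0 / 2)
# = 1 - 1/sqrt(2), i.e. roughly 29.3% of the samples may be outliers before a
# simple-regression Theil-Sen estimate breaks down, which is the constant
# asserted above.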
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
    # Check for exactly the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
larsmans/scikit-learn | sklearn/cluster/setup.py | 31 | 1248 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
BhallaLab/moose-core | tests/core/test_function_example.py | 2 | 3483 | # Modified from function.py ---
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import moose
simtime = 1.0
def test_example():
moose.Neutral('/model')
function = moose.Function('/model/function')
function.c['c0'] = 1.0
function.c['c1'] = 2.0
#function.x.num = 1
function.expr = 'c0 * exp(c1*x0) * cos(y0) + sin(t)'
# mode 0 - evaluate function value, derivative and rate
# mode 1 - just evaluate function value,
# mode 2 - evaluate derivative,
# mode 3 - evaluate rate
function.mode = 0
function.independent = 'y0'
nsteps = 1000
xarr = np.linspace(0.0, 1.0, nsteps)
# Stimulus tables allow you to store sequences of numbers which
# are delivered via the 'output' message at each time step. This
    # is a placeholder; in a real scenario you would use any
# sourceFinfo that sends out a double value.
input_x = moose.StimulusTable('/xtab')
input_x.vector = xarr
input_x.startTime = 0.0
input_x.stepPosition = xarr[0]
input_x.stopTime = simtime
moose.connect(input_x, 'output', function.x[0], 'input')
yarr = np.linspace(-np.pi, np.pi, nsteps)
input_y = moose.StimulusTable('/ytab')
input_y.vector = yarr
input_y.startTime = 0.0
input_y.stepPosition = yarr[0]
input_y.stopTime = simtime
moose.connect(function, 'requestOut', input_y, 'getOutputValue')
# data recording
result = moose.Table('/ztab')
moose.connect(result, 'requestOut', function, 'getValue')
derivative = moose.Table('/zprime')
moose.connect(derivative, 'requestOut', function, 'getDerivative')
rate = moose.Table('/dz_by_dt')
moose.connect(rate, 'requestOut', function, 'getRate')
x_rec = moose.Table('/xrec')
moose.connect(x_rec, 'requestOut', input_x, 'getOutputValue')
y_rec = moose.Table('/yrec')
moose.connect(y_rec, 'requestOut', input_y, 'getOutputValue')
dt = simtime/nsteps
for ii in range(32):
moose.setClock(ii, dt)
moose.reinit()
moose.start(simtime)
    # The figures below are rendered with the non-interactive Agg backend
    # (selected at the top of this file) and saved as a PNG next to this
    # file; switch backends if you want to display them interactively.
plt.subplot(3,1,1)
plt.plot(x_rec.vector, result.vector, 'r-', label='z = {}'.format(function.expr))
z = function.c['c0'] * np.exp(function.c['c1'] * xarr) * np.cos(yarr) + np.sin(np.arange(len(xarr)) * dt)
plt.plot(xarr, z, 'b--', label='numpy computed')
plt.xlabel('x')
plt.ylabel('z')
plt.legend()
plt.subplot(3,1,2)
plt.plot(y_rec.vector, derivative.vector, 'r-', label='dz/dy0')
# derivatives computed by putting x values in the analytical formula
dzdy = function.c['c0'] * np.exp(function.c['c1'] * xarr) * (- np.sin(yarr))
plt.plot(yarr, dzdy, 'b--', label='numpy computed')
plt.xlabel('y')
plt.ylabel('dz/dy')
plt.legend()
plt.subplot(3,1,3)
# *** BEWARE *** The first two entries are spurious. Entry 0 is
    # *** from reinit sending out the defaults. Entry 1 is because
# *** there is no lastValue for computing real forward difference.
plt.plot(np.arange(2, len(rate.vector), 1) * dt, rate.vector[2:], 'r-', label='dz/dt')
dzdt = np.diff(z)/dt
plt.plot(np.arange(0, len(dzdt), 1.0) * dt, dzdt, 'b--', label='numpy computed')
plt.xlabel('t')
plt.ylabel('dz/dt')
plt.legend()
plt.tight_layout()
plt.savefig(__file__+'.png')
if __name__ == '__main__':
test_example()
| gpl-3.0 |
mojolab/LivingData | lib/livdatops.py | 1 | 1153 | import pandas
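# Helpers for harmonising a dict of pandas DataFrames ("sheets"): a merger
# sheet maps each sheet's original column names onto a canonical set of
# names, and createMergedDFList renames the columns accordingly and
# concatenates the renamed sheets into a single merged DataFrame.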
def getColRenameDict(mergersheet,sheet):
colrenamedict={}
originalcolnames=mergersheet[sheet].fillna("NA")
newcolnames=mergersheet[mergersheet.columns[0]]
for i in range(0,len(originalcolnames)):
colrenamedict[originalcolnames[i]]=newcolnames[i]
# if originalcolnames[i]!="NA":
# colrenamedict[originalcolnames[i]]=newcolnames[i]
return colrenamedict
def createMergedDFList(dflist,mergersheetname):
altereddfs={}
for sheet,matrix in dflist.iteritems():
if sheet == mergersheetname:
altereddfs[sheet]=matrix
mergersheet=matrix
else:
df=matrix
print df.columns
columnrenamedict=getColRenameDict(mergersheet,sheet)
print columnrenamedict
altereddf=df.rename(columns=columnrenamedict)
for key,value in columnrenamedict.iteritems():
if key =="NA":
altereddf[value]=0
print df,altereddf
altereddfs[sheet]=altereddf
finalsheet=[]
for sheet,matrix in altereddfs.iteritems():
if sheet!=mergersheetname:
finalsheet.append(matrix.fillna(0))
finalsheetm=pandas.concat(finalsheet)
finalsheetname=mergersheet.columns.values[0]
altereddfs[finalsheetname]=finalsheetm
return altereddfs
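# A minimal usage sketch (illustrative only -- the sheet names, column names
# and values below are made up). An OrderedDict is used so that the merger
# sheet is iterated first, which the loop in createMergedDFList relies on.
if __name__ == "__main__":
    from collections import OrderedDict
    mergersheet = pandas.DataFrame(
        [["Date", "date", "Day"], ["Amount", "amt", None]],
        columns=["Merged", "SheetA", "SheetB"])
    dflist = OrderedDict([
        ("Merger", mergersheet),
        ("SheetA", pandas.DataFrame({"date": ["2014-01-01"], "amt": [10]})),
        ("SheetB", pandas.DataFrame({"Day": ["2014-01-02"]})),
    ])
    merged = createMergedDFList(dflist, "Merger")
    # merged["Merged"] now holds both sheets stacked under the canonical
    # column names ["Date", "Amount"]; SheetB's missing column is filled
    # with 0.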
| apache-2.0 |
nomadcube/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there a no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, the small number
of samples forces us to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, which
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
Jimmy-Morzaria/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 261 | 2836 | # Author: Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
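# A minimal illustrative sketch (not part of the test suite): a typical use of
# murmurhash3_32 is to map arbitrary keys to stable buckets, e.g. for the
# hashing trick. The helper name and the n_buckets default are made up here.
def _example_bucket_index(key, n_buckets=16, seed=0):
    """Map an int, bytes or unicode key to a stable bucket in [0, n_buckets)."""
    return murmurhash3_32(key, seed=seed, positive=True) % n_buckets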
| bsd-3-clause |
alvarofierroclavero/scikit-learn | sklearn/ensemble/forest.py | 176 | 62555 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score fuction."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
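# Note: the two helpers above split range(n_samples) into the in-bag indices
# (drawn with replacement for one tree) and the out-of-bag (OOB) indices used
# for the OOB score. A given sample is missed by one bootstrap draw with
# probability (1 - 1/n)**n -> exp(-1), so roughly 36.8% of the samples are
# out-of-bag for each tree when n_samples is large.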
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity;
            # [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('auto', 'balanced', 'balanced_subsample', 'subsample', 'auto')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated and will be removed in 0.18."
" It was replaced by class_weight='balanced_subsample' "
"using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
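    Examples
    --------
    A minimal usage sketch on synthetic data; ``make_classification`` is only
    an illustrative way of obtaining ``X`` and ``y``::
        from sklearn.datasets import make_classification
        from sklearn.ensemble import RandomForestClassifier
        X, y = make_classification(n_samples=1000, n_features=4,
                                   random_state=0)
        clf = RandomForestClassifier(n_estimators=10, random_state=0)
        clf.fit(X, y)
        print(clf.feature_importances_)
        print(clf.predict(X[:5]))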
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
    A random forest is a meta estimator that fits a number of decision
    trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
    RandomForestClassifier : Ensemble classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
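# A minimal usage sketch for the classifier defined above. The synthetic
# dataset and the hyper-parameter values are assumptions chosen only for
# illustration, not recommended settings.
def _demo_extra_trees_classifier():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    clf = ExtraTreesClassifier(n_estimators=50, max_features="sqrt",
                               random_state=0)
    clf.fit(X, y)
    # classes_, feature_importances_ and score() are available after fitting
    return clf.score(X, y), clf.feature_importances_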
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and use averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
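# A minimal usage sketch for the regressor defined above, mirroring the
# classifier example. The dataset shape, noise level and n_estimators are
# illustrative assumptions.
def _demo_extra_trees_regressor():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=200, n_features=10, noise=0.1,
                           random_state=0)
    reg = ExtraTreesRegressor(n_estimators=50, random_state=0)
    reg.fit(X, y)
    # Training-set R^2; use cross-validation for an honest generalisation score
    return reg.score(X, y)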
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
        # ensure_2d=False because there are actually unit tests checking that
        # we fail for 1d.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
            # Pre-sort indices so that each individual tree of the ensemble
            # does not have to sort them again.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
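# A minimal sketch of the embedding described in the docstring above: each
# sample is coded by the leaves it falls into, one-hot encoded into a sparse
# matrix. The random input matrix is an assumption used only to show the
# output shape.
def _demo_random_trees_embedding():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(20, 4)
    hasher = RandomTreesEmbedding(n_estimators=5, max_depth=3, random_state=0)
    X_transformed = hasher.fit_transform(X)
    # (n_samples, n_out), with n_out <= n_estimators * leaves per tree
    return X_transformed.shape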
| bsd-3-clause |
etamponi/resilient-protocol | resilient/ensemble.py | 1 | 6786 | import hashlib
import numpy
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.tree.tree import DecisionTreeClassifier
from sklearn.utils.fixes import unique
from sklearn import preprocessing
from sklearn.utils.random import check_random_state
from resilient.logger import Logger
from resilient.selection_strategies import SelectBestPercent
from resilient.train_set_generators import RandomCentroidPDFTrainSetGenerator
from resilient.weighting_strategies import CentroidBasedWeightingStrategy
__author__ = 'Emanuele Tamponi <emanuele.tamponi@diee.unica.it>'
MAX_INT = numpy.iinfo(numpy.int32).max
class TrainingStrategy(BaseEstimator):
def __init__(self,
base_estimator=DecisionTreeClassifier(max_features='auto'),
train_set_generator=RandomCentroidPDFTrainSetGenerator(),
random_sample=None):
self.base_estimator = base_estimator
self.train_set_generator = train_set_generator
self.random_sample = random_sample
def train_estimators(self, n, inp, y, weighting_strategy, random_state):
classifiers = []
weight_generator = self.train_set_generator.get_sample_weights(
n, inp, y, random_state
)
for i, weights in enumerate(weight_generator):
if self.random_sample is not None:
ix = random_state.choice(
len(y),
size=int(self.random_sample*len(y)),
p=weights, replace=True
)
weights = numpy.bincount(ix, minlength=len(y))
s = weights.sum()
weights = numpy.array([float(w) / s for w in weights])
Logger.get().write("!Training estimator:", (i+1))
est = self._make_estimator(inp, y, weights, random_state)
weighting_strategy.add_estimator(est, inp, y, weights)
classifiers.append(est)
return classifiers
def _make_estimator(self, inp, y, sample_weights, random_state):
seed = random_state.randint(MAX_INT)
est = clone(self.base_estimator)
est.set_params(random_state=check_random_state(seed))
est.fit(inp, y, sample_weight=sample_weights)
return est
class ResilientEnsemble(BaseEstimator, ClassifierMixin):
def __init__(self,
pipeline=None,
n_estimators=10,
training_strategy=TrainingStrategy(),
weighting_strategy=CentroidBasedWeightingStrategy(),
selection_strategy=SelectBestPercent(),
multiply_by_weight=False,
use_prob=True,
random_state=None):
self.pipeline = pipeline
self.n_estimators = n_estimators
self.training_strategy = training_strategy
self.weighting_strategy = weighting_strategy
self.selection_strategy = selection_strategy
self.multiply_by_weight = multiply_by_weight
self.use_prob = use_prob
self.random_state = random_state
# Training time attributes
self.classes_ = None
self.n_classes_ = None
self.classifiers_ = None
self.precomputed_probs_ = None
self.precomputed_weights_ = None
self.random_state_ = None
def fit(self, inp, y):
self.precomputed_probs_ = None
self.precomputed_weights_ = None
self.classes_, y = unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
self.random_state_ = check_random_state(self.random_state)
if self.pipeline is not None:
inp = self.pipeline.fit_transform(inp)
self.weighting_strategy.prepare(inp, y)
self.classifiers_ = self.training_strategy.train_estimators(
self.n_estimators, inp, y,
self.weighting_strategy, self.random_state_
)
# Reset it to null because the previous line uses self.predict
self.precomputed_probs_ = None
self.precomputed_weights_ = None
return self
def predict_proba(self, inp):
# inp is array-like, (N, D), one instance per row
# output is array-like, (N, n_classes_), each row sums to one
if self.precomputed_probs_ is None:
self._precompute(inp)
prob = numpy.zeros((len(inp), self.n_classes_))
for i in range(len(inp)):
active_indices = self.selection_strategy.get_indices(
self.precomputed_weights_[i], self.random_state_
)
prob[i] = self.precomputed_probs_[i][active_indices].sum(axis=0)
preprocessing.normalize(prob, norm='l1', copy=False)
return prob
def predict(self, inp):
# inp is array-like, (N, D), one instance per row
# output is array-like, N, one label per instance
if self.pipeline is not None:
inp = self.pipeline.transform(inp)
p = self.predict_proba(inp)
return self.classes_[numpy.argmax(p, axis=1)]
def _precompute(self, inp):
self.precomputed_probs_ = numpy.zeros(
(len(inp), len(self.classifiers_), self.n_classes_)
)
self.precomputed_weights_ = numpy.zeros(
(len(inp), len(self.classifiers_))
)
for i, x in enumerate(inp):
Logger.get().write(
"!Computing", len(inp), "probabilities and weights:", (i+1)
)
for j, cls in enumerate(self.classifiers_):
prob = cls.predict_proba(x)[0]
if not self.use_prob:
max_index = prob.argmax()
prob = numpy.zeros_like(prob)
prob[max_index] = 1
self.precomputed_probs_[i][j] = prob
self.precomputed_weights_[i] = (
self.weighting_strategy.weight_estimators(x)
)
if self.multiply_by_weight:
for j in range(len(self.classifiers_)):
self.precomputed_probs_[i][j] *= (
self.precomputed_weights_[i][j]
)
def get_directory(self):
current_state = self.random_state
current_selection = self.selection_strategy
self.random_state = None
self.selection_strategy = None
filename = hashlib.md5(str(self)).hexdigest()
self.random_state = current_state
self.selection_strategy = current_selection
return filename
def get_filename(self):
return self.get_directory() + "/ensemble"
def __eq__(self, other):
return isinstance(other, ResilientEnsemble) and (
self.get_directory() == other.get_directory()
)
def __hash__(self):
return hash(self.get_directory())
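# A minimal, non-authoritative sketch of fitting the ensemble defined above
# with its default training, weighting and selection strategies. The iris
# dataset and n_estimators=5 are assumptions made purely for illustration.
def _demo_resilient_ensemble():
    from sklearn.datasets import load_iris
    data = load_iris()
    ensemble = ResilientEnsemble(n_estimators=5, random_state=1)
    ensemble.fit(data.data, data.target)
    predictions = ensemble.predict(data.data)
    # Training-set accuracy, only as a smoke test of the fit/predict cycle
    return (predictions == data.target).mean()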
| gpl-2.0 |
mudbungie/NetExplorer | env/lib/python3.4/site-packages/networkx/tests/test_convert_pandas.py | 43 | 2177 | from nose import SkipTest
from nose.tools import assert_true
import networkx as nx
class TestConvertPandas(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
try:
import pandas as pd
except ImportError:
raise SkipTest('Pandas not available.')
def __init__(self, ):
global pd
import pandas as pd
self.r = pd.np.random.RandomState(seed=5)
ints = self.r.random_integers(1, 10, size=(3,2))
a = ['A', 'B', 'C']
b = ['D', 'A', 'E']
df = pd.DataFrame(ints, columns=['weight', 'cost'])
df[0] = a # Column label 0 (int)
df['b'] = b # Column label 'b' (str)
self.df = df
def assert_equal(self, G1, G2):
assert_true( nx.is_isomorphic(G1, G2, edge_match=lambda x, y: x == y ))
def test_from_dataframe_all_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}),
('B', 'A', {'cost': 1, 'weight': 7}),
('A', 'D', {'cost': 7, 'weight': 4})])
G=nx.from_pandas_dataframe(self.df, 0, 'b', True)
self.assert_equal(G, Gtrue)
def test_from_dataframe_multi_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {'cost': 9, 'weight': 10}),
('B', 'A', {'cost': 1, 'weight': 7}),
('A', 'D', {'cost': 7, 'weight': 4})])
G=nx.from_pandas_dataframe(self.df, 0, 'b', ['weight', 'cost'])
self.assert_equal(G, Gtrue)
def test_from_dataframe_one_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {'weight': 10}),
('B', 'A', {'weight': 7}),
('A', 'D', {'weight': 4})])
G=nx.from_pandas_dataframe(self.df, 0, 'b', 'weight')
self.assert_equal(G, Gtrue)
def test_from_dataframe_no_attr(self, ):
Gtrue = nx.Graph([('E', 'C', {}),
('B', 'A', {}),
('A', 'D', {})])
G=nx.from_pandas_dataframe(self.df, 0, 'b',)
self.assert_equal(G, Gtrue)
| mit |
aminert/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
| bsd-3-clause |
pkruskal/scikit-learn | sklearn/covariance/graph_lasso_.py | 127 | 25626 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
    This results from the bound for all the Lasso problems that are solved
    in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
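# A small numeric illustration of the bound computed above: for any alpha at
# or above alpha_max the penalised precision matrix stays diagonal, so no
# off-diagonal structure is recovered. The toy covariance is an assumed example.
def _demo_alpha_max():
    toy_cov = np.array([[1.0, 0.3, 0.1],
                        [0.3, 1.0, 0.2],
                        [0.1, 0.2, 1.0]])
    # Largest absolute off-diagonal entry, here 0.3
    return alpha_max(toy_cov)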
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
    # As a trivial regularization (Tikhonov-like), we scale down the
    # off-diagonal coefficients of our starting point: this is needed, as
    # in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows up. Besides, this takes a
    # conservative standpoint on the initial conditions, and it tends to
    # make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
# The different l1 regression solver have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, enet_tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
            warnings.warn('graph_lasso: did not converge after %i iterations:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
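# A hedged sketch of calling the solver above directly on an empirical
# covariance matrix; the sample size, dimension and alpha value are
# illustrative assumptions.
def _demo_graph_lasso_function():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 5)
    emp_cov = empirical_covariance(X)
    covariance, precision = graph_lasso(emp_cov, alpha=0.2)
    # The l1 penalty shrinks off-diagonal entries of the precision toward zero
    return covariance, precision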
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alpha : positive float, default 0.01
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
mode : {'cd', 'lars'}, default 'cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, default 1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, default 100
The maximum number of iterations.
verbose : boolean, default False
If verbose is True, the objective function and dual gap are
plotted at each iteration.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
max_iter=100, verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True)
return self
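# Estimator-style counterpart of the function-level call, using the class
# defined above; the data and alpha=0.2 are assumptions for illustration.
def _demo_graph_lasso_estimator():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 5)
    model = GraphLasso(alpha=0.2)
    model.fit(X)
    # covariance_, precision_ and n_iter_ are set by fit
    return model.precision_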
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alphas : integer, or list positive float, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
    n_refinements : strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
    tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
    max_iter : integer, optional
Maximum number of iterations.
    mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
    n_jobs : int, optional (default 1)
number of jobs to run in parallel (default 1).
    verbose : boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
    grid_scores : 2D numpy.ndarray, shape (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
verbose=False, assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol, enet_tol=self.enet_tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
            # A little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=inner_verbose, return_n_iter=True)
return self
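# A short, assumed example of the cross-validated estimator above, showing how
# the iteratively refined alpha grid is exposed after fitting; the data size
# and grid settings are illustrative only.
def _demo_graph_lasso_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(80, 5)
    model = GraphLassoCV(alphas=4, n_refinements=2)
    model.fit(X)
    # alpha_ is the selected penalty, cv_alphas_ the grid actually explored
    return model.alpha_, model.cv_alphas_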
| bsd-3-clause |
rajat1994/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there a no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
ifarup/colourlab | tests/test_misc.py | 1 | 1116 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
test_misc: Unittests for all functions in the misc module.
Copyright (C) 2017 Ivar Farup
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
import matplotlib
import matplotlib.pyplot as plt
from colourlab import misc, space, data
t = data.g_MacAdam()
ell = t.get_ellipses(space.xyY)
_, ax = plt.subplots()
misc.plot_ellipses(ell, ax)
misc.plot_ellipses(ell)
class TestPlot(unittest.TestCase):
def test_plot(self):
self.assertTrue(isinstance(ax, matplotlib.axes.Axes))
| gpl-3.0 |
rlouf/patterns-of-segregation | bin/plot_gini.py | 1 | 2527 | """plot_gini.py
Plot the Gini index of the income distribution as a function of the number of
households in cities.
"""
from __future__ import division
import csv
import numpy as np
import itertools
from matplotlib import pylab as plt
#
# Parameters and functions
#
income_bins = [1000,12500,17500,22500,27500,32500,37500,42500,47500,55000,70000,90000,115000,135000,175000,300000]
# Puerto-rican cities are excluded from the analysis
PR_cities = ['7442','0060','6360','4840']
#
# Read data
#
## List of MSA
msa = {}
with open('data/names/msa.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
if rows[0] not in PR_cities:
msa[rows[0]] = rows[1]
#
# Compute gini for all msa
#
gini = []
households = []
for n, city in enumerate(msa):
print "Compute Gini index for %s (%s/%s)"%(msa[city], n+1, len(msa))
## Import households income
data = {}
with open('data/income/msa/%s/income.csv'%city, 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
num_cat = len(rows[1:])
data[rows[0]] = {c:int(h) for c,h in enumerate(rows[1:])}
# Sum over all areal units
incomes = {cat:sum([data[au][cat] for au in data]) for cat in range(num_cat)}
## Compute the Gini index
# See Dixon, P. M.; Weiner, J.; Mitchell-Olds, T.; and Woodley, R.
# "Bootstrapping the Gini Coefficient of Inequality." Ecology 68, 1548-1551, 1987.
g = 0
pop = 0
for a,b in itertools.permutations(incomes, 2):
g += incomes[a]*incomes[b]*abs(income_bins[a]-income_bins[b])
pop = sum([incomes[a] for a in incomes])
average = sum([incomes[a]*income_bins[a] for a in incomes])/pop
gini.append((1/(2*pop**2*average))*g)
households.append(pop)
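# The permutation loop above scales quadratically with the number of income
# categories. The helper below is an optional, equivalent vectorised sketch
# (not used by the rest of the script); it returns the same Gini value.
def gini_vectorised(counts, bins):
    counts = np.asarray(counts, dtype=float)
    bins = np.asarray(bins, dtype=float)
    pop = counts.sum()
    average = np.dot(counts, bins) / pop
    # Sum over pairs of n_a * n_b * |x_a - x_b|; the diagonal contributes zero
    pairwise = np.abs(np.subtract.outer(bins, bins))
    total = np.dot(counts, pairwise).dot(counts)
    return total / (2 * pop ** 2 * average)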
#
# Plot
#
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(households, gini, 'o', color='black', mec='black')
ax.set_xlabel(r'$H$', fontsize=30)
ax.set_ylabel(r'$Gini$', fontsize=30)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('outward', 10)) # outward by 10 points
ax.spines['bottom'].set_position(('outward', 10)) # outward by 10 points
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_xscale('log')
plt.savefig('figures/paper/si/gini_income.pdf', bbox_inches='tight')
plt.show()
| bsd-3-clause |
dhhagan/ACT | ACT/thermo/visualize.py | 1 | 13306 | """
Classes and functions used to visualize data for thermo scientific analyzers
"""
from pandas import Series, DataFrame
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import dates as d
import os
import math
import glob
import matplotlib
import warnings
import sys
__all__ = ['diurnal_plot','diurnal_plot_single', 'ThermoPlot']
def diurnal_plot(data, dates=[], shaded=False, title="Diurnal Profile of Trace Gases", xlabel="Local Time: East St. Louis, MO"):
'''
    If `dates` is empty, the entire DataFrame is plotted; pass a single date
    (as a one-element list) to plot that day only, or two dates to plot the
    span between them. `data` should be a pandas DataFrame with a time index
    and one column per trace gas concentration.
    Returns a single figure with stacked axes for NOx, SO2, and O3.
'''
# Check to make sure the data is a valid dataframe
if not isinstance(data, pd.DataFrame):
print ("data is not a pandas DataFrame, thus this will not end well for you.")
exit
# If length of dates is zero, plot everything
if len(dates) == 0:
# Plot everything, yo!
pass
elif len(dates) == 1:
# Plot just this date
data = data[dates[0]]
elif len(dates) == 2:
# Plot between these dates
data = data[dates[0]:dates[1]]
else:
sys.exit("Dates are not properly configured.")
# Add columns for time to enable simple diurnal trends to be found
data['Time'] = data.index.map(lambda x: x.strftime("%H:%M"))
# Group the data by time and grab the statistics
grouped = data.groupby('Time').describe().unstack()
# set the index to be a str
grouped.index = pd.to_datetime(grouped.index.astype(str))
# Plot
fig, (ax1, ax2, ax3) = plt.subplots(3, figsize=(10,9), sharex=True)
# Set plot titles and labels
ax1.set_title(title, fontsize=14)
ax1.set_ylabel(r'$\ [NO_x] (ppb)$', fontsize=14, weight='bold')
ax2.set_ylabel(r'$\ [SO_2] (ppb)$', fontsize=14)
ax3.set_ylabel(r'$\ [O_3] (ppb)$', fontsize=14)
ax3.set_xlabel(xlabel, fontsize=14)
# Make the ticks invisible on the first and second plots
plt.setp( ax1.get_xticklabels(), visible=False)
plt.setp( ax2.get_xticklabels(), visible=False)
# Set y min to zero just in case:
ax1.set_ylim(0,grouped['nox']['mean'].max()*1.05)
ax2.set_ylim(0,grouped['so2']['mean'].max()*1.05)
ax3.set_ylim(0,grouped['o3']['mean'].max()*1.05)
# Plot means
ax1.plot(grouped.index, grouped['nox']['mean'],'g', linewidth=2.0)
ax2.plot(grouped.index, grouped['so2']['mean'], 'r', linewidth=2.0)
ax3.plot(grouped.index, grouped['o3']['mean'], 'b', linewidth=2.0)
# If shaded=true, plot trends
if shaded == True:
ax1.plot(grouped.index, grouped['nox']['75%'],'g')
ax1.plot(grouped.index, grouped['nox']['25%'],'g')
ax1.set_ylim(0,grouped['nox']['75%'].max()*1.05)
ax1.fill_between(grouped.index, grouped['nox']['mean'], grouped['nox']['75%'], alpha=.5, facecolor='green')
ax1.fill_between(grouped.index, grouped['nox']['mean'], grouped['nox']['25%'], alpha=.5, facecolor='green')
ax2.plot(grouped.index, grouped['so2']['75%'],'r')
ax2.plot(grouped.index, grouped['so2']['25%'],'r')
ax2.set_ylim(0,grouped['so2']['75%'].max()*1.05)
ax2.fill_between(grouped.index, grouped['so2']['mean'], grouped['so2']['75%'], alpha=.5, facecolor='red')
ax2.fill_between(grouped.index, grouped['so2']['mean'], grouped['so2']['25%'], alpha=.5, facecolor='red')
ax3.plot(grouped.index, grouped['o3']['75%'],'b')
ax3.plot(grouped.index, grouped['o3']['25%'],'b')
ax3.set_ylim(0,grouped['o3']['75%'].max()*1.05)
ax3.fill_between(grouped.index, grouped['o3']['mean'], grouped['o3']['75%'], alpha=.5, facecolor='blue')
ax3.fill_between(grouped.index, grouped['o3']['mean'], grouped['o3']['25%'], alpha=.5, facecolor='blue')
# Get/Set xticks
ticks = ax1.get_xticks()
ax3.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 5))
ax3.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 25), minor=True)
ax3.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%I:%M %p'))
# Make the layout tight to get rid of some whitespace
plt.tight_layout()
plt.show()
return (fig, (ax1, ax2, ax3))
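def _example_diurnal_plot():
    """Hedged usage sketch for diurnal_plot. The synthetic minute-resolution
    frame below, with columns 'nox', 'so2' and 'o3', is an assumption made only
    to show the expected input layout; it is not real measurement data."""
    index = pd.date_range('2014-01-01', periods=7 * 24 * 60, freq='T')
    demo = pd.DataFrame({'nox': np.random.rand(len(index)) * 20,
                         'so2': np.random.rand(len(index)) * 5,
                         'o3': np.random.rand(len(index)) * 40}, index=index)
    return diurnal_plot(demo, dates=['2014-01-02', '2014-01-05'], shaded=True)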
def diurnal_plot_single(data, model='', dates=[], shaded=False, color1 = 'blue',
title="Diurnal Profile of Trace Gases", xlabel="Local Time: East St. Louis, MO",
ylabel=r'$\ [NO_x] (ppb)$'):
'''
`data` should be a pandas core DataFrame with time index and each trace gas concentration as a column
returns a single plot for one of the three analyzers.
>>>diurnal_plot_single(data,model='o3', ylabel='O3', shaded=True, color1='green')
'''
# Check to make sure the data is a valid dataframe
if not isinstance(data, pd.DataFrame):
sys.exit("data is not a pandas DataFrame, thus this will not end well for you.")
# Check to make sure the model is valid
if model.lower() not in ['nox','so2','o3','sox']:
sys.exit("Model is not defined correctly: options are ['nox','so2','sox','o3']")
# Set model to predefined variable
if model.lower() == 'nox':
instr = 'nox'
elif model.lower() == 'so2' or model.lower() == 'sox':
instr = 'sox'
else:
instr = 'o3'
# If not plotting all the data, truncate the dataframe to include only the needed data
if len(dates) == 0:
# plot everything
pass
elif len(dates) == 1:
# plot just this date
data = data[dates[0]]
elif len(dates) == 2:
# plot between these dates
data = data[dates[0]:dates[1]]
else:
sys.exit("You have an error with how you defined your dates")
# Add columns for time to enable simple diurnal trends to be found
data['Time'] = data.index.map(lambda x: x.strftime("%H:%M"))
# Group the data by time and grab the statistics
grouped = data.groupby('Time').describe().unstack()
# set the index to be a str
grouped.index = pd.to_datetime(grouped.index.astype(str))
# Plot
fig, ax = plt.subplots(1, figsize=(8,4))
# Set plot titles and labels
ax.set_title(title, fontsize=14)
ax.set_ylabel(ylabel, fontsize=14, weight='bold')
ax.set_xlabel(xlabel, fontsize=14)
# Set y min to zero just in case:
ax.set_ylim(0,grouped[instr]['mean'].max()*1.05)
# Plot means
ax.plot(grouped.index, grouped[instr]['mean'], color1,linewidth=2.0)
# If shaded=true, plot trends
if shaded == True:
ax.plot(grouped.index, grouped[instr]['75%'],color1)
ax.plot(grouped.index, grouped[instr]['25%'],color1)
ax.set_ylim(0,grouped[instr]['75%'].max()*1.05)
ax.fill_between(grouped.index, grouped[instr]['mean'], grouped[instr]['75%'], alpha=.5, facecolor=color1)
ax.fill_between(grouped.index, grouped[instr]['mean'], grouped[instr]['25%'], alpha=.5, facecolor=color1)
# Get/Set xticks
ticks = ax.get_xticks()
ax.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 5))
ax.set_xticks(np.linspace(ticks[0], d.date2num(d.num2date(ticks[-1]) + dt.timedelta(hours=3)), 25), minor=True)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%I:%M %p'))
# Make the layout tight to get rid of some whitespace
plt.tight_layout()
plt.show()
return (fig, ax)
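def _example_diurnal_plot_single(data):
    """Hedged sketch for the single-instrument variant above. `data` is assumed
    to already carry an 'o3' column and a time index, as the function requires;
    only the keyword choices below are illustrative."""
    return diurnal_plot_single(data, model='o3', shaded=True, color1='green',
                               ylabel=r'$\ [O_3] (ppb)$')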
class ThermoPlot():
'''
Allows for easy plotting of internal instrument data. Currently supports the
following models:
- NO, NO2, NOx (42I)
- O3 (49I)
- SO2 (43I)
'''
def __init__(self, data):
self.data = data
def debug_plot(self, args={}):
'''
Plots thermo scientific instrument data for debugging purposes. The top plot contains internal
instrument data such as flow rates and temperatures. The bottom plot contains trace gas data for the
instrument.
instrument must be set to either nox, so2, sox, or o3
>>> nox = ThermoPlot(data)
>>> f, (a1, a2, a3) = nox.debug_plot()
'''
default_args = {
'xlabel':'Local Time, East St Louis, MO',
'ylabpressure':'Flow (LPM)',
'ylabgas':'Gas Conc. (ppb)',
'ylabtemp':'Temperature (C)',
'title_fontsize':'18',
'labels_fontsize':'14',
'grid':False
}
# Figure out what model we are trying to plot and set instrument specific default args
cols = [i.lower() for i in self.data.columns.values.tolist()]
if 'o3' in cols:
default_args['instrument'] = 'o3'
default_args['title'] = "Debug Plot for " + r'$\ O_{3} $' + ": Model 49I"
default_args['color_o3'] = 'blue'
elif 'sox' in cols or 'so2' in cols:
default_args['instrument'] = 'so2'
default_args['title'] = "Debug Plot for " + r'$\ SO_{2} $' + ": Model 43I"
default_args['color_so2'] = 'green'
elif 'nox' in cols:
default_args['instrument'] = 'nox'
default_args['title'] = "Debug Plot for " + r'$\ NO_{x} $' + ": Model 42I"
default_args['color_no'] = '#FAB923'
default_args['color_nox'] = '#FC5603'
default_args['color_no2'] = '#FAE823'
else:
sys.exit("Could not figure out what isntrument this is for")
# If kwargs are set, replace the default values
        for key, val in default_args.items():
            if key in args:
                default_args[key] = args[key]
# Set up Plot and all three axes
fig, (ax1, ax3) = plt.subplots(2, figsize=(10,6), sharex=True)
ax2 = ax1.twinx()
# set up axes labels and titles
ax1.set_title(default_args['title'], fontsize=default_args['title_fontsize'])
ax1.set_ylabel(default_args['ylabpressure'], fontsize=default_args['labels_fontsize'])
ax2.set_ylabel(default_args['ylabtemp'], fontsize=default_args['labels_fontsize'])
ax3.set_ylabel(default_args['ylabgas'], fontsize=default_args['labels_fontsize'])
ax3.set_xlabel(default_args['xlabel'], fontsize=default_args['labels_fontsize'])
# Make the ticks invisible on the first and second plots
plt.setp( ax1.get_xticklabels(), visible=False )
# Plot the debug data on the top graph
if default_args['instrument'] == 'o3':
self.data['bncht'].plot(ax=ax2, label=r'$\ T_{bench}$')
self.data['lmpt'].plot(ax=ax2, label=r'$\ T_{lamp}$')
self.data['flowa'].plot(ax=ax1, label=r'$\ Q_{A}$', style='--')
self.data['flowb'].plot(ax=ax1, label=r'$\ Q_{B}$', style='--')
self.data['o3'].plot(ax=ax3, color=default_args['color_o3'], label=r'$\ O_{3}$')
elif default_args['instrument'] == 'so2':
self.data['intt'].plot(ax=ax2, label=r'$\ T_{internal}$')
self.data['rctt'].plot(ax=ax2, label=r'$\ T_{reactor}$')
self.data['smplfl'].plot(ax=ax1, label=r'$\ Q_{sample}$', style='--')
self.data['so2'].plot(ax=ax3, label=r'$\ SO_2 $', color=default_args['color_so2'], ylim=[0,self.data['so2'].max()*1.05])
else:
m = max(self.data['convt'].max(),self.data['intt'].max(),self.data['pmtt'].max())
self.data['convt'].plot(ax=ax2, label=r'$\ T_{converter}$')
self.data['intt'].plot(ax=ax2, label=r'$\ T_{internal}$')
self.data['rctt'].plot(ax=ax2, label=r'$\ T_{reactor}$')
self.data['pmtt'].plot(ax=ax2, label=r'$\ T_{PMT}$')
self.data['smplf'].plot(ax=ax1, label=r'$\ Q_{sample}$', style='--')
self.data['ozonf'].plot(ax=ax1, label=r'$\ Q_{ozone}$', style='--')
self.data['no'].plot(ax=ax3, label=r'$\ NO $', color=default_args['color_no'])
self.data['no2'].plot(ax=ax3, label=r'$\ NO_{2}$', color=default_args['color_no2'])
self.data['nox'].plot(ax=ax3, label=r'$\ NO_{x}$', color=default_args['color_nox'], ylim=(0,math.ceil(self.data.nox.max()*1.05)))
# Legends
lines, labels = ax1.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
plt.legend(lines+lines2, labels+labels2, bbox_to_anchor=(1.10, 1), loc=2, borderaxespad=0.)
ax3.legend(bbox_to_anchor=(1.10, 1.), loc=2, borderaxespad=0.)
# Hide grids?
ax1.grid(default_args['grid'])
ax2.grid(default_args['grid'])
ax3.grid(default_args['grid'])
# More of the things..
plt.tight_layout()
plt.show()
return fig, (ax1, ax2, ax3) | mit |
AlexanderFabisch/scikit-learn | sklearn/manifold/t_sne.py | 13 | 34618 | # Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# Author: Christopher Moody <chrisemoody@gmail.com>
# Author: Nick Travers <nickt@squareup.com>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..neighbors import BallTree
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..utils.fixes import astype
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, None, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
This method is approximately equal to _joint_probabilities. The latter
    is O(N^2), but limiting the joint probability to nearest neighbors improves
this substantially to O(uN).
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
neighbors = astype(neighbors, np.int64, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, neighbors, desired_perplexity, verbose)
m = "All probabilities should be finite"
assert np.all(np.isfinite(conditional_P)), m
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
assert np.all(np.abs(P) <= 1.0)
return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
skip_num_points=0):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(skip_num_points, n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
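# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the exact-method pipeline above: build squared pairwise
# distances, convert them to joint probabilities P, then evaluate the KL
# objective and its gradient for a random 2-D embedding. All names below are
# local assumptions, not library API.
def _example_exact_kl_gradient():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    distances = pairwise_distances(X, squared=True)
    P = _joint_probabilities(distances, desired_perplexity=10.0, verbose=0)
    params = 1e-4 * rng.randn(20, 2).ravel()
    kl, grad = _kl_divergence(params, P, degrees_of_freedom=1.0,
                              n_samples=20, n_components=2)
    return kl, grad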
def _kl_divergence_error(params, P, neighbors, degrees_of_freedom, n_samples,
n_components):
"""t-SNE objective function: the absolute error of the
KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors : array (n_samples, K)
The neighbors is not actually required to calculate the
divergence, but is here to match the signature of the
gradient function
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
if len(P.shape) == 2:
P = squareform(P)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
return kl_divergence
def _kl_divergence_bh(params, P, neighbors, degrees_of_freedom, n_samples,
n_components, angle=0.5, skip_num_points=0,
verbose=False):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors: int64 array, shape (n_samples, K)
Array with element [i, j] giving the index for the jth
closest neighbor to point i.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater 0.8 has quickly increasing error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = astype(params, np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
neighbors = astype(neighbors, np.int64, copy=False)
if len(P.shape) == 1:
sP = squareform(P).astype(np.float32)
else:
sP = P.astype(np.float32)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(sP, X_embedded, neighbors,
grad, angle, n_components, verbose,
dof=degrees_of_freedom)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(objective, p0, it, n_iter, objective_error=None,
n_iter_check=1, n_iter_without_progress=50,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
objective_error : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
    n_iter_without_progress : int, optional (default: 50)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
if new_error is None:
new_error = objective_error(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
if verbose >= 2:
m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
print(m % (i + 1, error, grad_norm))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if error_diff <= min_error_diff:
if verbose >= 2:
m = "[t-SNE] Iteration %d: error difference %f. Finished."
print(m % (i + 1, error_diff))
break
if new_error is not None:
error = new_error
return p, error, i
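# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the (cost, gradient) contract `_gradient_descent`
# expects, using a toy quadratic objective rather than the t-SNE objective.
# The learning rate below is an assumption chosen for the toy problem.
def _example_gradient_descent_on_quadratic():
    def objective(p):
        # cost = ||p||^2, gradient = 2p
        return np.sum(p ** 2), 2.0 * p
    p0 = np.array([1.0, -2.0, 3.0])
    p, error, it = _gradient_descent(objective, p0, it=0, n_iter=200,
                                     momentum=0.5, learning_rate=0.1)
    return p, error, it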
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
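# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of `trustworthiness`: compare a random 2-D "embedding" of
# random data against the original space. Values close to 1.0 mean local
# neighborhoods are preserved; a random embedding is expected to score lower.
# The array sizes below are arbitrary assumptions.
def _example_trustworthiness_of_random_embedding():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    X_embedded = rng.randn(50, 2)
    return trustworthiness(X, X_embedded, n_neighbors=5)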
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1E-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater 0.8 has quickly increasing error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> np.set_printoptions(suppress=True)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 0.00017599, 0.00003993],
[ 0.00009891, 0.00021913],
[ 0.00018554, -0.00009357],
[ 0.00009528, -0.00001407]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
        if not isinstance(init, np.ndarray) and init not in ["pca", "random"]:
msg = "'init' must be 'pca', 'random' or a NumPy array"
raise ValueError(msg)
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
self.embedding_ = None
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
(e.g. `X.toarray()`) if it fits in memory, or otherwise using a
        dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that
when method='barnes_hut', X cannot be a sparse array and if need be
will be converted to a 32 bit float array. Method='exact' allows
sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
X = check_array(X, dtype=np.float32)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if not np.all(distances >= 0):
raise ValueError("All distances should be positive, either "
"the metric or precomputed distances given "
"as X are not correct")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
# the number of nearest neighbors to find
k = min(n_samples - 1, int(3. * self.perplexity + 1))
neighbors_nn = None
if self.method == 'barnes_hut':
if self.verbose:
print("[t-SNE] Computing %i nearest neighbors..." % k)
if self.metric == 'precomputed':
# Use the precomputed distances to find
# the k nearest neighbors and their distances
neighbors_nn = np.argsort(distances, axis=1)[:, :k]
else:
# Find the nearest neighbors for every point
bt = BallTree(X)
# LvdM uses 3 * perplexity as the number of neighbors
# And we add one to not count the data point itself
# In the event that we have very small # of points
# set the neighbors to n - 1
distances_nn, neighbors_nn = bt.query(X, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
P = _joint_probabilities_nn(distances, neighbors_nn,
self.perplexity, self.verbose)
else:
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be zero or positive"
assert np.all(P <= 1), ("All probabilities should be less "
"or then equal to one")
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
return self._tsne(P, degrees_of_freedom, n_samples, random_state,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
def _tsne(self, P, degrees_of_freedom, n_samples, random_state,
X_embedded=None, neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
opt_args = {"n_iter": 50, "momentum": 0.5, "it": 0,
"learning_rate": self.learning_rate,
"verbose": self.verbose, "n_iter_check": 25,
"kwargs": dict(skip_num_points=skip_num_points)}
if self.method == 'barnes_hut':
m = "Must provide an array of neighbors to use Barnes-Hut"
assert neighbors is not None, m
obj_func = _kl_divergence_bh
objective_error = _kl_divergence_error
sP = squareform(P).astype(np.float32)
neighbors = neighbors.astype(np.int64)
args = [sP, neighbors, degrees_of_freedom, n_samples,
self.n_components]
opt_args['args'] = args
opt_args['min_grad_norm'] = 1e-3
opt_args['n_iter_without_progress'] = 30
# Don't always calculate the cost since that calculation
# can be nearly as expensive as the gradient
opt_args['objective_error'] = objective_error
opt_args['kwargs']['angle'] = self.angle
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
opt_args['args'] = [P, degrees_of_freedom, n_samples,
self.n_components]
opt_args['min_error_diff'] = 0.0
opt_args['min_grad_norm'] = 0.0
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(obj_func, params, **opt_args)
opt_args['n_iter'] = 100
opt_args['momentum'] = 0.8
opt_args['it'] = it + 1
params, error, it = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Save the final number of iterations
self.n_iter_final = it
# Final optimization
P /= self.early_exaggeration
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
params, error, it = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
"""
self.fit_transform(X)
return self
| bsd-3-clause |
ottermegazord/ottermegazord.github.io | onexi/data_processing/s05_genPlots.py | 1 | 1460 | import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import pdb
import sys
plt.style.use("ggplot")
os.chdir("..")
ipath = "./Data/Final_Data/"
ifile = "Final_Data"
opath = "./Data/Final_Data/Neighborhoods/"
imgpath = "./Plots/Neighborhood_TS/"
ext = ".csv"
input_var = raw_input("Run mode (analysis/plot): ")
if input_var == "analysis":
df = pd.read_csv(ipath + ifile + ext, low_memory=False)
df2 = df.groupby(["TIME", "NEIGHBORHOOD"]).mean().unstack()
time = df["TIME"].unique().tolist()
nhood = df["NEIGHBORHOOD"].unique().tolist()
nhood = [x for x in nhood if str(x) != 'nan']
for n in nhood:
mean = []
for t in time:
mean.append(df2.loc[t, ("AV_PER_SQFT", n)])
out_df = pd.DataFrame({'TIME': time, 'MEAN_AV_PER_SQFT': mean})
out_df.to_csv(opath + n + ext, index=False)
elif input_var == "plot":
def makePlot(x, y, xlabel, ylabel, title, filename):
x_pos = [i for i, _ in enumerate(x)]
plt.bar(x_pos, y, color='green')
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.title(title)
plt.xticks(x_pos, x, fontsize=8)
plt.savefig(filename, bbox_inches="tight", dpi=300)
plt.close()
nhood_files = os.listdir(opath)
for f in nhood_files:
nhood = f[:-4]
df = pd.read_csv(opath + f, low_memory=False)
makePlot(x=df["TIME"].tolist(), y=df["MEAN_AV_PER_SQFT"].tolist(), ylabel="AVG LAND VALUE ($/sqft)", xlabel="TIME (year)", title=nhood, filename=imgpath + nhood +".png")
| mit |
linebp/pandas | pandas/io/packers.py | 4 | 27509 | """
Msgpack serializer support for reading and writing pandas data structures
to disk
portions of msgpack_numpy package, by Lev Givon were incorporated
into this module (and tests_packers.py)
License
=======
Copyright (c) 2013, Lev Givon.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Lev Givon nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from datetime import datetime, date, timedelta
from dateutil.parser import parse
import os
from textwrap import dedent
import warnings
import numpy as np
from pandas import compat
from pandas.compat import u, u_safe
from pandas.core.dtypes.common import (
is_categorical_dtype, is_object_dtype,
needs_i8_conversion, pandas_dtype)
from pandas import (Timestamp, Period, Series, DataFrame, # noqa
Index, MultiIndex, Float64Index, Int64Index,
Panel, RangeIndex, PeriodIndex, DatetimeIndex, NaT,
Categorical, CategoricalIndex)
from pandas._libs.tslib import NaTType
from pandas.core.sparse.api import SparseSeries, SparseDataFrame
from pandas.core.sparse.array import BlockIndex, IntIndex
from pandas.core.generic import NDFrame
from pandas.errors import PerformanceWarning
from pandas.io.common import get_filepath_or_buffer, _stringify_path
from pandas.core.internals import BlockManager, make_block, _safe_reshape
import pandas.core.internals as internals
from pandas.io.msgpack import Unpacker as _Unpacker, Packer as _Packer, ExtType
from pandas.util._move import (
BadMove as _BadMove,
move_into_mutable_buffer as _move_into_mutable_buffer,
)
# check which compression libs we have installed
try:
import zlib
def _check_zlib():
pass
except ImportError:
def _check_zlib():
raise ImportError('zlib is not installed')
_check_zlib.__doc__ = dedent(
"""\
Check if zlib is installed.
Raises
------
ImportError
Raised when zlib is not installed.
""",
)
try:
import blosc
def _check_blosc():
pass
except ImportError:
def _check_blosc():
raise ImportError('blosc is not installed')
_check_blosc.__doc__ = dedent(
"""\
Check if blosc is installed.
Raises
------
ImportError
Raised when blosc is not installed.
""",
)
# until we can pass this into our conversion functions,
# this is pretty hacky
compressor = None
def to_msgpack(path_or_buf, *args, **kwargs):
"""
msgpack (serialize) object to input file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, buffer-like, or None
if None, return generated string
args : an object or objects to serialize
encoding: encoding for unicode objects
append : boolean whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
compression)
"""
global compressor
compressor = kwargs.pop('compress', None)
if compressor:
compressor = u(compressor)
append = kwargs.pop('append', None)
if append:
mode = 'a+b'
else:
mode = 'wb'
def writer(fh):
for a in args:
fh.write(pack(a, **kwargs))
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, compat.string_types):
with open(path_or_buf, mode) as fh:
writer(fh)
elif path_or_buf is None:
buf = compat.BytesIO()
writer(buf)
return buf.getvalue()
else:
writer(path_or_buf)
def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs):
"""
Load msgpack pandas object from the specified
file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, BytesIO like or string
encoding: Encoding for decoding msgpack str type
iterator : boolean, if True, return an iterator to the unpacker
(default is False)
Returns
-------
obj : type of object stored in file
"""
path_or_buf, _, _ = get_filepath_or_buffer(path_or_buf)
if iterator:
return Iterator(path_or_buf)
def read(fh):
l = list(unpack(fh, encoding=encoding, **kwargs))
if len(l) == 1:
return l[0]
return l
# see if we have an actual file
if isinstance(path_or_buf, compat.string_types):
try:
exists = os.path.exists(path_or_buf)
except (TypeError, ValueError):
exists = False
if exists:
with open(path_or_buf, 'rb') as fh:
return read(fh)
# treat as a binary-like
if isinstance(path_or_buf, compat.binary_type):
fh = None
try:
fh = compat.BytesIO(path_or_buf)
return read(fh)
finally:
if fh is not None:
fh.close()
# a buffer like
if hasattr(path_or_buf, 'read') and compat.callable(path_or_buf.read):
return read(path_or_buf)
raise ValueError('path_or_buf needs to be a string file path or file-like')
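# --- Illustrative usage sketch (not part of the original module) ---
# A hedged round-trip through the two helpers above: serialize a small
# DataFrame to msgpack bytes (path_or_buf=None returns the packed bytes) and
# read it back. The frame contents are arbitrary assumptions.
def _example_msgpack_roundtrip():
    df = DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
    packed = to_msgpack(None, df)
    return read_msgpack(packed)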
dtype_dict = {21: np.dtype('M8[ns]'),
u('datetime64[ns]'): np.dtype('M8[ns]'),
u('datetime64[us]'): np.dtype('M8[us]'),
22: np.dtype('m8[ns]'),
u('timedelta64[ns]'): np.dtype('m8[ns]'),
u('timedelta64[us]'): np.dtype('m8[us]'),
# this is platform int, which we need to remap to np.int64
# for compat on windows platforms
7: np.dtype('int64'),
'category': 'category'
}
def dtype_for(t):
""" return my dtype mapping, whether number or name """
if t in dtype_dict:
return dtype_dict[t]
return np.typeDict.get(t, t)
c2f_dict = {'complex': np.float64,
'complex128': np.float64,
'complex64': np.float32}
# numpy 1.6.1 compat
if hasattr(np, 'float128'):
c2f_dict['complex256'] = np.float128
def c2f(r, i, ctype_name):
"""
Convert strings to complex number instance with specified numpy type.
"""
ftype = c2f_dict[ctype_name]
return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))
def convert(values):
""" convert the numpy values to a list """
dtype = values.dtype
if is_categorical_dtype(values):
return values
elif is_object_dtype(dtype):
return values.ravel().tolist()
if needs_i8_conversion(dtype):
values = values.view('i8')
v = values.ravel()
if compressor == 'zlib':
_check_zlib()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, zlib.compress(v))
elif compressor == 'blosc':
_check_blosc()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, blosc.compress(v, typesize=dtype.itemsize))
# ndarray (on original dtype)
return ExtType(0, v.tostring())
def unconvert(values, dtype, compress=None):
as_is_ext = isinstance(values, ExtType) and values.code == 0
if as_is_ext:
values = values.data
if is_categorical_dtype(dtype):
return values
elif is_object_dtype(dtype):
return np.array(values, dtype=object)
dtype = pandas_dtype(dtype).base
if not as_is_ext:
values = values.encode('latin1')
if compress:
if compress == u'zlib':
_check_zlib()
decompress = zlib.decompress
elif compress == u'blosc':
_check_blosc()
decompress = blosc.decompress
else:
raise ValueError("compress must be one of 'zlib' or 'blosc'")
try:
return np.frombuffer(
_move_into_mutable_buffer(decompress(values)),
dtype=dtype,
)
except _BadMove as e:
# Pull the decompressed data off of the `_BadMove` exception.
# We don't just store this in the locals because we want to
# minimize the risk of giving users access to a `bytes` object
# whose data is also given to a mutable buffer.
values = e.args[0]
if len(values) > 1:
# The empty string and single characters are memoized in many
# string creating functions in the capi. This case should not
# warn even though we need to make a copy because we are only
# copying at most 1 byte.
warnings.warn(
'copying data after decompressing; this may mean that'
' decompress is caching its result',
PerformanceWarning,
)
# fall through to copying `np.fromstring`
# Copy the string into a numpy array.
return np.fromstring(values, dtype=dtype)
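# --- Illustrative usage sketch (not part of the original module) ---
# A hedged round-trip through `convert`/`unconvert` for a plain numpy array.
# With the module-level `compressor` left at None, `convert` returns an
# ExtType wrapping the raw bytes, which `unconvert` turns back into an array.
def _example_convert_roundtrip():
    arr = np.arange(6, dtype=np.int64)
    packed = convert(arr)
    return unconvert(packed, np.dtype('int64'), compress=None)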
def encode(obj):
"""
Data encoder
"""
tobj = type(obj)
if isinstance(obj, Index):
if isinstance(obj, RangeIndex):
return {u'typ': u'range_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'start': getattr(obj, '_start', None),
u'stop': getattr(obj, '_stop', None),
u'step': getattr(obj, '_step', None)}
elif isinstance(obj, PeriodIndex):
return {u'typ': u'period_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'freq': u_safe(getattr(obj, 'freqstr', None)),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.asi8),
u'compress': compressor}
elif isinstance(obj, DatetimeIndex):
tz = getattr(obj, 'tz', None)
# store tz info and data as UTC
if tz is not None:
tz = u(tz.zone)
obj = obj.tz_convert('UTC')
return {u'typ': u'datetime_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.asi8),
u'freq': u_safe(getattr(obj, 'freqstr', None)),
u'tz': tz,
u'compress': compressor}
elif isinstance(obj, MultiIndex):
return {u'typ': u'multi_index',
u'klass': u(obj.__class__.__name__),
u'names': getattr(obj, 'names', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
else:
return {u'typ': u'index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
elif isinstance(obj, Categorical):
return {u'typ': u'category',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'codes': obj.codes,
u'categories': obj.categories,
u'ordered': obj.ordered,
u'compress': compressor}
elif isinstance(obj, Series):
if isinstance(obj, SparseSeries):
raise NotImplementedError(
'msgpack sparse series is not implemented'
)
# d = {'typ': 'sparse_series',
# 'klass': obj.__class__.__name__,
# 'dtype': obj.dtype.name,
# 'index': obj.index,
# 'sp_index': obj.sp_index,
# 'sp_values': convert(obj.sp_values),
# 'compress': compressor}
# for f in ['name', 'fill_value', 'kind']:
# d[f] = getattr(obj, f, None)
# return d
else:
return {u'typ': u'series',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'index': obj.index,
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
elif issubclass(tobj, NDFrame):
if isinstance(obj, SparseDataFrame):
raise NotImplementedError(
'msgpack sparse frame is not implemented'
)
# d = {'typ': 'sparse_dataframe',
# 'klass': obj.__class__.__name__,
# 'columns': obj.columns}
# for f in ['default_fill_value', 'default_kind']:
# d[f] = getattr(obj, f, None)
# d['data'] = dict([(name, ss)
# for name, ss in compat.iteritems(obj)])
# return d
else:
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
# the block manager
return {u'typ': u'block_manager',
u'klass': u(obj.__class__.__name__),
u'axes': data.axes,
u'blocks': [{u'locs': b.mgr_locs.as_array,
u'values': convert(b.values),
u'shape': b.values.shape,
u'dtype': u(b.dtype.name),
u'klass': u(b.__class__.__name__),
u'compress': compressor} for b in data.blocks]
}
elif isinstance(obj, (datetime, date, np.datetime64, timedelta,
np.timedelta64, NaTType)):
if isinstance(obj, Timestamp):
tz = obj.tzinfo
if tz is not None:
tz = u(tz.zone)
freq = obj.freq
if freq is not None:
freq = u(freq.freqstr)
return {u'typ': u'timestamp',
u'value': obj.value,
u'freq': freq,
u'tz': tz}
if isinstance(obj, NaTType):
return {u'typ': u'nat'}
elif isinstance(obj, np.timedelta64):
return {u'typ': u'timedelta64',
u'data': obj.view('i8')}
elif isinstance(obj, timedelta):
return {u'typ': u'timedelta',
u'data': (obj.days, obj.seconds, obj.microseconds)}
elif isinstance(obj, np.datetime64):
return {u'typ': u'datetime64',
u'data': u(str(obj))}
elif isinstance(obj, datetime):
return {u'typ': u'datetime',
u'data': u(obj.isoformat())}
elif isinstance(obj, date):
return {u'typ': u'date',
u'data': u(obj.isoformat())}
raise Exception("cannot encode this datetimelike object: %s" % obj)
elif isinstance(obj, Period):
return {u'typ': u'period',
u'ordinal': obj.ordinal,
u'freq': u(obj.freq)}
elif isinstance(obj, BlockIndex):
return {u'typ': u'block_index',
u'klass': u(obj.__class__.__name__),
u'blocs': obj.blocs,
u'blengths': obj.blengths,
u'length': obj.length}
elif isinstance(obj, IntIndex):
return {u'typ': u'int_index',
u'klass': u(obj.__class__.__name__),
u'indices': obj.indices,
u'length': obj.length}
elif isinstance(obj, np.ndarray):
return {u'typ': u'ndarray',
u'shape': obj.shape,
u'ndim': obj.ndim,
u'dtype': u(obj.dtype.name),
u'data': convert(obj),
u'compress': compressor}
elif isinstance(obj, np.number):
if np.iscomplexobj(obj):
return {u'typ': u'np_scalar',
u'sub_typ': u'np_complex',
u'dtype': u(obj.dtype.name),
u'real': u(obj.real.__repr__()),
u'imag': u(obj.imag.__repr__())}
else:
return {u'typ': u'np_scalar',
u'dtype': u(obj.dtype.name),
u'data': u(obj.__repr__())}
elif isinstance(obj, complex):
return {u'typ': u'np_complex',
u'real': u(obj.real.__repr__()),
u'imag': u(obj.imag.__repr__())}
return obj
def decode(obj):
"""
Decoder for deserializing numpy data types.
"""
typ = obj.get(u'typ')
if typ is None:
return obj
elif typ == u'timestamp':
freq = obj[u'freq'] if 'freq' in obj else obj[u'offset']
return Timestamp(obj[u'value'], tz=obj[u'tz'], freq=freq)
elif typ == u'nat':
return NaT
elif typ == u'period':
return Period(ordinal=obj[u'ordinal'], freq=obj[u'freq'])
elif typ == u'index':
dtype = dtype_for(obj[u'dtype'])
data = unconvert(obj[u'data'], dtype,
obj.get(u'compress'))
return globals()[obj[u'klass']](data, dtype=dtype, name=obj[u'name'])
elif typ == u'range_index':
return globals()[obj[u'klass']](obj[u'start'],
obj[u'stop'],
obj[u'step'],
name=obj[u'name'])
elif typ == u'multi_index':
dtype = dtype_for(obj[u'dtype'])
data = unconvert(obj[u'data'], dtype,
obj.get(u'compress'))
data = [tuple(x) for x in data]
return globals()[obj[u'klass']].from_tuples(data, names=obj[u'names'])
elif typ == u'period_index':
data = unconvert(obj[u'data'], np.int64, obj.get(u'compress'))
d = dict(name=obj[u'name'], freq=obj[u'freq'])
return globals()[obj[u'klass']]._from_ordinals(data, **d)
elif typ == u'datetime_index':
data = unconvert(obj[u'data'], np.int64, obj.get(u'compress'))
d = dict(name=obj[u'name'], freq=obj[u'freq'], verify_integrity=False)
result = globals()[obj[u'klass']](data, **d)
tz = obj[u'tz']
# reverse tz conversion
if tz is not None:
result = result.tz_localize('UTC').tz_convert(tz)
return result
elif typ == u'category':
from_codes = globals()[obj[u'klass']].from_codes
return from_codes(codes=obj[u'codes'],
categories=obj[u'categories'],
ordered=obj[u'ordered'])
elif typ == u'series':
dtype = dtype_for(obj[u'dtype'])
pd_dtype = pandas_dtype(dtype)
index = obj[u'index']
result = globals()[obj[u'klass']](unconvert(obj[u'data'], dtype,
obj[u'compress']),
index=index,
dtype=pd_dtype,
name=obj[u'name'])
return result
elif typ == u'block_manager':
axes = obj[u'axes']
def create_block(b):
values = _safe_reshape(unconvert(
b[u'values'], dtype_for(b[u'dtype']),
b[u'compress']), b[u'shape'])
# locs handles duplicate column names, and should be used instead
# of items; see GH 9618
if u'locs' in b:
placement = b[u'locs']
else:
placement = axes[0].get_indexer(b[u'items'])
return make_block(values=values,
klass=getattr(internals, b[u'klass']),
placement=placement,
dtype=b[u'dtype'])
blocks = [create_block(b) for b in obj[u'blocks']]
return globals()[obj[u'klass']](BlockManager(blocks, axes))
elif typ == u'datetime':
return parse(obj[u'data'])
elif typ == u'datetime64':
return np.datetime64(parse(obj[u'data']))
elif typ == u'date':
return parse(obj[u'data']).date()
elif typ == u'timedelta':
return timedelta(*obj[u'data'])
elif typ == u'timedelta64':
return np.timedelta64(int(obj[u'data']))
# elif typ == 'sparse_series':
# dtype = dtype_for(obj['dtype'])
# return globals()[obj['klass']](
# unconvert(obj['sp_values'], dtype, obj['compress']),
# sparse_index=obj['sp_index'], index=obj['index'],
# fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
# elif typ == 'sparse_dataframe':
# return globals()[obj['klass']](
# obj['data'], columns=obj['columns'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind']
# )
# elif typ == 'sparse_panel':
# return globals()[obj['klass']](
# obj['data'], items=obj['items'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind'])
elif typ == u'block_index':
return globals()[obj[u'klass']](obj[u'length'], obj[u'blocs'],
obj[u'blengths'])
elif typ == u'int_index':
return globals()[obj[u'klass']](obj[u'length'], obj[u'indices'])
elif typ == u'ndarray':
return unconvert(obj[u'data'], np.typeDict[obj[u'dtype']],
obj.get(u'compress')).reshape(obj[u'shape'])
elif typ == u'np_scalar':
if obj.get(u'sub_typ') == u'np_complex':
return c2f(obj[u'real'], obj[u'imag'], obj[u'dtype'])
else:
dtype = dtype_for(obj[u'dtype'])
try:
return dtype(obj[u'data'])
except:
return dtype.type(obj[u'data'])
elif typ == u'np_complex':
return complex(obj[u'real'] + u'+' + obj[u'imag'] + u'j')
elif isinstance(obj, (dict, list, set)):
return obj
else:
return obj
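# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the encode/decode pair above on a single Timestamp:
# `encode` produces a plain dict tagged with 'typ', and `decode` rebuilds the
# original object from that dict.
def _example_encode_decode_timestamp():
    ts = Timestamp('2015-01-01 12:00:00')
    return decode(encode(ts))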
def pack(o, default=encode,
encoding='utf-8', unicode_errors='strict', use_single_float=False,
autoreset=1, use_bin_type=1):
"""
Pack an object and return the packed bytes.
"""
return Packer(default=default, encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type).pack(o)
def unpack(packed, object_hook=decode,
list_hook=None, use_list=False, encoding='utf-8',
unicode_errors='strict', object_pairs_hook=None,
max_buffer_size=0, ext_hook=ExtType):
"""
Unpack a packed object, return an iterator
Note: packed lists will be returned as tuples
"""
return Unpacker(packed, object_hook=object_hook,
list_hook=list_hook,
use_list=use_list, encoding=encoding,
unicode_errors=unicode_errors,
object_pairs_hook=object_pairs_hook,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook)
class Packer(_Packer):
def __init__(self, default=encode,
encoding='utf-8',
unicode_errors='strict',
use_single_float=False,
autoreset=1,
use_bin_type=1):
super(Packer, self).__init__(default=default,
encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type)
class Unpacker(_Unpacker):
def __init__(self, file_like=None, read_size=0, use_list=False,
object_hook=decode,
object_pairs_hook=None, list_hook=None, encoding='utf-8',
unicode_errors='strict', max_buffer_size=0, ext_hook=ExtType):
super(Unpacker, self).__init__(file_like=file_like,
read_size=read_size,
use_list=use_list,
object_hook=object_hook,
object_pairs_hook=object_pairs_hook,
list_hook=list_hook,
encoding=encoding,
unicode_errors=unicode_errors,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook)
class Iterator(object):
""" manage the unpacking iteration,
close the file on completion """
def __init__(self, path, **kwargs):
self.path = path
self.kwargs = kwargs
def __iter__(self):
needs_closing = True
try:
# see if we have an actual file
if isinstance(self.path, compat.string_types):
try:
path_exists = os.path.exists(self.path)
except TypeError:
path_exists = False
if path_exists:
fh = open(self.path, 'rb')
else:
fh = compat.BytesIO(self.path)
else:
if not hasattr(self.path, 'read'):
fh = compat.BytesIO(self.path)
else:
# a file-like
needs_closing = False
fh = self.path
unpacker = unpack(fh)
for o in unpacker:
yield o
finally:
if needs_closing:
fh.close()
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/dask/array/tests/test_percentiles.py | 4 | 2323 | import pytest
pytest.importorskip('numpy')
import numpy as np
import dask.array as da
from dask.array.utils import assert_eq, same_keys
def test_percentile():
d = da.ones((16,), chunks=(4,))
assert_eq(da.percentile(d, [0, 50, 100]),
np.array([1, 1, 1], dtype=d.dtype))
x = np.array([0, 0, 5, 5, 5, 5, 20, 20])
d = da.from_array(x, chunks=(3,))
result = da.percentile(d, [0, 50, 100])
assert_eq(da.percentile(d, [0, 50, 100]),
np.array([0, 5, 20], dtype=result.dtype))
assert same_keys(da.percentile(d, [0, 50, 100]),
da.percentile(d, [0, 50, 100]))
assert not same_keys(da.percentile(d, [0, 50, 100]),
da.percentile(d, [0, 50]))
x = np.array(['a', 'a', 'd', 'd', 'd', 'e'])
d = da.from_array(x, chunks=(3,))
assert_eq(da.percentile(d, [0, 50, 100]),
np.array(['a', 'd', 'e'], dtype=x.dtype))
@pytest.mark.skip
def test_percentile_with_categoricals():
try:
import pandas as pd
except ImportError:
return
x0 = pd.Categorical(['Alice', 'Bob', 'Charlie', 'Dennis', 'Alice', 'Alice'])
x1 = pd.Categorical(['Alice', 'Bob', 'Charlie', 'Dennis', 'Alice', 'Alice'])
dsk = {('x', 0): x0, ('x', 1): x1}
x = da.Array(dsk, 'x', chunks=((6, 6),))
p = da.percentile(x, [50])
assert (p.compute().categories == x0.categories).all()
assert (p.compute().codes == [0]).all()
assert same_keys(da.percentile(x, [50]),
da.percentile(x, [50]))
def test_percentiles_with_empty_arrays():
x = da.ones(10, chunks=((5, 0, 5),))
assert_eq(da.percentile(x, [10, 50, 90]), np.array([1, 1, 1], dtype=x.dtype))
@pytest.mark.parametrize('q', [5, 5.0, np.int64(5), np.float64(5)])
def test_percentiles_with_scaler_percentile(q):
# Regression test to ensure da.percentile works with scalar percentiles
# See #3020
d = da.ones((16,), chunks=(4,))
assert_eq(da.percentile(d, q), np.array([1], dtype=d.dtype))
def test_unknown_chunk_sizes():
x = da.random.random(1000, chunks=(100,))
x._chunks = ((np.nan,) * 10,)
result = da.percentile(x, 50).compute()
assert 0.1 < result < 0.9
a, b = da.percentile(x, [40, 60]).compute()
assert 0.1 < a < 0.9
assert 0.1 < b < 0.9
assert a < b
| gpl-3.0 |
SciLifeLab/bcbio-nextgen | bcbio/rnaseq/count.py | 1 | 12286 | """
count number of reads mapping to features of transcripts
"""
import os
import sys
import itertools
# soft imports
try:
import HTSeq
import pandas as pd
import gffutils
except ImportError:
HTSeq, pd, gffutils = None, None, None
from bcbio.utils import file_exists
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio import bam
import bcbio.pipeline.datadict as dd
def _get_files(data):
mapped = bam.mapped(data["work_bam"], data["config"])
in_file = bam.sort(mapped, data["config"], order="queryname")
gtf_file = dd.get_gtf_file(data)
work_dir = dd.get_work_dir(data)
out_dir = os.path.join(work_dir, "htseq-count")
sample_name = dd.get_sample_name(data)
out_file = os.path.join(out_dir, sample_name + ".counts")
stats_file = os.path.join(out_dir, sample_name + ".stats")
return in_file, gtf_file, out_file, stats_file
def invert_strand(iv):
iv2 = iv.copy()
if iv2.strand == "+":
iv2.strand = "-"
elif iv2.strand == "-":
iv2.strand = "+"
else:
raise ValueError("Illegal strand")
return iv2
class UnknownChrom(Exception):
pass
def _get_stranded_flag(data):
strand_flag = {"unstranded": "no",
"firststrand": "reverse",
"secondstrand": "yes"}
stranded = dd.get_strandedness(data, "unstranded").lower()
    assert stranded in strand_flag, ("%s is not a valid strandedness value. "
                                     "Valid values are 'firststrand', 'secondstrand', "
                                     "and 'unstranded'." % stranded)
return strand_flag[stranded]
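# For example, a sample configured with strandedness "firststrand" maps to
# htseq-count's "--stranded=reverse", while "unstranded" maps to "--stranded=no".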
def htseq_count(data):
""" adapted from Simon Anders htseq-count.py script
http://www-huber.embl.de/users/anders/HTSeq/doc/count.html
"""
sam_filename, gff_filename, out_file, stats_file = _get_files(data)
stranded = _get_stranded_flag(data["config"])
overlap_mode = "union"
feature_type = "exon"
id_attribute = "gene_id"
minaqual = 0
if file_exists(out_file):
return out_file
logger.info("Counting reads mapping to exons in %s using %s as the "
"annotation and strandedness as %s." %
(os.path.basename(sam_filename), os.path.basename(gff_filename), dd.get_strandedness(data)))
features = HTSeq.GenomicArrayOfSets("auto", stranded != "no")
counts = {}
# Try to open samfile to fail early in case it is not there
open(sam_filename).close()
gff = HTSeq.GFF_Reader(gff_filename)
i = 0
try:
for f in gff:
if f.type == feature_type:
try:
feature_id = f.attr[id_attribute]
except KeyError:
sys.exit("Feature %s does not contain a '%s' attribute" %
(f.name, id_attribute))
if stranded != "no" and f.iv.strand == ".":
sys.exit("Feature %s at %s does not have strand "
"information but you are running htseq-count "
"in stranded mode. Use '--stranded=no'." %
(f.name, f.iv))
features[f.iv] += feature_id
counts[f.attr[id_attribute]] = 0
i += 1
if i % 100000 == 0:
sys.stderr.write("%d GFF lines processed.\n" % i)
except:
sys.stderr.write("Error occured in %s.\n"
% gff.get_line_number_string())
raise
sys.stderr.write("%d GFF lines processed.\n" % i)
if len(counts) == 0:
sys.stderr.write("Warning: No features of type '%s' found.\n"
% feature_type)
try:
align_reader = htseq_reader(sam_filename)
first_read = iter(align_reader).next()
pe_mode = first_read.paired_end
except:
sys.stderr.write("Error occured when reading first line of sam "
"file.\n")
raise
try:
if pe_mode:
read_seq_pe_file = align_reader
read_seq = HTSeq.pair_SAM_alignments(align_reader)
empty = 0
ambiguous = 0
notaligned = 0
lowqual = 0
nonunique = 0
i = 0
for r in read_seq:
i += 1
if not pe_mode:
if not r.aligned:
notaligned += 1
continue
try:
if r.optional_field("NH") > 1:
nonunique += 1
continue
except KeyError:
pass
if r.aQual < minaqual:
lowqual += 1
continue
if stranded != "reverse":
iv_seq = (co.ref_iv for co in r.cigar if co.type == "M"
and co.size > 0)
else:
iv_seq = (invert_strand(co.ref_iv) for co in r.cigar if
co.type == "M" and co.size > 0)
else:
if r[0] is not None and r[0].aligned:
if stranded != "reverse":
iv_seq = (co.ref_iv for co in r[0].cigar if
co.type == "M" and co.size > 0)
else:
iv_seq = (invert_strand(co.ref_iv) for co in r[0].cigar if
co.type == "M" and co.size > 0)
else:
iv_seq = tuple()
if r[1] is not None and r[1].aligned:
if stranded != "reverse":
iv_seq = itertools.chain(iv_seq,
(invert_strand(co.ref_iv) for co
in r[1].cigar if co.type == "M"
and co.size > 0))
else:
iv_seq = itertools.chain(iv_seq,
(co.ref_iv for co in r[1].cigar
if co.type == "M" and co.size
> 0))
else:
if (r[0] is None) or not (r[0].aligned):
notaligned += 1
continue
try:
if (r[0] is not None and r[0].optional_field("NH") > 1) or \
(r[1] is not None and r[1].optional_field("NH") > 1):
nonunique += 1
continue
except KeyError:
pass
if (r[0] and r[0].aQual < minaqual) or (r[1] and
r[1].aQual < minaqual):
lowqual += 1
continue
try:
if overlap_mode == "union":
fs = set()
for iv in iv_seq:
if iv.chrom not in features.chrom_vectors:
raise UnknownChrom
for iv2, fs2 in features[iv].steps():
fs = fs.union(fs2)
elif (overlap_mode == "intersection-strict" or
overlap_mode == "intersection-nonempty"):
fs = None
for iv in iv_seq:
if iv.chrom not in features.chrom_vectors:
raise UnknownChrom
for iv2, fs2 in features[iv].steps():
if (len(fs2) > 0 or overlap_mode == "intersection-strict"):
if fs is None:
fs = fs2.copy()
else:
fs = fs.intersection(fs2)
else:
sys.exit("Illegal overlap mode.")
if fs is None or len(fs) == 0:
empty += 1
elif len(fs) > 1:
ambiguous += 1
else:
counts[list(fs)[0]] += 1
except UnknownChrom:
if not pe_mode:
rr = r
else:
rr = r[0] if r[0] is not None else r[1]
empty += 1
if i % 100000 == 0:
sys.stderr.write("%d sam %s processed.\n" %
(i, "lines " if not pe_mode else "line pairs"))
except:
if not pe_mode:
sys.stderr.write("Error occured in %s.\n"
% read_seq.get_line_number_string())
else:
sys.stderr.write("Error occured in %s.\n"
% read_seq_pe_file.get_line_number_string())
raise
sys.stderr.write("%d sam %s processed.\n" %
(i, "lines " if not pe_mode else "line pairs"))
with file_transaction(data, out_file) as tmp_out_file:
with open(tmp_out_file, "w") as out_handle:
on_feature = 0
for fn in sorted(counts.keys()):
on_feature += counts[fn]
out_handle.write("%s\t%d\n" % (fn, counts[fn]))
with file_transaction(data, stats_file) as tmp_stats_file:
with open(tmp_stats_file, "w") as out_handle:
out_handle.write("on_feature\t%d\n" % on_feature)
out_handle.write("no_feature\t%d\n" % empty)
out_handle.write("ambiguous\t%d\n" % ambiguous)
out_handle.write("too_low_aQual\t%d\n" % lowqual)
out_handle.write("not_aligned\t%d\n" % notaligned)
out_handle.write("alignment_not_unique\t%d\n" % nonunique)
return out_file
def combine_count_files(files, out_file=None, ext=".fpkm"):
"""
combine a set of count files into a single combined file
"""
assert all([file_exists(x) for x in files]), \
"Some count files in %s do not exist." % files
for f in files:
assert file_exists(f), "%s does not exist or is empty." % f
col_names = [os.path.basename(x.split(ext)[0]) for x in files]
if not out_file:
out_dir = os.path.join(os.path.dirname(files[0]))
out_file = os.path.join(out_dir, "combined.counts")
if file_exists(out_file):
return out_file
for i, f in enumerate(files):
if i == 0:
df = pd.io.parsers.read_table(f, sep="\t", index_col=0, header=None,
names=[col_names[0]])
else:
df = df.join(pd.io.parsers.read_table(f, sep="\t", index_col=0,
header=None,
names=[col_names[i]]))
df.to_csv(out_file, sep="\t", index_label="id")
return out_file
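# Usage sketch (hypothetical file names; assumes per-sample count files such as
# those written by ``htseq_count`` above):
#
#     combined = combine_count_files(["A.counts", "B.counts"], ext=".counts")
#
# The combined table has one row per feature id and one column per sample, with
# column names derived from the file names.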
def annotate_combined_count_file(count_file, gtf_file, out_file=None):
dbfn = gtf_file + ".db"
if not file_exists(dbfn):
return None
if not gffutils:
return None
db = gffutils.FeatureDB(dbfn, keep_order=True)
if not out_file:
out_dir = os.path.dirname(count_file)
out_file = os.path.join(out_dir, "annotated_combined.counts")
# if the genes don't have a gene_id or gene_name set, bail out
try:
symbol_lookup = {f['gene_id'][0]: f['gene_name'][0] for f in
db.features_of_type('exon')}
except KeyError:
return None
df = pd.io.parsers.read_table(count_file, sep="\t", index_col=0, header=0)
df['symbol'] = df.apply(lambda x: symbol_lookup.get(x.name, ""), axis=1)
df.to_csv(out_file, sep="\t", index_label="id")
return out_file
def htseq_reader(align_file):
"""
returns a read-by-read sequence reader for a BAM or SAM file
"""
if bam.is_sam(align_file):
read_seq = HTSeq.SAM_Reader(align_file)
elif bam.is_bam(align_file):
read_seq = HTSeq.BAM_Reader(align_file)
else:
logger.error("%s is not a SAM or BAM file" % (align_file))
sys.exit(1)
return read_seq
| mit |
carlthome/librosa | librosa/feature/utils.py | 1 | 8078 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Feature manipulation utilities"""
from warnings import warn
import numpy as np
import scipy.signal
from .._cache import cache
from ..util.exceptions import ParameterError
__all__ = ['delta', 'stack_memory']
@cache(level=40)
def delta(data, width=9, order=1, axis=-1, mode='interp', **kwargs):
r'''Compute delta features: local estimate of the derivative
of the input data along the selected axis.
    Delta features are computed using Savitzky-Golay filtering.
Parameters
----------
data : np.ndarray
the input data matrix (eg, spectrogram)
width : int, positive, odd [scalar]
Number of frames over which to compute the delta features.
Cannot exceed the length of `data` along the specified axis.
        If `mode='interp'`, then `width` must not exceed `data.shape[axis]`.
order : int > 0 [scalar]
the order of the difference operator.
1 for first derivative, 2 for second, etc.
axis : int [scalar]
the axis along which to compute deltas.
Default is -1 (columns).
mode : str, {'interp', 'nearest', 'mirror', 'constant', 'wrap'}
Padding mode for estimating differences at the boundaries.
kwargs : additional keyword arguments
See `scipy.signal.savgol_filter`
Returns
-------
delta_data : np.ndarray [shape=(d, t)]
delta matrix of `data` at specified order
Notes
-----
This function caches at level 40.
See Also
--------
scipy.signal.savgol_filter
Examples
--------
Compute MFCC deltas, delta-deltas
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> mfcc = librosa.feature.mfcc(y=y, sr=sr)
>>> mfcc_delta = librosa.feature.delta(mfcc)
>>> mfcc_delta
array([[ 1.666e+01, 1.666e+01, ..., 1.869e-15, 1.869e-15],
[ 1.784e+01, 1.784e+01, ..., 6.085e-31, 6.085e-31],
...,
[ 7.262e-01, 7.262e-01, ..., 9.259e-31, 9.259e-31],
[ 6.578e-01, 6.578e-01, ..., 7.597e-31, 7.597e-31]])
>>> mfcc_delta2 = librosa.feature.delta(mfcc, order=2)
>>> mfcc_delta2
array([[ -1.703e+01, -1.703e+01, ..., 3.834e-14, 3.834e-14],
[ -1.108e+01, -1.108e+01, ..., -1.068e-30, -1.068e-30],
...,
[ 4.075e-01, 4.075e-01, ..., -1.565e-30, -1.565e-30],
[ 1.676e-01, 1.676e-01, ..., -2.104e-30, -2.104e-30]])
>>> import matplotlib.pyplot as plt
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(mfcc)
>>> plt.title('MFCC')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(mfcc_delta)
>>> plt.title(r'MFCC-$\Delta$')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(mfcc_delta2, x_axis='time')
>>> plt.title(r'MFCC-$\Delta^2$')
>>> plt.colorbar()
>>> plt.tight_layout()
>>> plt.show()
'''
data = np.atleast_1d(data)
if mode == 'interp' and width > data.shape[axis]:
raise ParameterError("when mode='interp', width={} "
"cannot exceed data.shape[axis]={}".format(width, data.shape[axis]))
if width < 3 or np.mod(width, 2) != 1:
raise ParameterError('width must be an odd integer >= 3')
if order <= 0 or not isinstance(order, int):
raise ParameterError('order must be a positive integer')
kwargs.pop('deriv', None)
kwargs.setdefault('polyorder', order)
return scipy.signal.savgol_filter(data, width,
deriv=order,
axis=axis,
mode=mode,
**kwargs)
@cache(level=40)
def stack_memory(data, n_steps=2, delay=1, **kwargs):
"""Short-term history embedding: vertically concatenate a data
vector or matrix with delayed copies of itself.
Each column `data[:, i]` is mapped to::
data[:, i] -> [data[:, i],
data[:, i - delay],
...
data[:, i - (n_steps-1)*delay]]
    For columns `i < (n_steps - 1) * delay`, the data will be padded.
By default, the data is padded with zeros, but this behavior can be
overridden by supplying additional keyword arguments which are passed
to `np.pad()`.
Parameters
----------
data : np.ndarray [shape=(t,) or (d, t)]
Input data matrix. If `data` is a vector (`data.ndim == 1`),
it will be interpreted as a row matrix and reshaped to `(1, t)`.
n_steps : int > 0 [scalar]
embedding dimension, the number of steps back in time to stack
delay : int != 0 [scalar]
the number of columns to step.
Positive values embed from the past (previous columns).
Negative values embed from the future (subsequent columns).
kwargs : additional keyword arguments
Additional arguments to pass to `np.pad`.
Returns
-------
data_history : np.ndarray [shape=(m * d, t)]
data augmented with lagged copies of itself,
        where `m == n_steps`.
Notes
-----
This function caches at level 40.
Examples
--------
Keep two steps (current and previous)
>>> data = np.arange(-3, 3)
>>> librosa.feature.stack_memory(data)
array([[-3, -2, -1, 0, 1, 2],
[ 0, -3, -2, -1, 0, 1]])
Or three steps
>>> librosa.feature.stack_memory(data, n_steps=3)
array([[-3, -2, -1, 0, 1, 2],
[ 0, -3, -2, -1, 0, 1],
[ 0, 0, -3, -2, -1, 0]])
Use reflection padding instead of zero-padding
>>> librosa.feature.stack_memory(data, n_steps=3, mode='reflect')
array([[-3, -2, -1, 0, 1, 2],
[-2, -3, -2, -1, 0, 1],
[-1, -2, -3, -2, -1, 0]])
Or pad with edge-values, and delay by 2
>>> librosa.feature.stack_memory(data, n_steps=3, delay=2, mode='edge')
array([[-3, -2, -1, 0, 1, 2],
[-3, -3, -3, -2, -1, 0],
[-3, -3, -3, -3, -3, -2]])
    Stack time-lagged beat-synchronous chroma with edge padding
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> chroma = librosa.feature.chroma_stft(y=y, sr=sr)
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=512)
>>> beats = librosa.util.fix_frames(beats, x_min=0, x_max=chroma.shape[1])
>>> chroma_sync = librosa.util.sync(chroma, beats)
>>> chroma_lag = librosa.feature.stack_memory(chroma_sync, n_steps=3,
... mode='edge')
Plot the result
>>> import matplotlib.pyplot as plt
>>> beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=512)
>>> librosa.display.specshow(chroma_lag, y_axis='chroma', x_axis='time',
... x_coords=beat_times)
>>> plt.yticks([0, 12, 24], ['Lag=0', 'Lag=1', 'Lag=2'])
>>> plt.title('Time-lagged chroma')
>>> plt.colorbar()
>>> plt.tight_layout()
>>> plt.show()
"""
if n_steps < 1:
raise ParameterError('n_steps must be a positive integer')
if delay == 0:
raise ParameterError('delay must be a non-zero integer')
data = np.atleast_2d(data)
t = data.shape[1]
kwargs.setdefault('mode', 'constant')
if kwargs['mode'] == 'constant':
kwargs.setdefault('constant_values', [0])
# Pad the end with zeros, which will roll to the front below
if delay > 0:
padding = (int((n_steps - 1) * delay), 0)
else:
padding = (0, int((n_steps - 1) * -delay))
data = np.pad(data, [(0, 0), padding], **kwargs)
history = data
# TODO: this could be more efficient
for i in range(1, n_steps):
history = np.vstack([np.roll(data, -i * delay, axis=1), history])
# Trim to original width
if delay > 0:
history = history[:, :t]
else:
history = history[:, -t:]
# Make contiguous
return np.asfortranarray(history)
| isc |
michigraber/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 272 | 6972 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of the calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
cavestruz/L500analysis | plotting/profiles/T_Vcirc_evolution/Vcirc_evolution/plot_Vcirc2_nu_binned_Vc500c.py | 1 | 3175 | from L500analysis.data_io.get_cluster_data import GetClusterData
from L500analysis.utils.utils import aexp2redshift
from L500analysis.plotting.tools.figure_formatting import *
from L500analysis.plotting.profiles.tools.profiles_percentile \
import *
from L500analysis.plotting.profiles.tools.select_profiles \
import nu_cut, prune_dict
from L500analysis.utils.constants import rbins
from derived_field_functions import *
color = matplotlib.cm.afmhot_r
aexps = [1.0,0.9,0.8,0.7,0.6,0.5,0.45,0.4,0.35]
nu_threshold = [2.3,2.7]
nu_label = r"%0.1f$\leq\nu_{500c}\leq$%0.1f"%(nu_threshold[0],nu_threshold[1])
db_name = 'L500_NR_0'
db_dir = '/home/babyostrich/Documents/Repos/L500analysis/'
profiles_list = ['r_mid',
'Vcirc2_Vc500c',
'M_dark', 'M_star', 'M_gas',
'R/R500c']
halo_properties_list=['r500c','M_total_500c','nu_500c']
Vcirc2ratioVc500c=r"$\tilde{V}=V^2_{c}/V^2_{c,500c}$"
fVcz1=r"$\tilde{V}/\tilde{V}(z=1)$"
pa = PlotAxes(figname='Vcirc2_Vc500c_nu%0.1f'%nu_threshold[0],
axes=[[0.15,0.4,0.80,0.55],[0.15,0.15,0.80,0.24]],
axes_labels=[Vcirc2ratioVc500c,fVcz1],
xlabel=r"$R/R_{500c}$",
xlim=(0.2,5),
ylims=[(0.6,1.4),(0.6,1.4)])
Vcirc2={}
clkeys = ['Vcirc2_Vc500c']
plots = [Vcirc2]
linestyles = ['-']
for aexp in aexps :
cldata = GetClusterData(aexp=aexp,db_name=db_name,
db_dir=db_dir,
profiles_list=profiles_list,
halo_properties_list=halo_properties_list)
nu_cut_hids = nu_cut(nu=cldata['nu_500c'], threshold=nu_threshold)
for plot, key in zip(plots,clkeys) :
pruned_profiles = prune_dict(d=cldata[key],k=nu_cut_hids)
plot[aexp] = calculate_profiles_mean_variance(pruned_profiles)
pa.axes[Vcirc2ratioVc500c].plot( rbins, Vcirc2[aexp]['mean'],color=color(aexp),
ls='-',label="$z=%3.1f$" % aexp2redshift(aexp))
pa.axes[Vcirc2ratioVc500c].fill_between(rbins, Vcirc2[0.5]['down'], Vcirc2[0.5]['up'],
color=color(0.5), zorder=0)
for aexp in aexps :
for V,ls in zip(plots,linestyles) :
fractional_evolution = get_profiles_division_mean_variance(
mean_profile1=V[aexp]['mean'],
var_profile1=V[aexp]['var'],
mean_profile2=V[0.5]['mean'],
var_profile2=V[0.5]['var'],
)
pa.axes[fVcz1].plot( rbins, fractional_evolution['mean'],
color=color(aexp),ls=ls)
pa.axes[Vcirc2ratioVc500c].annotate(nu_label, xy=(.75, .75), xytext=(.3, 1.3))
pa.axes[Vcirc2ratioVc500c].tick_params(labelsize=12)
pa.axes[fVcz1].set_yticks(arange(0.6,1.4,0.2))
matplotlib.rcParams['legend.handlelength'] = 0
matplotlib.rcParams['legend.numpoints'] = 1
matplotlib.rcParams['legend.fontsize'] = 12
pa.set_legend(axes_label=Vcirc2ratioVc500c,ncol=3,loc='upper right', frameon=False)
pa.color_legend_texts(axes_label=Vcirc2ratioVc500c)
pa.savefig()
| mit |
soleneulmer/atmos | indicators_molec.py | 1 | 4324 | # ===================================
# CALCULATES Ioff and Ires
# Indicators described in Molecfit II
#
# Solene 20.09.2016
# ===================================
#
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
# from PyAstronomy import pyasl
from scipy.interpolate import interp1d
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy import stats
# from sklearn.metrics import mean_squared_error
# from math import sqrt
# from numpy import linalg as LA
# MOLECFIT
#
file_molecfit = '/home/solene/atmos/For_Solene/1203nm/output/molecfit_crires_solene_tac.fits'
hdu_molecfit = fits.open(file_molecfit)
data_molecfit = hdu_molecfit[1].data
cols_molecfit = hdu_molecfit[1].columns
# cols_molecfit.info()
rawwl_molecfit = data_molecfit.field('mlambda')
wl_molecfit = rawwl_molecfit*10e2
trans_molecfit = data_molecfit.field('mtrans')
cflux_molecfit = data_molecfit.field('cflux')
# TELFIT
#
file_telfit = '/home/solene/atmos/trans_telfit.txt'
wl_telfit, trans_telfit, wl_datatelfit, flux_datatelfit = np.loadtxt(
file_telfit, unpack=True)
# Interpolation
f_molecfit = interp1d(wl_molecfit, cflux_molecfit, kind='cubic')
ftrans_molecfit = interp1d(wl_molecfit, trans_molecfit, kind='cubic')
# f_tapas = interp1d(wlcorr_tapas, trans_tapas)
# **1** BINNED DATA
# 3 delta-lambda = 0.036
# Mean and std deviation of bins on the telluric CORRECTED spectrum
fluxmean_bin_means, bin_edges, binnumber = stats.binned_statistic(
wl_datatelfit, f_molecfit(wl_datatelfit), statistic='mean',
bins=np.floor((wl_datatelfit[-1]-wl_datatelfit[0])/0.036))
fluxstd_bin_means, _, _ = stats.binned_statistic(
wl_datatelfit, f_molecfit(wl_datatelfit), statistic=np.std,
bins=np.floor((wl_datatelfit[-1]-wl_datatelfit[0])/0.036))
bin_width = (bin_edges[1] - bin_edges[0])
bin_centers = bin_edges[1:] - bin_width/2
# **2** Bins where average TRANSMISSION is > 0.99
flux_trans_mean_bin_means, _, _ = stats.binned_statistic(
wl_datatelfit, ftrans_molecfit(wl_datatelfit), statistic='mean',
bins=np.floor((wl_datatelfit[-1]-wl_datatelfit[0])/0.036))
# cont_bin_means = flux_trans_mean_bin_means[flux_trans_mean_bin_means > 0.99]
ind_cont = np.where(flux_trans_mean_bin_means > 0.99)
ind_out = np.where((flux_trans_mean_bin_means < 0.95) &
(flux_trans_mean_bin_means > 0.1))
# plt.plot(bin_centers[ind_cont], flux_trans_mean_bin_means[ind_cont], 'kx')
# **3** Interpolation of the continuum cubic
# f_cont = interp1d(bin_centers[ind_cont], flux_trans_mean_bin_means[ind_cont], kind='cubic')
# Extrapolation with constant value spline
f_cont = InterpolatedUnivariateSpline(
bin_centers[ind_cont], flux_trans_mean_bin_means[ind_cont], ext=3)
# bbox=[bin_centers[ind_cont][0], bin_centers[ind_cont][-1]],
# **5** Subtract cont to mean flux
# and Divide offset and std by interpolated continuum mean value
sys_offset = (fluxmean_bin_means - f_cont(bin_centers)) / f_cont(bin_centers)
flux_std = fluxstd_bin_means / f_cont(bin_centers)
# **6** independant WL = Divide by average absorption
absorp_molecfit = 1 - flux_trans_mean_bin_means
sys_offset_final = sys_offset / absorp_molecfit
flux_std_final = flux_std / absorp_molecfit
plt.figure(1)
plt.plot(wl_datatelfit, flux_datatelfit, 'b.-', label='Raw data')
# plt.hlines(flux_bin_means, bin_edges[:-1],
# bin_edges[1:], colors='g', lw=5, label='binned statistic of data')
plt.plot(bin_centers, fluxmean_bin_means, 'rx-', label='Mean binned data')
plt.plot(bin_centers, fluxstd_bin_means, 'kx-', label='Standard deviation binned data')
plt.legend()
plt.figure(2)
plt.plot(wl_datatelfit, flux_datatelfit, 'g.-', label='Data 2nd detector')
plt.plot(wl_molecfit, trans_molecfit, 'r-', label='Molecfit')
plt.plot(wl_datatelfit, f_molecfit(wl_datatelfit),
'b-', label='Corrected data - Molecfit')
plt.plot(wl_datatelfit, f_cont(wl_datatelfit),
'k-', label='Interpolated Continuum')
plt.plot(sys_offset_final[ind_out], flux_std_final[ind_out], 'kx')
plt.plot(flux_trans_mean_bin_means[ind_out],
sys_offset_final[ind_out], 'kx', label='Ioff vs Transmission')
plt.plot(flux_trans_mean_bin_means[ind_out],
flux_std_final[ind_out], 'r.', label='Ires vs Transmission')
plt.xlabel('Wavelength (nm)')
plt.ylabel('Transmission')
plt.legend(loc=3.)
plt.show()
| mit |
xuewei4d/scikit-learn | sklearn/decomposition/__init__.py | 14 | 1396 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from ._nmf import NMF, non_negative_factorization
from ._pca import PCA
from ._incremental_pca import IncrementalPCA
from ._kernel_pca import KernelPCA
from ._sparse_pca import SparsePCA, MiniBatchSparsePCA
from ._truncated_svd import TruncatedSVD
from ._fastica import FastICA, fastica
from ._dict_learning import (dict_learning, dict_learning_online,
sparse_encode, DictionaryLearning,
MiniBatchDictionaryLearning, SparseCoder)
from ._factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from ._lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'non_negative_factorization',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
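# Minimal usage sketch (illustrative only; ``X`` stands for any
# (n_samples, n_features) array):
#
#     from sklearn.decomposition import PCA
#     X_reduced = PCA(n_components=2).fit_transform(X)
#
# Most estimators in this module follow the same fit/transform pattern.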
| bsd-3-clause |
evidation-health/bokeh | bokeh/tests/test_sources.py | 26 | 3245 | from __future__ import absolute_import
import unittest
from unittest import skipIf
import warnings
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
from bokeh.models.sources import DataSource, ColumnDataSource, ServerDataSource
class TestColumnDataSources(unittest.TestCase):
def test_basic(self):
ds = ColumnDataSource()
self.assertTrue(isinstance(ds, DataSource))
def test_init_dict_arg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
def test_init_dict_data_kwarg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data=data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_arg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_data_kwarg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(data=df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
def test_add_with_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], name="foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6], name="bar")
self.assertEquals(name, "bar")
def test_add_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3])
self.assertEquals(name, "Series 0")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_add_with_and_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_remove_exists(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
assert name
ds.remove("foo")
self.assertEquals(ds.column_names, [])
def test_remove_exists2(self):
with warnings.catch_warnings(record=True) as w:
ds = ColumnDataSource()
ds.remove("foo")
self.assertEquals(ds.column_names, [])
self.assertEquals(len(w), 1)
self.assertEquals(w[0].category, UserWarning)
self.assertEquals(str(w[0].message), "Unable to find column 'foo' in data source")
class TestServerDataSources(unittest.TestCase):
def test_basic(self):
ds = ServerDataSource()
self.assertTrue(isinstance(ds, DataSource))
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
blaze/dask | dask/dataframe/hyperloglog.py | 3 | 2433 | """Implementation of HyperLogLog
This implements the HyperLogLog algorithm for cardinality estimation, found
in
Philippe Flajolet, Éric Fusy, Olivier Gandouet and Frédéric Meunier.
"HyperLogLog: the analysis of a near-optimal cardinality estimation
algorithm". 2007 Conference on Analysis of Algorithms. Nice, France
(2007)
"""
import numpy as np
import pandas as pd
from pandas.util import hash_pandas_object
def compute_first_bit(a):
"Compute the position of the first nonzero bit for each int in an array."
# TODO: consider making this less memory-hungry
bits = np.bitwise_and.outer(a, 1 << np.arange(32))
bits = bits.cumsum(axis=1).astype(bool)
return 33 - bits.sum(axis=1)
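# Worked example: for the 32-bit value 8 (binary ...01000) the lowest set bit is
# in position 4 (1-indexed), so compute_first_bit(np.array([8], dtype=np.uint32))
# returns array([4]); an input of 0 yields 33, the sentinel for "no bit set".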
def compute_hll_array(obj, b):
# b is the number of bits
if not 8 <= b <= 16:
raise ValueError("b should be between 8 and 16")
num_bits_discarded = 32 - b
m = 1 << b
# Get an array of the hashes
hashes = hash_pandas_object(obj, index=False)
if isinstance(hashes, pd.Series):
hashes = hashes._values
hashes = hashes.astype(np.uint32)
# Of the first b bits, which is the first nonzero?
j = hashes >> num_bits_discarded
first_bit = compute_first_bit(hashes)
# Pandas can do the max aggregation
df = pd.DataFrame({"j": j, "first_bit": first_bit})
series = df.groupby("j").max()["first_bit"]
# Return a dense array so we can concat them and get a result
# that is easy to deal with
return series.reindex(np.arange(m), fill_value=0).values.astype(np.uint8)
def reduce_state(Ms, b):
m = 1 << b
# We concatenated all of the states, now we need to get the max
# value for each j in both
Ms = Ms.reshape((len(Ms) // m), m)
return Ms.max(axis=0)
def estimate_count(Ms, b):
m = 1 << b
# Combine one last time
M = reduce_state(Ms, b)
# Estimate cardinality, no adjustments
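    # Raw HyperLogLog estimate: E = alpha_m * m**2 / sum_j 2**(-M[j]), with
    # alpha_m approximated by the constant below (valid for m >= 128).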
alpha = 0.7213 / (1 + 1.079 / m)
E = alpha * m / (2.0 ** -(M.astype("f8"))).sum() * m
# ^^^^ starts as unsigned, need a signed type for
# negation operator to do something useful
# Apply adjustments for small / big cardinalities, if applicable
if E < 2.5 * m:
V = (M == 0).sum()
if V:
return m * np.log(m / V)
if E > 2 ** 32 / 30.0:
return -(2 ** 32) * np.log1p(-E / 2 ** 32)
return E
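# End-to-end usage sketch (illustrative only; the Series below is made up):
#
#     import pandas as pd
#     s = pd.Series(np.random.randint(0, 50000, size=1000000))
#     b = 12                        # 2**12 = 4096 registers
#     M = compute_hll_array(s, b)   # dense per-chunk state
#     approx = estimate_count(M, b)
#
# As the comments above describe, the intended pipeline computes one state per
# chunk, concatenates them, reduces with ``reduce_state``, and finishes with
# ``estimate_count``.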
| bsd-3-clause |
blekhmanlab/hominid | hominid/sort_results.py | 1 | 6152 | """
Read a rvcf file with stability selection scores for taxa.
Sort the dataframe by rsq_median.
Print results.
usage:
python sort_results.py \
../example/stability_selection_example_output.vcf \
../example/hominid_example_taxon_table_input.txt \
arcsinsqrt \
0.5 \
10
"""
import argparse
import sys
import pandas as pd
from hominid.hominid import read_taxon_file, align_snp_and_taxa
def sort_results(rvcf_input_file_path, taxon_table_file_path, transform,
r_sqr_median_cutoff, stability_cutoff, snp_count, no_tables,
extra_columns):
print('plotting {} SNPs from {}'.format(snp_count, rvcf_input_file_path))
# read the rvcf file and sort by rsq_median
df = pd.read_csv(rvcf_input_file_path, sep='\t', dtype={'CHROM': str})
#print('df.shape: {}'.format(df.shape))
sorted_rsq_best_medians_df = df.sort_values(by='rsq_median', ascending=False)
x_df = sorted_rsq_best_medians_df[sorted_rsq_best_medians_df.rsq_median > r_sqr_median_cutoff]
print('{} SNPs with r_sqr > {:5.3f}'.format(x_df.shape[0], r_sqr_median_cutoff))
taxon_table_df = read_taxon_file(taxon_table_file_path, transform=transform)
for row_i in range(sorted_rsq_best_medians_df.shape[0]):
if row_i >= snp_count:
break
else:
# get a 1-row dataframe
snp_df = sorted_rsq_best_medians_df.iloc[[row_i]]
aligned_snp_df, aligned_taxa_df = align_snp_and_taxa(
snp_df,
taxon_table_df
)
# get the taxon stability selection scores
# use the taxon table df index to get column names for snp_df
taxon_scores_df = snp_df.loc[:, taxon_table_df.index].transpose()
sorted_taxon_scores_df = taxon_scores_df.sort_values(by=taxon_scores_df.columns[0], ascending=False)
#sorted_taxon_scores_df = taxon_scores_df.sort(taxon_scores_df.columns[0], ascending=False)
p_df_list = []
print('{} {} {:5.3f}'.format(snp_df.iloc[0].GENE, snp_df.iloc[0].ID, snp_df.iloc[0].rsq_median))
summary_line = '{}\t{}\t'.format(snp_df.iloc[0].GENE, snp_df.iloc[0].ID)
for i, (selected_taxon, selected_taxon_row) in enumerate(sorted_taxon_scores_df.iterrows()):
# use selected_taxon_row.index[0] to index the first and only column
selected_taxon_score = selected_taxon_row.iloc[0]
if selected_taxon_score < stability_cutoff:
#print('done with selected taxa')
break
else:
# trim 'Root;' from the front of the taxon name
if selected_taxon.startswith('Root;'):
taxon_name = selected_taxon[5:]
else:
taxon_name = selected_taxon
print(' {:5.3f} {}'.format(selected_taxon_score, taxon_name))
summary_line += '{}, '.format(taxon_name)
gts = [
snp_df.iloc[0].REF + snp_df.iloc[0].REF, # 0
snp_df.iloc[0].REF + snp_df.iloc[0].ALT, # 1
snp_df.iloc[0].ALT + snp_df.iloc[0].ALT # 2
]
aligned_snp_value_list = aligned_snp_df.values.flatten().tolist()
data_dict = {
'chromosome': [snp_df.iloc[0].CHROM] * aligned_snp_df.shape[1],
'snp_id': [snp_df.iloc[0].ID] * aligned_snp_df.shape[1],
'gene': [snp_df.iloc[0].GENE] * aligned_snp_df.shape[1],
'taxon': [selected_taxon] * aligned_snp_df.shape[1],
'abundance': aligned_taxa_df[selected_taxon].values.tolist(),
'variant_allele_count': [str(int(v)) for v in aligned_snp_value_list],
'genotype': [gts[int(v)] for v in aligned_snp_value_list],
'sample_id' : aligned_snp_df.columns
}
columns_to_display = ['abundance', 'variant_allele_count', 'genotype', 'sample_id']
if extra_columns:
for extra_column in extra_columns.split(','):
data_dict[extra_column] = snp_df.iloc[0][extra_column]
columns_to_display.append(extra_column)
p_df = pd.DataFrame(data_dict)
p_df_list.append(p_df)
if no_tables:
pass
else:
p_df[columns_to_display].to_csv(
sys.stdout,
sep='\t'
)
# save a stacked bar plot
if len(p_df_list) > 0:
file_name = 'stacked_bar_plot_selected_taxa_{}_{}.pdf'.format(
snp_df.iloc[0].GENE,
snp_df.iloc[0].ID
)
p_df = pd.concat(p_df_list, axis=0)
# at this point the index for p_df looks like
# 0...76.0...76.0...76
# replace the index
p_df.index = range(p_df.shape[0])
#p_df.to_csv(file_path, sep='\t')
stacked_bar_title = '{}\n{}'.format(snp_df.iloc[0].GENE, snp_df.iloc[0].ID)
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument('rvcf_input_file_path')
argparser.add_argument('taxon_table_file_path')
argparser.add_argument('transform')
argparser.add_argument(
'r_sqr_median_cutoff',
type=float
)
argparser.add_argument(
'stability_cutoff',
type=float
)
argparser.add_argument(
'snp_count',
type=int
)
argparser.add_argument(
'--no-tables',
action='store_true'
)
argparser.add_argument(
'--extra-columns',
type=str
)
args = argparser.parse_args()
print(args)
sort_results(**vars(args))
if __name__ == '__main__':
main()
| mit |
pradyu1993/scikit-learn | sklearn/datasets/tests/test_lfw.py | 2 | 6778 | """This test for the LFW require medium-size data dowloading and processing
If the data has not been already downloaded by runnning the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
# PIL is not properly installed, skip those tests
raise SkipTest
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write('Text file to be ignored by the dataset loader.')
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write("10\n")
more_than_two = [name for name, count in counts.iteritems()
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write('%s\t%d\t%d\n' % (name, first, second))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(range(counts[first_name]))
second_index = random_state.choice(range(counts[second_name]))
f.write('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write("Fake place holder that won't be tested")
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write("Fake place holder that won't be tested")
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
load_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_people():
lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and no limit on the number of pictures per person
lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
load_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100)
@raises(IOError)
def test_load_empty_lfw_pairs():
load_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion
lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
hyperspy/hyperspyUI | hyperspyui/plugins/mva.py | 2 | 15334 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 The HyperSpyUI developers
#
# This file is part of HyperSpyUI.
#
# HyperSpyUI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpyUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpyUI. If not, see <http://www.gnu.org/licenses/>.
"""
Created on Fri Dec 12 23:44:01 2014
@author: Vidar Tonaas Fauske
"""
from hyperspyui.plugins.plugin import Plugin
from qtpy import QtCore, QtWidgets
from qtpy.QtWidgets import QDialog, QDialogButtonBox, QLineEdit, QLabel
from hyperspy.learn.mva import LearningResults
from hyperspyui.util import win2sig, fig2win, Namespace
from hyperspyui.threaded import ProgressThreaded, ProcessCanceled
from hyperspyui.widgets.extendedqwidgets import ExToolWindow
def tr(text):
return QtCore.QCoreApplication.translate("MVA", text)
def align_yaxis(ax1, v1, ax2, v2):
"""adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
if y2 > y1:
ratio = y1 / y2
else:
ratio = y2 / y1
inv = ax2.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, y1 - y2))
miny2, maxy2 = ax2.get_ylim()
ax2.set_ylim((miny2 + dy) / ratio, (maxy2 + dy) / ratio)
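# Usage sketch (assumes two overlaid matplotlib axes, e.g. ``ax2 = ax1.twinx()``):
# ``align_yaxis(ax1, 0, ax2, 0)`` rescales the second y-axis so that y=0 lines
# up on both axes.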
def make_advanced_dialog(ui, algorithms=None):
diag = ExToolWindow(ui)
diag.setWindowTitle("Decomposition parameters")
vbox = QtWidgets.QVBoxLayout()
if algorithms:
lbl_algo = QLabel(tr("Choose algorithm:"))
        cbo_algo = QtWidgets.QComboBox()
cbo_algo.addItems(algorithms)
vbox.addWidget(lbl_algo)
vbox.addWidget(cbo_algo)
else:
lbl_comp = QLabel(tr(
"Enter a comma-separated list of component numbers to use for "
"the model:"))
txt_comp = QLineEdit()
vbox.addWidget(lbl_comp)
vbox.addWidget(txt_comp)
btns = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
QtCore.Qt.Horizontal)
btns.accepted.connect(diag.accept)
btns.rejected.connect(diag.reject)
vbox.addWidget(btns)
diag.setLayout(vbox)
diag.algorithm = lambda: cbo_algo.currentText()
diag.components = lambda: [int(s) for s in txt_comp.text().split(',')]
return diag
class MVA_Plugin(Plugin):
"""
Implements MVA decomposition utilities.
"""
name = 'MVA' # Used for settings groups etc
coc_values = {'convert': tr("Convert"),
'copy': tr("Copy")}
# ----------- Plugin interface -----------
def create_actions(self):
self.settings.set_default('convert_or_copy', None)
self.settings.set_enum_hint('convert_or_copy',
self.coc_values.keys())
self.add_action('plot_decomposition_results',
tr("Decompose"),
self.plot_decomposition_results,
icon='pca.svg',
tip=tr("Decompose signal using Principle Component "
"analysis"),
selection_callback=self.selection_rules)
self.add_action('pca', tr("Decomposition model"), self.pca,
icon='pca.svg',
tip=tr("Create a Principal Component Analysis "
"decomposition model"),
selection_callback=self.selection_rules)
self.add_action('bss', tr("BSS"), self.bss,
icon='bss.svg',
tip=tr("Run Blind Source Separation"),
selection_callback=self.selection_rules)
self.add_action('bss_model', tr("BSS model"), self.bss_model,
icon='bss.svg',
tip=tr("Create a Blind Source Separation "
"decomposition model"),
selection_callback=self.selection_rules)
self.add_action('clear', tr("Clear"), self.clear,
tip=tr("Clear decomposition cache"),
selection_callback=self.selection_rules)
def create_menu(self):
self.add_menuitem('Decomposition',
self.ui.actions['plot_decomposition_results'])
self.add_menuitem('Decomposition', self.ui.actions['pca'])
self.add_menuitem('Decomposition', self.ui.actions['bss'])
self.add_menuitem('Decomposition', self.ui.actions['bss_model'])
self.add_menuitem('Decomposition', self.ui.actions['clear'])
def create_toolbars(self):
self.add_toolbar_button("Decomposition", self.ui.actions['pca'])
self.add_toolbar_button("Decomposition", self.ui.actions['bss'])
def selection_rules(self, win, action):
"""
Callback to determine if action is valid for the passed window.
"""
s = win2sig(win, self.ui.signals)
if s is None or s.signal.data.ndim <= 1:
action.setEnabled(False)
else:
action.setEnabled(True)
# ------------ Action implementations --------------
def _get_signal(self, signal):
"""
        Get a valid signal. If the signal is None, it uses the currently
        selected one. If the signal type is not float, it either converts it,
        or gets a copy of the correct type, depending on the 'convert_or_copy'
setting.
"""
if signal is None:
signal = self.ui.get_selected_wrapper()
s = signal.signal
if s.data.dtype.char not in ['e', 'f', 'd']: # If not float
cc = self.settings.get_or_prompt(
'convert_or_copy',
[kv for kv in self.coc_values.items()],
title=tr("Convert or copy"),
descr=tr(
"Signal data has the wrong data type (float needed)." +
"Would you like to convert the current signal, or " +
"perform the decomposition on a copy?"))
if cc is None:
# User canceled
raise ProcessCanceled()
if cc == 'copy':
s = s.deepcopy()
s.metadata.General.title = signal.name + "[float]"
s.plot()
s.change_dtype(float)
return s, signal
def _do_decomposition(self, s, force=False, algorithm=None):
"""
Makes sure we have decomposition results. If results already are
available, it will only recalculate if the `force` parameter is True.
"""
if algorithm:
s.decomposition(algorithm=algorithm)
elif force or s.learning_results.explained_variance_ratio is None:
s.decomposition()
return s
def _do_bss(self, s, n_components, algorithm=None):
"""
        Performs blind source separation on the signal's existing decomposition
        results, keeping `n_components` components, optionally with a specific
        `algorithm`.
"""
if algorithm:
s.blind_source_separation(n_components, algorithm=algorithm)
else:
s.blind_source_separation(n_components)
def get_bss_results(self, signal):
factors = signal.get_bss_factors()
loadings = signal.get_bss_loadings()
factors.axes_manager._axes[0] = loadings.axes_manager._axes[0]
return loadings, factors
def _record(self, autosig, model, signal, n_components):
if autosig:
self.record_code(r"<p>.{0}(n_components={1})".format(
model, n_components))
else:
self.record_code(r"<p>.{0}({1}, n_components={2})".format(
model, signal, n_components))
def _decompose_threaded(self, callback, label, signal=None,
algorithm=None, ns=None):
if ns is None:
ns = Namespace()
ns.autosig = signal is None
ns.s, signal = self._get_signal(signal)
def do_threaded():
ns.s = self._do_decomposition(ns.s, algorithm=algorithm)
def on_error(message=None):
em = QtWidgets.QErrorMessage(self.ui)
msg = tr("An error occurred during decomposition")
if message:
msg += ":\n" + message
em.setWindowTitle(tr("Decomposition error"))
em.showMessage(msg)
t = ProgressThreaded(self.ui, do_threaded, lambda: callback(ns),
label=label)
t.worker.error[str].connect(on_error)
t.run()
def _perform_model(self, ns, n_components):
# Num comp. picked, get model, wrap new signal and plot
if ns.model == 'pca':
sc = ns.s.get_decomposition_model(n_components)
sc.metadata.General.title = ns.signal.name + "[PCA-model]"
sc.plot()
elif ns.model == 'bss' or ns.model.startswith('bss.'):
if ns.model.startswith('bss.'):
algorithm = ns.model[len('bss.'):]
self._do_bss(ns.s, n_components, algorithm=algorithm)
else:
self._do_bss(ns.s, n_components)
f, o = self.get_bss_results(ns.s)
o.metadata.add_dictionary(ns.s.metadata.as_dictionary())
f.metadata.General.title = ns.signal.name + "[BSS-Factors]"
o.metadata.General.title = ns.signal.name + "[BSS-Loadings]"
f.plot()
o.plot()
elif ns.model == 'bss_model':
# Here we have to assume the user has actually performed the BSS
# decomposition first!
sc = ns.s.get_bss_model(n_components)
sc.metadata.General.title = ns.signal.name + "[BSS-model]"
sc.plot()
if not ns.recorded:
self._record(ns.autosig, ns.model, ns.signal, n_components)
def _show_scree(self, ns, callback):
ax = ns.s.plot_explained_variance_ratio()
# Clean up plot and present, allow user to select components
# by picker
ax.set_title("")
scree = ax.get_figure().canvas
scree.draw()
scree.setWindowTitle("Pick number of components")
def clicked(event):
n_components = int(round(event.xdata))
# Close scree plot
w = fig2win(scree.figure, self.ui.figures)
w.close()
callback(ns, n_components)
scree.mpl_connect('button_press_event', clicked)
def do_after_scree(self, model, signal=None, n_components=None):
"""
Performs decomposition, then plots the scree for the user to select
the number of components to use for a decomposition model. The
selection is made by clicking on the scree, which closes the scree
and creates the model.
"""
ns = Namespace()
ns.autosig = signal is None
ns.model = model
ns.s, ns.signal = self._get_signal(signal)
if n_components is not None:
self._record(ns.autosig, ns.model, ns.signal, n_components)
ns.recorded = True
else:
ns.recorded = False
def on_complete(ns):
if n_components is None:
self._show_scree(ns, self._perform_model)
else:
self._perform_model(ns, n_components)
        self._decompose_threaded(on_complete, "Performing %s" % model.upper(),
                                 ns=ns)
def plot_decomposition_results(self, signal=None, advanced=False):
"""
Performs decomposition if necessary, then plots the decomposition
results according to the hyperspy implementation.
"""
def on_complete(ns):
ns.s.plot_decomposition_results()
# Somewhat speculative workaround to HSPY not adding metadata
sd = self.ui.hspy_signals[-1]
sd.metadata.add_dictionary(ns.s.metadata.as_dictionary())
if advanced:
diag = make_advanced_dialog(
self.ui, ['svd', 'fast_svd', 'mlpca', 'fast_mlpca', 'nmf',
'sparse_pca', 'mini_batch_sparse_pca'])
dr = diag.exec_()
if dr == QDialog.Accepted:
self._decompose_threaded(
on_complete, "Decomposing signal",
algorithm=diag.algorithm())
else:
self._decompose_threaded(on_complete, "Decomposing signal")
def pca(self, signal=None, n_components=None, advanced=False):
"""
Performs decomposition, then plots the scree for the user to select
the number of components to use for a decomposition model. The
selection is made by clicking on the scree, which closes the scree
and creates the model.
"""
if advanced:
diag = make_advanced_dialog(self.ui)
dr = diag.exec_()
if dr == QDialog.Accepted:
self.do_after_scree(
'pca', signal, n_components=diag.components())
else:
self.do_after_scree('pca', signal, n_components)
def bss(self, signal=None, n_components=None, advanced=False):
"""
        Performs decomposition if necessary, then plots the scree for the user
to select the number of components to use for a blind source
separation. The selection is made by clicking on the scree, which
closes the scree and creates the model.
"""
if advanced:
diag = make_advanced_dialog(
self.ui, ['orthomax', 'sklearn_fastica', 'FastICA', 'JADE',
'CuBICA', 'TDSEP'])
dr = diag.exec_()
if dr == QDialog.Accepted:
model = 'bss.' + diag.algorithm()
self.do_after_scree(model, signal, n_components)
else:
self.do_after_scree('bss', signal, n_components)
def bss_model(self, signal=None, n_components=None, advanced=False):
"""
        Performs decomposition if necessary, then plots the scree for the user
to select the number of components to use for a blind source
separation model. The selection is made by clicking on the scree, which
closes the scree and creates the model.
"""
if advanced:
diag = make_advanced_dialog(self.ui)
dr = diag.exec_()
if dr == QDialog.Accepted:
self.do_after_scree(
'bss_model', signal, n_components=diag.components())
else:
self.do_after_scree('bss_model', signal, n_components)
def clear(self, signal=None):
"""
Clears the learning results from the signal.
"""
if signal is None:
signal = self.ui.get_selected_signal()
signal.learning_results = LearningResults()
| gpl-3.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/misc/rasterization_demo.py | 6 | 1257 | import numpy as np
import matplotlib.pyplot as plt
d = np.arange(100).reshape(10, 10)
x, y = np.meshgrid(np.arange(11), np.arange(11))
theta = 0.25*np.pi
xx = x*np.cos(theta) - y*np.sin(theta)
yy = x*np.sin(theta) + y*np.cos(theta)
ax1 = plt.subplot(221)
ax1.set_aspect(1)
ax1.pcolormesh(xx, yy, d)
ax1.set_title("No Rasterization")
ax2 = plt.subplot(222)
ax2.set_aspect(1)
ax2.set_title("Rasterization")
m = ax2.pcolormesh(xx, yy, d)
m.set_rasterized(True)
ax3 = plt.subplot(223)
ax3.set_aspect(1)
ax3.pcolormesh(xx, yy, d)
ax3.text(0.5, 0.5, "Text", alpha=0.2,
va="center", ha="center", size=50, transform=ax3.transAxes)
ax3.set_title("No Rasterization")
ax4 = plt.subplot(224)
ax4.set_aspect(1)
m = ax4.pcolormesh(xx, yy, d)
m.set_zorder(-20)
ax4.text(0.5, 0.5, "Text", alpha=0.2,
zorder=-15,
va="center", ha="center", size=50, transform=ax4.transAxes)
ax4.set_rasterization_zorder(-10)
ax4.set_title("Rasterization z$<-10$")
# ax2.title.set_rasterized(True) # should display a warning
plt.savefig("test_rasterization.pdf", dpi=150)
plt.savefig("test_rasterization.eps", dpi=150)
if not plt.rcParams["text.usetex"]:
plt.savefig("test_rasterization.svg", dpi=150)
# svg backend currently ignores the dpi
| gpl-2.0 |
fzalkow/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying numbers of Monte Carlo samples (in the case
of :class:`RBFSampler`, which uses random Fourier features) and for
different-sized subsets of the training set (for :class:`Nystroem`) used for
the approximate mapping are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular, note that
a datapoint (represented as a dot) is not necessarily classified
into the region in which it appears to lie, since it does not lie on the
plane spanned by the first two principal components.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier to this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second subplot for the timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
jmchen-g/models | autoencoder/MaskingNoiseAutoencoderRunner.py | 10 | 1689 | import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder.autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def standard_scale(X_train, X_test):
preprocessor = prep.StandardScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 100
batch_size = 128
display_step = 1
autoencoder = MaskingNoiseAutoencoder(n_input = 784,
n_hidden = 200,
transfer_function = tf.nn.softplus,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
dropout_probability = 0.95)
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
cost = autoencoder.partial_fit(batch_xs)
avg_cost += cost / n_samples * batch_size
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch + 1), \
"cost=", "{:.9f}".format(avg_cost)
print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
| apache-2.0 |
hugohmk/Epidemic-Emulator | main.py | 1 | 7208 | from epidemic_emulator import node
from datetime import datetime
import platform
import argparse
import time
import os
import matplotlib.pyplot as plt
import random
def parse_network(f, node_id, topology = "clique"):
neighbors = []
nd = None
t = datetime.now()
    t = t - t  # zero timedelta, used as the initial timestamp of each node's state
net = []
index = -1
cnt = 0
for i in f:
i = i.rstrip("\n").split("|")
if len(i)<4:
continue
u = (i[0],(i[1],int(i[2])),[(i[3],t)])
if i[0]==node_id:
nd = u
index = cnt
net.append(u)
cnt+=1
f.close()
# clique
if topology == "clique":
neighbors = [i for i in net if i[0] != node_id]
# star
elif topology == "star":
if index > 0:
neighbors = [net[0]]
else:
neighbors = net[1:]
return neighbors,nd
def simulation_controller(args,nd,network):
# Example nd value:
#('9', ('127.0.0.1', 9179), [('S', datetime.timedelta(0))])
#
# network is a tuple containing every node identifier constructed from
# args.network (default=network.txt) file
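    # For illustration, following the line format given in the --network help
    # string (node_id|node_ip|port_number|initial_state): a network.txt line
    # such as "9|127.0.0.1|9179|S" is parsed by parse_network() into the tuple
    # ('9', ('127.0.0.1', 9179), [('S', datetime.timedelta(0))]).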
r = args.recovery_rate
e = args.endogenous_rate
x = args.exogenous_rate
if nd is not None:
with node.Node(r,e,x) as a:
a.start(nd, network)
if args.interaction == 1:
try:
help_text = """>> Commands:
0 (help) -> print this
1 (print current) -> print current network state
2 (print history) -> print network history
3 (end) -> send shutdown message to all nodes
4 (display state) -> display current network state
5 (display history) -> display network history
"""
print help_text
while True:
opt = raw_input(">> Insert command: ")
if opt == "0":
print help_text
elif opt == "1":
#print a.network_state(),"\n"
a.print_state()
elif opt == "2":
#print a.network_history(),"\n"
a.print_history()
elif opt == "3":
a.display_history()
a.network_shutdown()
a.stop()
break
elif opt == "4":
a.display_state()
elif opt == "5":
a.display_history()
else:
print "Invalid input\n"
except:
a.network_shutdown()
a.stop()
finally:
a.network_shutdown()
a.stop()
elif args.interaction > 1:
print("Running simulation for %d seconds." % args.interaction)
time.sleep(args.interaction)
#a.display_history()
simdata = a.save_simulation_data()
a.network_shutdown()
a.stop()
return simdata
else:
try:
while not a.stopped():
time.sleep(2)
except:
a.stop()
finally:
a.stop()
def process_data(simdata,repetitions,simulation_time):
simresults = [[-1 for t in range(simulation_time+1)] for x in range(repetitions)]
print_stuff = 1
for k in range(repetitions):
if print_stuff:
print("")
print("Run #%d" % (k+1))
print("time\tinfected count")
t = 0
for event in simdata[k]:
if print_stuff: print("%.2f\t%d" % (event[0],event[1]))
time = int(event[0])
infected_count = event[1]
if time < t:
continue
elif t < simulation_time+1:
if print_stuff: print("* %.2f" % event[0])
while t <= time:
simresults[k][t] = infected_count
t = t+1
while t < simulation_time+1:
simresults[k][t] = infected_count
t = t+1
if print_stuff:
print("")
print("Processed output:")
print("time\tinfected count")
for t in range(simulation_time+1):
print("%d\t%d" % (t,simresults[k][t]))
average_results = [0.0 for t in range(simulation_time+1)]
for t in range(simulation_time+1):
for k in range(repetitions):
average_results[t] = average_results[t] + simresults[k][t]
average_results[t] = float(average_results[t]) / repetitions
print(average_results)
plt.plot(list(range(0,simulation_time+1)),average_results,'-o')
axes = plt.gca()
axes.set_xlim([0,simulation_time])
#axes.set_ylim([0,10])
plt.xlabel("Seconds")
plt.ylabel("Infected nodes")
plt.savefig("average_simulation.pdf")
if __name__ == "__main__":
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path_unix = dir_path.replace("\\","/")
if (platform.system()!="Windows"): dir_path = dir_path_unix
parser = argparse.ArgumentParser()
parser.add_argument("-id","--identifier",required=True,
help="Node identifier")
parser.add_argument("-n","--network",type=argparse.FileType('r'), default = dir_path_unix+"/network.txt",
help="File that contains the network's description; each line presents node_id|node_ip|port_number|initial_state")
# parser.add_argument("-i","--interactive",type=int,default=0,
# help="Interactive mode")
parser.add_argument("-i","--interaction",type=int,default=0,
help="Interaction mode: default (0), interactive (1), simulation (2)")
parser.add_argument("-r","--recovery_rate",type=float,#default=1.0,
help="Simulation parameter: recovery_rate")
parser.add_argument("-e","--endogenous_rate",type=float,#default=1.0,
help="Simulation parameter: endogenous_infection_rate")
parser.add_argument("-x","--exogenous_rate",type=float,#default=1e-6,
help="Simulation parameter: exogenous_infection_rate")
parser.add_argument("-t","--topology",choices=["clique","star"],default="clique",
help="Network topology: clique or star")
args = parser.parse_args()
network = {}
if args.network is not None:
network,nd = parse_network(args.network, args.identifier, args.topology)
simulation_time = args.interaction
repetitions = 1
simdata = []
for i in range(repetitions):
simdata.append(simulation_controller(args,nd,network))
if args.identifier == '0':
process_data(simdata,repetitions,simulation_time)
| mit |
spallavolu/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
vascotenner/holoviews | holoviews/plotting/mpl/annotation.py | 1 | 3913 | import matplotlib
from matplotlib import patches as patches
from ...core.util import match_spec
from ...core.options import abbreviated_exception
from .element import ElementPlot
class AnnotationPlot(ElementPlot):
"""
AnnotationPlot handles the display of all annotation elements.
"""
def __init__(self, annotation, **params):
self._annotation = annotation
super(AnnotationPlot, self).__init__(annotation, **params)
self.handles['annotations'] = []
def initialize_plot(self, ranges=None):
annotation = self.hmap.last
key = self.keys[-1]
ranges = self.compute_ranges(self.hmap, key, ranges)
ranges = match_spec(annotation, ranges)
axis = self.handles['axis']
opts = self.style[self.cyclic_index]
with abbreviated_exception():
handles = self.draw_annotation(axis, annotation.data, opts)
self.handles['annotations'] = handles
return self._finalize_axis(key, ranges=ranges)
def update_handles(self, key, axis, annotation, ranges, style):
# Clear all existing annotations
for element in self.handles['annotations']:
element.remove()
with abbreviated_exception():
self.handles['annotations'] = self.draw_annotation(axis, annotation.data, style)
class VLinePlot(AnnotationPlot):
"Draw a vertical line on the axis"
style_opts = ['alpha', 'color', 'linewidth', 'linestyle', 'visible']
def draw_annotation(self, axis, position, opts):
return [axis.axvline(position, **opts)]
class HLinePlot(AnnotationPlot):
"Draw a horizontal line on the axis"
style_opts = ['alpha', 'color', 'linewidth', 'linestyle', 'visible']
def draw_annotation(self, axis, position, opts):
"Draw a horizontal line on the axis"
return [axis.axhline(position, **opts)]
class TextPlot(AnnotationPlot):
"Draw the Text annotation object"
style_opts = ['alpha', 'color', 'family', 'weight', 'rotation', 'fontsize', 'visible']
def draw_annotation(self, axis, data, opts):
(x,y, text, fontsize,
horizontalalignment, verticalalignment, rotation) = data
opts['fontsize'] = fontsize
return [axis.text(x,y, text,
horizontalalignment = horizontalalignment,
verticalalignment = verticalalignment,
rotation=rotation, **opts)]
class ArrowPlot(AnnotationPlot):
"Draw an arrow using the information supplied to the Arrow annotation"
_arrow_style_opts = ['alpha', 'color', 'lw', 'linewidth', 'visible']
_text_style_opts = TextPlot.style_opts
style_opts = sorted(set(_arrow_style_opts + _text_style_opts))
def draw_annotation(self, axis, data, opts):
direction, text, xy, points, arrowstyle = data
arrowprops = dict({'arrowstyle':arrowstyle},
**{k: opts[k] for k in self._arrow_style_opts if k in opts})
textopts = {k: opts[k] for k in self._text_style_opts if k in opts}
if direction in ['v', '^']:
xytext = (0, points if direction=='v' else -points)
elif direction in ['>', '<']:
xytext = (points if direction=='<' else -points, 0)
return [axis.annotate(text, xy=xy, textcoords='offset points',
xytext=xytext, ha="center", va="center",
arrowprops=arrowprops, **textopts)]
class SplinePlot(AnnotationPlot):
"Draw the supplied Spline annotation (see Spline docstring)"
style_opts = ['alpha', 'edgecolor', 'linewidth', 'linestyle', 'visible']
def draw_annotation(self, axis, data, opts):
verts, codes = data
patch = patches.PathPatch(matplotlib.path.Path(verts, codes),
facecolor='none', **opts)
axis.add_patch(patch)
return [patch]
| bsd-3-clause |
GkAntonius/feynman | examples/Solid_State_Physics/plot_eph.py | 2 | 1265 | """
Electron-phonon coupling self-energy
====================================
A diagram containing loopy lines.
"""
from feynman import Diagram
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8,2))
ax = fig.add_axes([0,0,1,1], frameon=False)
ax.set_xlim(0, fig.get_size_inches()[0])
ax.set_ylim(0, fig.get_size_inches()[1])
# Init D and ax
D = Diagram(ax)
D.x0 = 0.2
D.y0 = sum(D.ax.get_ylim()) * .35
# Various sizes
opwidth = 1.
linlen = 2.
txtpad = .8
wiggle_amplitude=.1
# Line styles
Ph_style = dict(style='elliptic loopy', ellipse_spread=.6, xamp=.10, yamp=-.15, nloops=15)
DW_style = dict(style='circular loopy', circle_radius=.7, xamp=.10, yamp=.15, nloops=18)
G_style = dict(style='simple', arrow=True, arrow_param={'width':0.15, 'length': .3})
# Item 1
v11 = D.vertex([D.x0, D.y0])
v12 = D.vertex(v11.xy, dx=opwidth)
Sigma = D.operator([v11, v12])
Sigma.text("$\Sigma^{ep}$")
# Symbol
D.text(v12.x + txtpad, D.y0, "=")
# Item 2
v21 = D.vertex([v12.x + 2 * txtpad, D.y0 - 0.3])
v22 = D.vertex(v21.xy, dx=linlen)
G = D.line(v21, v22, **G_style)
Ph = D.line(v21, v22, **Ph_style)
# Symbol
D.text(v22.x + txtpad, D.y0, "+")
# Item 3
v31 = D.vertex([v22.x + 3 * txtpad, D.y0 - 0.3])
DW = D.line(v31, v31, **DW_style)
D.plot()
plt.show()
| gpl-3.0 |
ebrensi/registry-frontend | ff.py | 1 | 1240 | #! usr/bin/env python
# This script is for testing without having to host the flask app.
import folium
import pandas as pd
import os
from sqlalchemy import create_engine
import geojson
DATABASE_URL = os.environ["DATABASE_URL"]
STATES_GEOJSON_PATH = "static/us-states.json"
engine = create_engine(DATABASE_URL)
with engine.connect() as db:
query = "Select state, count(*) From registry Group By state;"
df = pd.read_sql_query(query, db)
with open(STATES_GEOJSON_PATH, "r") as file:
gj = geojson.load(file)
# Folium choropleth requires a one-to-one correspondence between GeoJSON
# features (state definitions) and shade values, so we will make a new
# GeoJSON object that is a FeatureCollection of only the states that we
# have data for.
relevant_features = [feature for feature in gj["features"]
if ("id" in feature) and
(feature["id"] in df["state"].values)]
gj_relevant = geojson.FeatureCollection(relevant_features)
geo_str = geojson.dumps(gj_relevant)
base_map = folium.Map([43, -100], zoom_start=5)
base_map.choropleth(
geo_str=geo_str,
data=df,
columns=['state', 'count'],
key_on='feature.id',
fill_color='PuBuGn',
)
base_map.save("map.html")
| mit |
LaRiffle/axa_challenge | fonction_py/train.py | 1 | 12400 | from fonction_py.tools import *
from fonction_py.preprocess import *
from sklearn import linear_model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn import svm
from sklearn import decomposition
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.grid_search import RandomizedSearchCV
from scipy.stats import uniform as sp_randint
from sklearn import datasets
from sklearn.linear_model import Ridge
from fonction_py.tim import *
import time
def faireTout():
    fields = ['DATE', 'DAY_OFF', 'WEEK_END', 'DAY_WE_DS', 'ASS_ASSIGNMENT', 'CSPL_RECEIVED_CALLS'] # select the columns to read
    c = pd.DataFrame()
    listmodel = faireListModel() # (name, model) pair for each ASS_ASSIGNMENT
    #'Evenements', 'Gestion Amex'
    #setFields = set(pd.read_csv("data/fields.txt", sep=";")['0'].values)
    i = 0
    # res = []
    start_time = time.time()
    data = pd.read_csv("data/trainPure.csv", sep=";", usecols=fields) # read the training file
    resultat = pd.read_csv("data/submission.txt", sep="\t") # read the submission (test) file
    res = []
    for model in listmodel:
        i = i + 1
        print(model[0]) # print the ASS_ASSIGNMENT name
        x, y = preprocess(data.copy(), model[0]) # add the features
        model[1].fit(x, y) # train
        #model.score(xTrain, yTrain)
        (xTest, souvenir) = preprocessFINAL(x, model[0])
        pred = model[1].predict(xTest) # predict
        pred[pred > max(y)*1.05] = max(y)*1.05 # do not predict above the observed maximum
        pred[pred < 0] = 0 # no negative predictions
        pred = np.round(pred).astype(int) # to int
        souvenir['prediction'] = pred # souvenir keeps the standard format and the date, so each prediction goes back on the right row
        resultat = pd.merge(resultat, souvenir, how='left', on=['DATE', 'ASS_ASSIGNMENT']) # the merge creates prediction_x (old) and prediction_y (new)
        resultat = resultat.fillna(0) # rows without a prediction get 0
        resultat['prediction'] = resultat['prediction_x'] + resultat['prediction_y'] # merge the two columns
        del resultat['prediction_x']
        del resultat['prediction_y']
    # 'Téléphonie' is handled separately with its own predictor
    x, y = preprocess(data.copy(), 'Téléphonie') # add the features
    #model.score(xTrain, yTrain)
    (xTest, souvenir) = preprocessFINAL(x, 'Téléphonie')
    pred = telephoniePred(x, y, xTest)
    pred[pred > max(y)*1.05] = max(y)*1.05
    pred[pred < 0] = 0
    pred = np.round(pred).astype(int)
    souvenir['prediction'] = pred
    resultat = pd.merge(resultat, souvenir, how='left', on=['DATE', 'ASS_ASSIGNMENT'])
    resultat = resultat.fillna(0)
    resultat['prediction'] = resultat['prediction_x'] + resultat['prediction_y']
    del resultat['prediction_x']
    del resultat['prediction_y']
    resultat['prediction'] = resultat['prediction'].astype(int)
    resultat.to_csv("pouranalyse.txt", sep="\t", index=False, encoding='utf-8')
return resultat
def faireListModel():
return [('CAT', linear_model.LinearRegression()),
('CMS', RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=5,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=10, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Crises',linear_model.LinearRegression()),
('Domicile', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=30,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=90, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Gestion',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=30,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Gestion - Accueil Telephonique',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=70, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Gestion Assurances',RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=20,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=20, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Gestion Clients', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=10,
max_features=90, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=50, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Gestion DZ', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=5,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Gestion Relation Clienteles',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=10,
max_features=90, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=110, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Gestion Renault', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=30,
max_features=50, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Japon',RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=10,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Manager',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=10,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Mécanicien',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Médical',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=30,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Nuit', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Prestataires',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('RENAULT',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=80,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('RTC',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Regulation Medicale',linear_model.LinearRegression()),
('SAP',RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=20,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Services',RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=30,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Tech. Axa',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Tech. Inter',RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=30,
max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
('Tech. Total',RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=70,
max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
verbose=0, warm_start=False)),
            ('Téléphonie',GradientBoostingRegressor(loss='huber', alpha=0.9,n_estimators=100, max_depth=3,learning_rate=.1, min_samples_leaf=9,min_samples_split=9) )]
| mit |