repo_name stringlengths 6-103 | path stringlengths 5-191 | copies stringlengths 1-4 | size stringlengths 4-6 | content stringlengths 986-970k | license stringclasses 15 values |
---|---|---|---|---|---
CodingCat/mxnet | example/recommenders/movielens_data.py | 19 | 2419 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""MovieLens data handling: download, parse, and expose as DataIter
"""
import os
import mxnet as mx
def load_mldata_iter(filename, batch_size):
"""Not particularly fast code to parse the text file and load it into three NDArray's
and product an NDArrayIter
"""
user = []
item = []
score = []
with open(filename) as f:
for line in f:
tks = line.strip().split('\t')
if len(tks) != 4:
continue
user.append(int(tks[0]))
item.append(int(tks[1]))
score.append(float(tks[2]))
user = mx.nd.array(user)
item = mx.nd.array(item)
score = mx.nd.array(score)
return mx.io.NDArrayIter(data={'user':user,'item':item},label={'score':score},
batch_size=batch_size, shuffle=True)
def ensure_local_data(prefix):
if not os.path.exists("%s.zip" % prefix):
print("Downloading MovieLens data: %s" % prefix)
os.system("wget http://files.grouplens.org/datasets/movielens/%s.zip" % prefix)
os.system("unzip %s.zip" % prefix)
def get_data_iter(batch_size, prefix='ml-100k'):
"""Returns a pair of NDArrayDataIter, one for train, one for test.
"""
ensure_local_data(prefix)
return (load_mldata_iter('./%s/u1.base' % prefix, batch_size),
load_mldata_iter('./%s/u1.test' % prefix, batch_size))
def max_id(fname):
mu = 0
mi = 0
for line in open(fname):
tks = line.strip().split('\t')
if len(tks) != 4:
continue
mu = max(mu, int(tks[0]))
mi = max(mi, int(tks[1]))
return mu + 1, mi + 1
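# A minimal usage sketch, assuming mxnet is installed, wget/unzip are on the
# PATH for ensure_local_data, and the ml-100k download succeeds.
if __name__ == '__main__':
    train_iter, test_iter = get_data_iter(batch_size=64)
    n_users, n_items = max_id('./ml-100k/u1.base')  # max id + 1 for each
    print("num users: %d, num items: %d" % (n_users, n_items))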
| apache-2.0 |
arabenjamin/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 248 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
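# For reference, the piecewise expression above implements
#   L(z) = max(0, 1 - z)^2  for z >= -1,  and  -4 * z  for z < -1,
# with z = y_true * y_pred; e.g. z = 0.5 gives 0.25 and z = -2 gives 8.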
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
michellemorales/OpenMM | models/lfads/synth_data/generate_chaotic_rnn_data.py | 11 | 8412 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import h5py
import numpy as np
import os
import tensorflow as tf # used for flags here
from utils import write_datasets
from synthetic_data_utils import add_alignment_projections, generate_data
from synthetic_data_utils import generate_rnn, get_train_n_valid_inds
from synthetic_data_utils import nparray_and_transpose
from synthetic_data_utils import spikify_data, gaussify_data, split_list_by_inds
import matplotlib
import matplotlib.pyplot as plt
import scipy.signal
matplotlib.rcParams['image.interpolation'] = 'nearest'
DATA_DIR = "rnn_synth_data_v1.0"
flags = tf.app.flags
flags.DEFINE_string("save_dir", "/tmp/" + DATA_DIR + "/",
"Directory for saving data.")
flags.DEFINE_string("datafile_name", "thits_data",
"Name of data file for input case.")
flags.DEFINE_string("noise_type", "poisson", "Noise type for data.")
flags.DEFINE_integer("synth_data_seed", 5, "Random seed for RNN generation.")
flags.DEFINE_float("T", 1.0, "Time in seconds to generate.")
flags.DEFINE_integer("C", 100, "Number of conditions")
flags.DEFINE_integer("N", 50, "Number of units for the RNN")
flags.DEFINE_integer("S", 50, "Number of sampled units from RNN")
flags.DEFINE_integer("npcs", 10, "Number of PCS for multi-session case.")
flags.DEFINE_float("train_percentage", 4.0/5.0,
"Percentage of train vs validation trials")
flags.DEFINE_integer("nreplications", 40,
"Number of noise replications of the same underlying rates.")
flags.DEFINE_float("g", 1.5, "Complexity of dynamics")
flags.DEFINE_float("x0_std", 1.0,
"Volume from which to pull initial conditions (affects diversity of dynamics.")
flags.DEFINE_float("tau", 0.025, "Time constant of RNN")
flags.DEFINE_float("dt", 0.010, "Time bin")
flags.DEFINE_float("input_magnitude", 20.0,
"For the input case, what is the value of the input?")
flags.DEFINE_float("max_firing_rate", 30.0, "Map 1.0 of RNN to a spikes per second")
FLAGS = flags.FLAGS
# Note that with N small (as set by the N flag above), the finite size effects
# will have pretty dramatic effects on the dynamics of the random RNN.
# If you want more complex dynamics, you'll have to run the script a
# lot, or increase N (or g).
# Getting hard vs. easy data can be a little stochastic, so we set the seed.
# Pull out some commonly used parameters.
# These are user parameters (configuration)
rng = np.random.RandomState(seed=FLAGS.synth_data_seed)
T = FLAGS.T
C = FLAGS.C
N = FLAGS.N
S = FLAGS.S
input_magnitude = FLAGS.input_magnitude
nreplications = FLAGS.nreplications
E = nreplications * C # total number of trials
# S is the number of measurements in each dataset, with each
# dataset having a different set of observations.
ndatasets = N // S  # integer division; ok if rounded down
train_percentage = FLAGS.train_percentage
ntime_steps = int(T / FLAGS.dt)
# End of user parameters
rnn = generate_rnn(rng, N, FLAGS.g, FLAGS.tau, FLAGS.dt, FLAGS.max_firing_rate)
# Check to make sure the RNN is the one we used in the paper.
if N == 50:
assert abs(rnn['W'][0,0] - 0.06239899) < 1e-8, 'Error in random seed?'
rem_check = nreplications * train_percentage
assert abs(rem_check - int(rem_check)) < 1e-8, \
'Train percentage * nreplications should be integral number.'
# Initial condition generation, and condition label generation. This
# happens outside of the dataset loop, so that all datasets have the
# same conditions, which is similar to a neurophys setup.
condition_number = 0
x0s = []
condition_labels = []
for c in range(C):
x0 = FLAGS.x0_std * rng.randn(N, 1)
x0s.append(np.tile(x0, nreplications)) # replicate x0 nreplications times
# replicate the condition label nreplications times
for ns in range(nreplications):
condition_labels.append(condition_number)
condition_number += 1
x0s = np.concatenate(x0s, axis=1)
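# After concatenation, x0s has shape (N, C * nreplications): each condition's
# initial state is tiled nreplications times, so every replication of a
# condition starts the RNN from the same initial condition.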
# Containers for storing data across datasets.
datasets = {}
for n in range(ndatasets):
print(n+1, " of ", ndatasets)
  # First generate all firing rates. In the next loop, generate all
  # replications; this allows the random state for rate generation to be
  # independent of n_replications.
dataset_name = 'dataset_N' + str(N) + '_S' + str(S)
if S < N:
dataset_name += '_n' + str(n+1)
# Sample neuron subsets. The assumption is the PC axes of the RNN
# are not unit aligned, so sampling units is adequate to sample all
# the high-variance PCs.
P_sxn = np.eye(S,N)
for m in range(n):
P_sxn = np.roll(P_sxn, S, axis=1)
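  # P_sxn is an S x N selector matrix: np.eye(S, N) picks the first S of the
  # N RNN units, and each successive dataset rolls that selection right by S
  # columns (with wrap-around), so different datasets observe different
  # subsets of the units.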
if input_magnitude > 0.0:
# time of "hits" randomly chosen between [1/4 and 3/4] of total time
input_times = rng.choice(int(ntime_steps/2), size=[E]) + int(ntime_steps/4)
else:
input_times = None
rates, x0s, inputs = \
generate_data(rnn, T=T, E=E, x0s=x0s, P_sxn=P_sxn,
input_magnitude=input_magnitude,
input_times=input_times)
if FLAGS.noise_type == "poisson":
noisy_data = spikify_data(rates, rng, rnn['dt'], rnn['max_firing_rate'])
elif FLAGS.noise_type == "gaussian":
noisy_data = gaussify_data(rates, rng, rnn['dt'], rnn['max_firing_rate'])
else:
raise ValueError("Only noise types supported are poisson or gaussian")
# split into train and validation sets
train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,
nreplications)
# Split the data, inputs, labels and times into train vs. validation.
rates_train, rates_valid = \
split_list_by_inds(rates, train_inds, valid_inds)
noisy_data_train, noisy_data_valid = \
split_list_by_inds(noisy_data, train_inds, valid_inds)
input_train, inputs_valid = \
split_list_by_inds(inputs, train_inds, valid_inds)
condition_labels_train, condition_labels_valid = \
split_list_by_inds(condition_labels, train_inds, valid_inds)
input_times_train, input_times_valid = \
split_list_by_inds(input_times, train_inds, valid_inds)
# Turn rates, noisy_data, and input into numpy arrays.
rates_train = nparray_and_transpose(rates_train)
rates_valid = nparray_and_transpose(rates_valid)
noisy_data_train = nparray_and_transpose(noisy_data_train)
noisy_data_valid = nparray_and_transpose(noisy_data_valid)
input_train = nparray_and_transpose(input_train)
inputs_valid = nparray_and_transpose(inputs_valid)
  # Note that although we put these 'truth' rates and inputs into this
  # structure, the only data used by LFADS is the noisy data, e.g. spike
  # trains. The rest is either for printing or posterity.
data = {'train_truth': rates_train,
'valid_truth': rates_valid,
'input_train_truth' : input_train,
'input_valid_truth' : inputs_valid,
'train_data' : noisy_data_train,
'valid_data' : noisy_data_valid,
'train_percentage' : train_percentage,
'nreplications' : nreplications,
'dt' : rnn['dt'],
'input_magnitude' : input_magnitude,
'input_times_train' : input_times_train,
'input_times_valid' : input_times_valid,
'P_sxn' : P_sxn,
'condition_labels_train' : condition_labels_train,
'condition_labels_valid' : condition_labels_valid,
'conversion_factor': 1.0 / rnn['conversion_factor']}
datasets[dataset_name] = data
if S < N:
# Note that this isn't necessary for this synthetic example, but
# it's useful to see how the input factor matrices were initialized
# for actual neurophysiology data.
datasets = add_alignment_projections(datasets, npcs=FLAGS.npcs)
# Write out the datasets.
write_datasets(FLAGS.save_dir, FLAGS.datafile_name, datasets)
| gpl-2.0 |
heli522/scikit-learn | sklearn/metrics/__init__.py | 212 | 3440 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
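# A minimal usage sketch of two of the exported functions (values shown are
# what these calls are expected to return):
#
#     from sklearn.metrics import accuracy_score, confusion_matrix
#     accuracy_score([0, 1, 1], [0, 1, 0])     # ~0.667
#     confusion_matrix([0, 1, 1], [0, 1, 0])   # [[1, 0], [1, 1]]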
| bsd-3-clause |
tdeboissiere/DeepLearningImplementations | ScatteringTransform/test/utils_pytorch.py | 1 | 11306 | """
Copyright (c) 2017, Eugene Belilovsky (INRIA), Edouard Oyallon (ENS) and Sergey Zagoruyko (ENPC)
All rights reserved.
"""
from collections import defaultdict, namedtuple
import torch
from skcuda import cublas, cufft
from pynvrtc.compiler import Program
import numpy as np
from cupy.cuda.function import Module
from cupy.cuda import device
from string import Template
Stream = namedtuple('Stream', ['ptr'])
def getDtype(t):
if isinstance(t, torch.cuda.FloatTensor):
return 'float'
elif isinstance(t, torch.cuda.DoubleTensor):
return 'double'
def get_compute_arch(t):
return 'compute_%s' % device.Device().compute_capability
def iscomplex(input):
return input.size(-1) == 2
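# Throughout this module a "complex" tensor is a real tensor whose last
# dimension has size 2, holding the real and imaginary parts; iscomplex()
# above simply checks for that convention.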
class Periodize(object):
"""This class builds a wrapper to the periodiziation kernels and cache them.
"""
def __init__(self, jit=True):
self.periodize_cache = defaultdict(lambda: None)
self.block = (32, 32, 1)
self.jit = jit
def GET_BLOCKS(self, N, threads):
return (N + threads - 1) // threads
def __call__(self, input, k):
out = input.new(input.size(0), input.size(1), input.size(2) // k, input.size(3) // k, 2)
if not self.jit or isinstance(input, (torch.FloatTensor, torch.DoubleTensor)):
y = input.view(input.size(0), input.size(1),
input.size(2)//out.size(2), out.size(2),
input.size(3)//out.size(3), out.size(3),
2)
out = y.mean(4).squeeze(4).mean(2).squeeze(2)
return out
if not iscomplex(input):
raise (TypeError('The input and outputs should be complex'))
input = input.contiguous()
if (self.periodize_cache[(input.size(), out.size(), input.get_device())] is None):
kernel = '''
#define NW ${W} / ${k}
#define NH ${H} / ${k}
extern "C"
__global__ void periodize(const ${Dtype}2 *input, ${Dtype}2 *output)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
int tz = blockIdx.z * blockDim.z + threadIdx.z;
if(tx >= NW || ty >= NH || tz >= ${B})
return;
input += tz * ${H} * ${W} + ty * ${W} + tx;
${Dtype}2 res = make_${Dtype}2(0.f, 0.f);
for (int j=0; j<${k}; ++j)
for (int i=0; i<${k}; ++i)
{
const ${Dtype}2 &c = input[j * NH * ${W} + i * NW];
res.x += c.x;
res.y += c.y;
}
res.x /= ${k} * ${k};
res.y /= ${k} * ${k};
output[tz * NH * NW + ty * NW + tx] = res;
}
'''
B = input.nelement() // (2*input.size(-2) * input.size(-3))
W = input.size(-2)
H = input.size(-3)
k = input.size(-2) // out.size(-2)
kernel = Template(kernel).substitute(B=B, H=H, W=W, k=k, Dtype=getDtype(input))
name = str(input.get_device())+'-'+str(B)+'-'+str(k)+'-'+str(H)+'-'+str(W)+'-periodize.cu'
print(name)
prog = Program(kernel, name.encode())
ptx = prog.compile(['-arch='+get_compute_arch(input)])
module = Module()
module.load(bytes(ptx.encode()))
self.periodize_cache[(input.size(), out.size(), input.get_device())] = module
grid = (self.GET_BLOCKS(out.size(-3), self.block[0]),
self.GET_BLOCKS(out.size(-2), self.block[1]),
self.GET_BLOCKS(out.nelement() // (2*out.size(-2) * out.size(-3)), self.block[2]))
periodize = self.periodize_cache[(input.size(), out.size(), input.get_device())].get_function('periodize')
periodize(grid=grid, block=self.block, args=[input.data_ptr(), out.data_ptr()],
stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
return out
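    # In both the CPU fallback and the CUDA kernel above, each output pixel is
    # the average of the k x k input samples spaced H/k rows and W/k columns
    # apart, i.e. the H x W map is folded (periodized) onto an (H/k) x (W/k)
    # grid. For Fourier-domain inputs, this periodization corresponds to
    # subsampling in the spatial domain.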
class Modulus(object):
"""This class builds a wrapper to the moduli kernels and cache them.
"""
def __init__(self, jit=True):
self.modulus_cache = defaultdict(lambda: None)
self.CUDA_NUM_THREADS = 1024
self.jit = jit
def GET_BLOCKS(self, N):
return (N + self.CUDA_NUM_THREADS - 1) // self.CUDA_NUM_THREADS
def __call__(self, input):
if not self.jit or not isinstance(input, torch.cuda.FloatTensor):
norm = input.norm(2, input.dim() - 1)
return torch.cat([norm, norm.new(norm.size()).zero_()], input.dim() - 1)
out = input.new(input.size())
input = input.contiguous()
if not iscomplex(input):
raise TypeError('The input and outputs should be complex')
if (self.modulus_cache[input.get_device()] is None):
kernel = b"""
extern "C"
__global__ void abs_complex_value(const float * x, float2 * z, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n)
return;
z[i] = make_float2(normf(2, x + 2*i), 0);
}
"""
print('modulus.cu')
prog = Program(kernel, b'modulus.cu')
ptx = prog.compile(['-arch='+get_compute_arch(input)])
module = Module()
module.load(bytes(ptx.encode()))
self.modulus_cache[input.get_device()] = module
fabs = self.modulus_cache[input.get_device()].get_function('abs_complex_value')
fabs(grid=(self.GET_BLOCKS(int(out.nelement())//2), 1, 1),
block=(self.CUDA_NUM_THREADS, 1, 1),
args=[input.data_ptr(), out.data_ptr(), out.numel() // 2],
stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
return out
class Fft(object):
"""This class builds a wrapper to the FFTs kernels and cache them.
As a try, the library will purely work with complex data. The FFTS are UNORMALIZED.
"""
def __init__(self):
self.fft_cache = defaultdict(lambda: None)
def buildCache(self, input, type):
k = input.ndimension() - 3
n = np.asarray([input.size(k), input.size(k+1)], np.int32)
batch = input.nelement() // (2*input.size(k) * input.size(k + 1))
idist = input.size(k) * input.size(k + 1)
istride = 1
ostride = istride
odist = idist
rank = 2
plan = cufft.cufftPlanMany(rank, n.ctypes.data, n.ctypes.data, istride,
idist, n.ctypes.data, ostride, odist, type, batch)
self.fft_cache[(input.size(), type, input.get_device())] = plan
def __del__(self):
for keys in self.fft_cache:
try:
cufft.cufftDestroy(self.fft_cache[keys])
except:
pass
def __call__(self, input, direction='C2C', inplace=False, inverse=False):
if direction == 'C2R':
inverse = True
if not isinstance(input, torch.cuda.FloatTensor):
if not isinstance(input, (torch.FloatTensor, torch.DoubleTensor)):
raise(TypeError('The input should be a torch.cuda.FloatTensor, \
torch.FloatTensor or a torch.DoubleTensor'))
else:
input_np = input[..., 0].numpy() + 1.0j * input[..., 1].numpy()
f = lambda x: np.stack((np.real(x), np.imag(x)), axis=len(x.shape))
out_type = input.numpy().dtype
if direction == 'C2R':
out = np.real(np.fft.ifft2(input_np)).astype(out_type)*input.size(-2)*input.size(-3)
return torch.from_numpy(out)
if inplace:
if inverse:
out = f(np.fft.ifft2(input_np)).astype(out_type)*input.size(-2)*input.size(-3)
else:
out = f(np.fft.fft2(input_np)).astype(out_type)
input.copy_(torch.from_numpy(out))
return
else:
if inverse:
out = f(np.fft.ifft2(input_np)).astype(out_type)*input.size(-2)*input.size(-3)
else:
out = f(np.fft.fft2(input_np)).astype(out_type)
return torch.from_numpy(out)
if not iscomplex(input):
raise(TypeError('The input should be complex (e.g. last dimension is 2)'))
if (not input.is_contiguous()):
raise (RuntimeError('Tensors must be contiguous!'))
if direction == 'C2R':
output = input.new(input.size()[:-1])
if(self.fft_cache[(input.size(), cufft.CUFFT_C2R, input.get_device())] is None):
self.buildCache(input, cufft.CUFFT_C2R)
cufft.cufftExecC2R(self.fft_cache[(input.size(), cufft.CUFFT_C2R, input.get_device())],
input.data_ptr(), output.data_ptr())
return output
elif direction == 'C2C':
output = input.new(input.size()) if not inplace else input
flag = cufft.CUFFT_INVERSE if inverse else cufft.CUFFT_FORWARD
if (self.fft_cache[(input.size(), cufft.CUFFT_C2C, input.get_device())] is None):
self.buildCache(input, cufft.CUFFT_C2C)
cufft.cufftExecC2C(self.fft_cache[(input.size(), cufft.CUFFT_C2C, input.get_device())],
input.data_ptr(), output.data_ptr(), flag)
return output
def cdgmm(A, B, jit=True, inplace=False):
"""This function uses the C-wrapper to use cuBLAS.
"""
A, B = A.contiguous(), B.contiguous()
if A.size()[-3:] != B.size():
raise RuntimeError('The filters are not compatible for multiplication!')
if not iscomplex(A) or not iscomplex(B):
raise TypeError('The input, filter and output should be complex')
if B.ndimension() != 3:
raise RuntimeError('The filters must be simply a complex array!')
if type(A) is not type(B):
raise RuntimeError('A and B should be same type!')
if not jit or isinstance(A, (torch.FloatTensor, torch.DoubleTensor)):
C = A.new(A.size())
A_r = A[..., 0].contiguous().view(-1, A.size(-2)*A.size(-3))
A_i = A[..., 1].contiguous().view(-1, A.size(-2)*A.size(-3))
B_r = B[...,0].contiguous().view(B.size(-2)*B.size(-3)).unsqueeze(0).expand_as(A_i)
B_i = B[..., 1].contiguous().view(B.size(-2)*B.size(-3)).unsqueeze(0).expand_as(A_r)
C[..., 0].copy_(A_r * B_r - A_i * B_i)
C[..., 1].copy_(A_r * B_i + A_i * B_r)
# faster if B is actually real
#B[...,1] = B[...,0]
#C = A * B.unsqueeze(0).expand_as(A)
return C if not inplace else A.copy_(C)
else:
C = A.new(A.size()) if not inplace else A
m, n = B.nelement() // 2, A.nelement() // B.nelement()
lda = m
ldc = m
incx = 1
handle = torch.cuda.current_blas_handle()
stream = torch.cuda.current_stream()._as_parameter_
cublas.cublasSetStream(handle, stream)
cublas.cublasCdgmm(handle, 'l', m, n, A.data_ptr(), lda, B.data_ptr(), incx, C.data_ptr(), ldc)
return C
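# A minimal CPU usage sketch for cdgmm, assuming the legacy (circa-2017)
# PyTorch API this module targets is available and the GPU-only imports at
# the top of the file succeed.
if __name__ == '__main__':
    A = torch.zeros(1, 4, 3, 3, 2)   # "complex" maps: last dim = (real, imag)
    B = torch.zeros(3, 3, 2)         # "complex" filter
    A[..., 0] = 1.0                  # A = 1 + 0j everywhere
    B[..., 0] = 2.0                  # B = 2 + 0j everywhere
    C = cdgmm(A, B, jit=False)       # force the CPU code path
    print(C[..., 0].mean())          # expected: 2.0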
| mit |
heli522/scikit-learn | examples/datasets/plot_random_dataset.py | 345 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
hasgeek/funnel | funnel/views/membership.py | 1 | 22583 | """Views for organization admin and project crew membership management."""
from __future__ import annotations
from typing import Optional
from flask import abort, request
from baseframe import _
from baseframe.forms import Form, render_form
from coaster.auth import current_auth
from coaster.views import (
ModelView,
UrlChangeCheck,
UrlForView,
render_with,
requires_roles,
route,
)
from .. import app, signals
from ..forms import (
OrganizationMembershipForm,
ProjectCrewMembershipForm,
ProjectCrewMembershipInviteForm,
)
from ..models import (
MembershipRevokedError,
OrganizationAdminMembershipNotification,
OrganizationAdminMembershipRevokedNotification,
OrganizationMembership,
Profile,
Project,
ProjectCrewMembership,
db,
sa,
)
from ..typing import ReturnRenderWith, ReturnView
from .helpers import html_in_json, render_redirect
from .login_session import requires_login, requires_sudo
from .mixins import ProfileCheckMixin, ProfileViewMixin, ProjectViewMixin
from .notification import dispatch_notification
@Profile.views('members')
@route('/<profile>/members')
class OrganizationMembersView(ProfileViewMixin, UrlForView, ModelView):
def after_loader(self) -> Optional[ReturnView]: # type: ignore[return]
"""Don't render member views for user accounts."""
if not self.obj.organization:
# User accounts (nee profiles) don't have memberships
abort(404)
@route('', methods=['GET', 'POST'])
@render_with('organization_membership.html.jinja2')
@requires_roles({'reader', 'admin'})
def members(self) -> ReturnRenderWith:
"""Render a list of organization admin members."""
return {
'profile': self.obj,
'memberships': [
membership.current_access(datasets=('without_parent', 'related'))
for membership in self.obj.organization.active_admin_memberships
],
}
@route('new', methods=['GET', 'POST'])
@requires_login
@requires_roles({'owner'})
def new_member(self) -> ReturnView:
membership_form = OrganizationMembershipForm()
if request.method == 'POST':
if membership_form.validate_on_submit():
if not membership_form.user.data.has_verified_contact_info:
# users without verified contact information cannot be members
return (
{
'status': 'error',
'error_description': _(
"This user does not have any verified contact"
" information. If you are able to contact them, please"
" ask them to verify their email address or phone"
" number"
),
'errors': membership_form.errors,
'form_nonce': membership_form.form_nonce.data,
},
400,
)
previous_membership = (
OrganizationMembership.query.filter(
OrganizationMembership.is_active
)
.filter_by(
organization=self.obj.organization,
user_id=membership_form.user.data.id,
)
.one_or_none()
)
if previous_membership is not None:
return (
{
'status': 'error',
'error_description': _("This user is already an admin"),
'errors': membership_form.errors,
'form_nonce': membership_form.form_nonce.data,
},
400,
)
new_membership = OrganizationMembership(
organization=self.obj.organization, granted_by=current_auth.user
)
membership_form.populate_obj(new_membership)
db.session.add(new_membership)
db.session.commit()
dispatch_notification(
OrganizationAdminMembershipNotification(
document=new_membership.organization,
fragment=new_membership,
)
)
return {
'status': 'ok',
'message': _("The user has been added as an admin"),
'memberships': [
membership.current_access(
datasets=('without_parent', 'related')
)
for membership in self.obj.organization.active_admin_memberships
],
}, 201
return (
{
'status': 'error',
'error_description': _("The new admin could not be added"),
'errors': membership_form.errors,
'form_nonce': membership_form.form_nonce.data,
},
400,
)
membership_form_html = render_form(
form=membership_form,
title='',
submit='Add member',
ajax=False,
with_chrome=False,
).get_data(as_text=True)
return {'status': 'ok', 'form': membership_form_html}
OrganizationMembersView.init_app(app)
@OrganizationMembership.views('main')
@route('/<profile>/members/<membership>')
class OrganizationMembershipView(
ProfileCheckMixin, UrlChangeCheck, UrlForView, ModelView
):
model = OrganizationMembership
route_model_map = {'profile': 'organization.name', 'membership': 'uuid_b58'}
obj: OrganizationMembership
def loader(self, profile, membership) -> OrganizationMembership:
return OrganizationMembership.query.filter(
OrganizationMembership.uuid_b58 == membership,
).first_or_404()
def after_loader(self) -> Optional[ReturnView]:
self.profile = self.obj.organization.profile
return super().after_loader()
@route('edit', methods=['GET', 'POST'])
@requires_login
@requires_roles({'profile_owner'})
def edit(self) -> ReturnView:
previous_membership = self.obj
membership_form = OrganizationMembershipForm(obj=previous_membership)
if request.method == 'POST':
if membership_form.validate_on_submit():
if previous_membership.user == current_auth.user:
return {
'status': 'error',
'error_description': _("You can’t edit your own role"),
'form_nonce': membership_form.form_nonce.data,
}
try:
new_membership = previous_membership.replace(
actor=current_auth.user, is_owner=membership_form.is_owner.data
)
except MembershipRevokedError:
return (
{
'status': 'error',
'error_description': _(
"This member’s record was edited elsewhere."
" Reload the page"
),
'form_nonce': membership_form.form_nonce.data,
},
400,
)
if new_membership != previous_membership:
db.session.commit()
dispatch_notification(
OrganizationAdminMembershipNotification(
document=new_membership.organization,
fragment=new_membership,
)
)
return {
'status': 'ok',
'message': (
_("The member’s roles have been updated")
if new_membership != previous_membership
else _("No changes were detected")
),
'memberships': [
membership.current_access(
datasets=('without_parent', 'related')
)
for membership in self.obj.organization.active_admin_memberships
],
}
return (
{
'status': 'error',
'error_description': _("Please pick one or more roles"),
'errors': membership_form.errors,
'form_nonce': membership_form.form_nonce.data,
},
400,
)
membership_form_html = render_form(
form=membership_form,
title='',
submit='Edit membership',
ajax=False,
with_chrome=False,
).get_data(as_text=True)
return {'status': 'ok', 'form': membership_form_html}
@route('delete', methods=['GET', 'POST'])
@requires_sudo
@requires_roles({'profile_owner'})
def delete(self) -> ReturnView:
form = Form()
if form.is_submitted():
if form.validate():
previous_membership = self.obj
if previous_membership.user == current_auth.user:
return {
'status': 'error',
'error_description': _("You can’t revoke your own membership"),
'form_nonce': form.form_nonce.data,
}
if previous_membership.is_active:
previous_membership.revoke(actor=current_auth.user)
db.session.commit()
dispatch_notification(
OrganizationAdminMembershipRevokedNotification(
document=previous_membership.organization,
fragment=previous_membership,
)
)
return {
'status': 'ok',
'message': _("The member has been removed"),
'memberships': [
membership.current_access(
datasets=('without_parent', 'related')
)
for membership in self.obj.organization.active_admin_memberships
],
}
return (
{
'status': 'error',
'errors': form.errors,
'form_nonce': form.form_nonce.data,
},
400,
)
form_html = render_form(
form=form,
title=_("Confirm removal"),
message=_("Remove {member} as an admin from {account}?").format(
member=self.obj.user.fullname, account=self.obj.organization.title
),
submit=_("Remove"),
ajax=False,
with_chrome=False,
).get_data(as_text=True)
return {'status': 'ok', 'form': form_html}
OrganizationMembershipView.init_app(app)
#: Project Membership views
@Project.views('crew')
@route('/<profile>/<project>/crew')
class ProjectMembershipView(ProjectViewMixin, UrlChangeCheck, UrlForView, ModelView):
@route('', methods=['GET', 'POST'])
@render_with(html_in_json('project_membership.html.jinja2'))
def crew(self) -> ReturnRenderWith:
memberships = [
membership.current_access(datasets=('without_parent', 'related'))
for membership in self.obj.active_crew_memberships
]
return {
'project': self.obj.current_access(datasets=('primary', 'related')),
'memberships': memberships,
}
@route('new', methods=['GET', 'POST'])
@requires_login
@requires_roles({'profile_admin'})
def new_member(self) -> ReturnView:
membership_form = ProjectCrewMembershipForm()
if request.method == 'POST':
if membership_form.validate_on_submit():
if not membership_form.user.data.has_verified_contact_info:
# users without verified contact information cannot be members
return (
{
'status': 'error',
'error_description': _(
"This user does not have any verified contact"
" information. If you are able to contact them, please"
" ask them to verify their email address or phone"
" number"
),
'errors': membership_form.errors,
'form_nonce': membership_form.form_nonce.data,
},
400,
)
previous_membership = (
ProjectCrewMembership.query.filter(ProjectCrewMembership.is_active)
.filter_by(project=self.obj, user_id=membership_form.user.data.id)
.one_or_none()
)
if previous_membership is not None:
return (
{
'status': 'error',
'error_description': _("This person is already a member"),
'errors': membership_form.errors,
'form_nonce': membership_form.form_nonce.data,
},
400,
)
new_membership = ProjectCrewMembership(
project=self.obj, granted_by=current_auth.user
)
membership_form.populate_obj(new_membership)
db.session.add(new_membership)
# TODO: Once invite is introduced, send invite email here
db.session.commit()
signals.project_role_change.send(
self.obj, actor=current_auth.user, user=new_membership.user
)
signals.project_crew_membership_added.send(
self.obj,
project=self.obj,
membership=new_membership,
actor=current_auth.user,
user=new_membership.user,
)
db.session.commit()
return {
'status': 'ok',
'message': _("The user has been added as a member"),
'memberships': [
membership.current_access(
datasets=('without_parent', 'related')
)
for membership in self.obj.active_crew_memberships
],
}
return (
{
'status': 'error',
'error_description': _("Please pick one or more roles"),
'errors': membership_form.errors,
'form_nonce': membership_form.form_nonce.data,
},
400,
)
membership_form_html = render_form(
form=membership_form,
title='',
submit='Add member',
ajax=False,
with_chrome=False,
).get_data(as_text=True)
return {'status': 'ok', 'form': membership_form_html}
ProjectMembershipView.init_app(app)
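# Note on the response format used by the membership views above and below:
# on GET they return the rendered form HTML inside a JSON payload (the 'form'
# key), and on POST they return a JSON status along with the refreshed list of
# active memberships, so a single route handles both rendering and submission.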
class ProjectCrewMembershipMixin(ProfileCheckMixin):
model = ProjectCrewMembership
route_model_map = {
'profile': 'project.profile.name',
'project': 'project.name',
'membership': 'uuid_b58',
}
obj: ProjectCrewMembership
def loader(self, profile, project, membership) -> ProjectCrewMembership:
return (
ProjectCrewMembership.query.join(Project, Profile)
.filter(
sa.func.lower(Profile.name) == sa.func.lower(profile),
Project.name == project,
ProjectCrewMembership.uuid_b58 == membership,
)
.first_or_404()
)
def after_loader(self) -> Optional[ReturnView]:
self.profile = self.obj.project.profile
return super().after_loader()
@ProjectCrewMembership.views('invite')
@route('/<profile>/<project>/crew/<membership>/invite')
class ProjectCrewMembershipInviteView(
ProjectCrewMembershipMixin, UrlChangeCheck, UrlForView, ModelView
):
def loader(self, profile, project, membership) -> ProjectCrewMembership:
obj = super().loader(profile, project, membership)
if not obj.is_invite or obj.user != current_auth.user:
abort(404)
return obj
@route('', methods=['GET'])
@render_with('membership_invite_actions.html.jinja2')
@requires_login
def invite(self) -> ReturnRenderWith:
return {
'membership': self.obj.current_access(datasets=('primary', 'related')),
'form': Form(),
}
@route('action', methods=['POST'])
@requires_login
def invite_action(self) -> ReturnView:
membership_invite_form = ProjectCrewMembershipInviteForm()
if membership_invite_form.validate_on_submit():
if membership_invite_form.action.data == 'accept':
self.obj.accept(actor=current_auth.user)
elif membership_invite_form.action.data == 'decline':
self.obj.revoke(actor=current_auth.user)
db.session.commit()
return render_redirect(self.obj.project.url_for())
ProjectCrewMembershipInviteView.init_app(app)
@ProjectCrewMembership.views('main')
@route('/<profile>/<project>/crew/<membership>')
class ProjectCrewMembershipView(
ProjectCrewMembershipMixin, UrlChangeCheck, UrlForView, ModelView
):
@route('edit', methods=['GET', 'POST'])
@requires_login
@requires_roles({'profile_admin'})
def edit(self) -> ReturnView:
previous_membership = self.obj
form = ProjectCrewMembershipForm(obj=previous_membership)
if form.is_submitted():
if form.validate():
try:
previous_membership.replace(
actor=current_auth.user,
is_editor=form.is_editor.data,
is_promoter=form.is_promoter.data,
is_usher=form.is_usher.data,
)
except MembershipRevokedError:
return (
{
'status': 'error',
'error_description': _(
"The member’s record was edited elsewhere."
" Reload the page"
),
'form_nonce': form.form_nonce.data,
},
400,
)
db.session.commit()
signals.project_role_change.send(
self.obj.project, actor=current_auth.user, user=self.obj.user
)
db.session.commit()
return {
'status': 'ok',
'message': _("The member’s roles have been updated"),
'memberships': [
membership.current_access(
datasets=('without_parent', 'related')
)
for membership in self.obj.project.active_crew_memberships
],
}
return (
{
'status': 'error',
'error_description': _("Please pick one or more roles"),
'errors': form.errors,
'form_nonce': form.form_nonce.data,
},
400,
)
membership_form_html = render_form(
form=form,
title='',
submit='Edit membership',
ajax=False,
with_chrome=False,
).get_data(as_text=True)
return {'status': 'ok', 'form': membership_form_html}
@route('delete', methods=['GET', 'POST'])
@requires_sudo
@requires_roles({'profile_admin'})
def delete(self) -> ReturnView:
form = Form()
if request.method == 'POST':
if form.validate_on_submit():
previous_membership = self.obj
if previous_membership.is_active:
previous_membership.revoke(actor=current_auth.user)
signals.project_crew_membership_revoked.send(
self.obj.project,
project=self.obj.project,
membership=previous_membership,
actor=current_auth.user,
user=previous_membership.user,
)
db.session.commit()
signals.project_role_change.send(
self.obj.project, actor=current_auth.user, user=self.obj.user
)
db.session.commit()
return {
'status': 'ok',
'message': _("The member has been removed"),
'memberships': [
membership.current_access(
datasets=('without_parent', 'related')
)
for membership in self.obj.project.active_crew_memberships
],
}
return ({'status': 'error', 'errors': form.errors}, 400)
form_html = render_form(
form=form,
title=_("Confirm removal"),
message=_("Remove {member} as a crew member from this project?").format(
member=self.obj.user.fullname
),
submit=_("Remove"),
ajax=False,
with_chrome=False,
).get_data(as_text=True)
return {'status': 'ok', 'form': form_html}
ProjectCrewMembershipView.init_app(app)
| agpl-3.0 |
abergeron/DeepLearningTutorials | code/mlp.py | 11 | 14283 | """
This tutorial introduces the multilayer perceptron using Theano.
A multilayer perceptron is a logistic regressor where
instead of feeding the input to the logistic regression you insert an
intermediate layer, called the hidden layer, that has a nonlinear
activation function (usually tanh or sigmoid). One can use many such
hidden layers making the architecture deep. The tutorial will also tackle
the problem of MNIST digit classification.
.. math::
f(x) = G( b^{(2)} + W^{(2)}( s( b^{(1)} + W^{(1)} x))),
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 5
"""
from __future__ import print_function
__docformat__ = 'restructedtext en'
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from logistic_sgd import LogisticRegression, load_data
# start-snippet-1
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
        Typical hidden layer of an MLP: units are fully-connected and have
        a sigmoidal activation function. Weight matrix W is of shape (n_in, n_out)
        and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
# end-snippet-1
        # `W` is initialized with `W_values`, which is uniformly sampled
        # from -sqrt(6./(n_in+n_out)) to sqrt(6./(n_in+n_out))
        # for the tanh activation function.
        # The output of uniform is converted using asarray to dtype
        # theano.config.floatX so that the code is runnable on GPU.
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
        # We have no info for other functions, so we use the same as
# tanh.
if W is None:
W_values = numpy.asarray(
rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (
lin_output if activation is None
else activation(lin_output)
)
# parameters of the model
self.params = [self.W, self.b]
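# Numeric example of the initialization above: with n_in=784 and n_out=500,
# W is drawn uniformly from about +/- sqrt(6/1284) ~= 0.068 for tanh, and that
# range is multiplied by 4 when the activation is sigmoid.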
# start-snippet-2
class MLP(object):
"""Multi-Layer Perceptron Class
A multilayer perceptron is a feedforward artificial neural network model
    that has one or more layers of hidden units and nonlinear activations.
Intermediate layers usually have as activation function tanh or the
sigmoid function (defined here by a ``HiddenLayer`` class) while the
top layer is a softmax layer (defined here by a ``LogisticRegression``
class).
"""
def __init__(self, rng, input, n_in, n_hidden, n_out):
"""Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int
:param n_hidden: number of hidden units
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# Since we are dealing with a one hidden layer MLP, this will translate
# into a HiddenLayer with a tanh activation function connected to the
# LogisticRegression layer; the activation function can be replaced by
# sigmoid or any other nonlinear function
self.hiddenLayer = HiddenLayer(
rng=rng,
input=input,
n_in=n_in,
n_out=n_hidden,
activation=T.tanh
)
# The logistic regression layer gets as input the hidden units
# of the hidden layer
self.logRegressionLayer = LogisticRegression(
input=self.hiddenLayer.output,
n_in=n_hidden,
n_out=n_out
)
# end-snippet-2 start-snippet-3
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = (
abs(self.hiddenLayer.W).sum()
+ abs(self.logRegressionLayer.W).sum()
)
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = (
(self.hiddenLayer.W ** 2).sum()
+ (self.logRegressionLayer.W ** 2).sum()
)
# negative log likelihood of the MLP is given by the negative
# log likelihood of the output of the model, computed in the
# logistic regression layer
self.negative_log_likelihood = (
self.logRegressionLayer.negative_log_likelihood
)
# same holds for the function computing the number of errors
self.errors = self.logRegressionLayer.errors
        # the parameters of the model are the parameters of the two layers it is
# made out of
self.params = self.hiddenLayer.params + self.logRegressionLayer.params
# end-snippet-3
# keep track of model input
self.input = input
def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
dataset='mnist.pkl.gz', batch_size=20, n_hidden=500):
"""
Demonstrate stochastic gradient descent optimization for a multilayer
perceptron
This is demonstrated on MNIST.
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
    gradient)
:type L1_reg: float
:param L1_reg: L1-norm's weight when added to the cost (see
regularization)
:type L2_reg: float
:param L2_reg: L2-norm's weight when added to the cost (see
regularization)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: the path of the MNIST dataset file from
http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size
######################
# BUILD ACTUAL MODEL #
######################
print('... building the model')
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
rng = numpy.random.RandomState(1234)
# construct the MLP class
classifier = MLP(
rng=rng,
input=x,
n_in=28 * 28,
n_hidden=n_hidden,
n_out=10
)
# start-snippet-4
# the cost we minimize during training is the negative log likelihood of
# the model plus the regularization terms (L1 and L2); cost is expressed
# here symbolically
cost = (
classifier.negative_log_likelihood(y)
+ L1_reg * classifier.L1
+ L2_reg * classifier.L2_sqr
)
# end-snippet-4
# compiling a Theano function that computes the mistakes that are made
# by the model on a minibatch
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size:(index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]
}
)
# start-snippet-5
# compute the gradient of cost with respect to theta (sorted in params)
# the resulting gradients will be stored in a list gparams
gparams = [T.grad(cost, param) for param in classifier.params]
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs
# given two lists of the same length, A = [a1, a2, a3, a4] and
# B = [b1, b2, b3, b4], zip generates a list C of same size, where each
# element is a pair formed from the two lists :
# C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
updates = [
(param, param - learning_rate * gparam)
for param, gparam in zip(classifier.params, gparams)
]
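    # Each (param, update) pair above encodes plain SGD,
    # theta <- theta - learning_rate * d(cost)/d(theta), applied independently
    # to every parameter tensor (the W and b of both layers).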
# compiling a Theano function `train_model` that returns the cost, but
# in the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-5
###############
# TRAIN MODEL #
###############
print('... training')
# early-stopping parameters
    patience = 10000  # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience // 2)
    # go through this many
    # minibatches before checking the network
    # on the validation set; in this case we
    # check every epoch
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in range(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in range(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if (
this_validation_loss < best_validation_loss *
improvement_threshold
):
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [test_model(i) for i
in range(n_test_batches)]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print(('Optimization complete. Best validation score of %f %% '
'obtained at iteration %i, with test performance %f %%') %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print(('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)
if __name__ == '__main__':
test_mlp()
| bsd-3-clause |
googlearchive/rgc-models | response_model/python/metric_learning/end_to_end/bookkeeping.py | 1 | 7727 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Book-keeping operations (initialize, save/reload models, summary)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
FLAGS = tf.app.flags.FLAGS
def get_filename(training_datasets, testing_datasets, beta, sr_model):
"""Get filename for saving results."""
if sr_model == 'convolutional_embedding':
model_str = '' # For past consistency.
else:
model_str = '_%s' % sr_model
file_name = ('end_to_end%s_stim_%s_resp_%s_beta_%d_taskid_%d'
'_training_%s_testing_%s_%s' % (model_str,
FLAGS.stim_layers,
FLAGS.resp_layers, beta,
FLAGS.taskid,
str(training_datasets)[1: -1],
str(testing_datasets)[1: -1],
FLAGS.save_suffix))
if FLAGS.subsample_cells_train:
file_name += '_subsample_cells'
if sr_model == 'convolutional_encoder_2' or sr_model == 'convolutional_encoder_using_retina_id':
file_name += ('reg_%.3f_%.3f_%.3f_%.3f' % (FLAGS.scale_regularization,
FLAGS.scale_triplet,
FLAGS.scale_encode,
FLAGS.scale_decode))
if FLAGS.use_EIs:
file_name += ('_bn_ei_%s_ei_layers_%s_scales_%.3f_%.3f_%.3f' %
(FLAGS.batch_norm_ei, FLAGS.ei_layers,
FLAGS.scale_encode_from_ei,
FLAGS.scale_regularization_from_ei,
FLAGS.scale_match_embeddding))
return file_name
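# For example, with sr_model='convolutional_embedding', beta=1, taskid=0,
# training_datasets=[1, 2] and testing_datasets=[3], the returned name is
# 'end_to_end_stim_<stim_layers>_resp_<resp_layers>_beta_1_taskid_0'
# '_training_1, 2_testing_3_<save_suffix>' (note the ', ' left by slicing
# str(list)), plus extra suffixes when the subsample/encoder/EI flags are set.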
def initialize_model(save_folder, file_name, sess):
"""Setup model variables and saving information.
  Args:
save_folder (string) : Folder to store model.
Makes one if it does not exist.
file_name (string) : Prefix of model/checkpoint files.
sess : Tensorflow session.
  Returns:
saver_var : TF op for saving parameters.
start_iter (int): loaded iteration to start from.
"""
# Make folder.
if not gfile.IsDirectory(save_folder):
gfile.MkDir(save_folder)
# Initialize variables.
saver_var, start_iter = initialize_variables(sess, save_folder, file_name)
return saver_var, start_iter
def initialize_variables(sess, save_folder, short_filename):
"""Initialize variables de-novo or restore from previous fits."""
sess.run(tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer()))
saver_var = tf.train.Saver(tf.global_variables(),
keep_checkpoint_every_n_hours=5)
load_prev = False
start_iter = 0
try:
# Restore previous fits if they are available
# - useful when programs are preempted frequently on .
latest_filename = short_filename + '_latest_fn'
restore_file = tf.train.latest_checkpoint(save_folder,
latest_filename)
# Restore previous iteration count and start from there.
start_iter = int(restore_file.split('/')[-1].split('-')[-1])
saver_var.restore(sess, restore_file) # restore variables
load_prev = True
except ValueError:
tf.logging.info('No previous dataset - cant load file')
except AttributeError:
tf.logging.info('No previous dataset - cant find start_iter')
if load_prev:
tf.logging.info('Previous results loaded from: ' + restore_file)
else:
tf.logging.info('Variables initialized')
tf.logging.info('Loaded iteration: %d' % start_iter)
return saver_var, start_iter
def save_model(saver_var, save_folder, file_name, sess, iiter):
"""Save model variables."""
latest_filename = file_name + '_latest_fn'
long_filename = os.path.join(save_folder, file_name)
saver_var.save(sess, long_filename, global_step=iiter,
latest_filename=latest_filename)
def write_tensorboard(sess, stim_test_embed, labels, label_images=None,
embedding_name='stim_embed',
log_dir='/home/bhaishahster/tb',
model_name='model'):
"""Save embedding to visualize in tensorboard."""
## Tensorboard embedding visualization
# from tf.contrib.tensorboard.plugins import projector
projector = tf.contrib.tensorboard.plugins.projector
# Create randomly initialized embedding weights which will be trained.
embedding_var = tf.Variable(stim_test_embed.astype(np.float32),
name=embedding_name)
config = projector.ProjectorConfig()
# You can add multiple embeddings. Here we add only one.
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
# Link this tensor to its metadata file (e.g. labels).
embedding.metadata_path = os.path.join(log_dir,
'metadata.tsv')
# Optionally, give label images.
if label_images is not None:
embedding.sprite.image_path = os.path.join(log_dir,
'sprite_images_%s.png' %
model_name)
embedding.sprite.single_image_dim.extend([label_images.shape[1],
label_images.shape[2]])
# Use the same LOG_DIR where you stored your checkpoint.
summary_writer = tf.summary.FileWriter(log_dir)
# The next line writes a projector_config.pbtxt in the LOG_DIR.
# TensorBoard will read this file during startup.
projector.visualize_embeddings(summary_writer, config)
sess.run(tf.variables_initializer(var_list=[embedding_var]))
saver = tf.train.Saver(var_list=[embedding_var])
saver.save(sess, os.path.join(log_dir, '%s.ckpt' % model_name), 0)
# Create metadata
with open(embedding.metadata_path, 'w') as f:
print(embedding.metadata_path)
f.write('Index\tLabel\n')
for index, label in enumerate(labels):
f.write('%d\t%d\n' % (index, label))
if label_images is not None:
sprite_image = create_sprite_image(label_images)
plt.imsave(embedding.sprite.image_path, sprite_image, cmap='gray')
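# Hedged usage sketch for the projector helper above: embed a small random
# matrix with integer labels. The log directory is a hypothetical placeholder
# and a live tf.Session is assumed; not invoked anywhere in this script.
def _example_write_tensorboard(sess):
  demo_embed = np.random.randn(100, 16).astype(np.float32)
  demo_labels = np.random.randint(0, 9, size=100)
  write_tensorboard(sess, demo_embed, demo_labels,
                    embedding_name='demo_embed',
                    log_dir='/tmp/tb_demo',  # hypothetical path
                    model_name='demo_model')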
def create_sprite_image(images):
"""Returns a sprite image consisting of images passed as argument."""
if isinstance(images, list):
images = np.array(images) # Images should be count x width x height
img_h = images.shape[1]
img_w = images.shape[2]
n_plots = int(np.ceil(np.sqrt(images.shape[0])))
spriteimage = np.ones((img_h * n_plots, img_w * n_plots))
for i in range(n_plots):
for j in range(n_plots):
this_filter = i * n_plots + j
if this_filter < images.shape[0]:
this_img = images[this_filter]
spriteimage[i * img_h:(i + 1) * img_h,
j * img_w:(j + 1) * img_w] = this_img
return spriteimage
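# Minimal illustration of the sprite layout above: N images are tiled into a
# ceil(sqrt(N)) x ceil(sqrt(N)) grid. Purely illustrative; not called anywhere.
def _example_sprite_shape():
  thumbnails = np.zeros((10, 28, 28))  # e.g. ten 28x28 grayscale thumbnails
  sprite = create_sprite_image(thumbnails)
  # ceil(sqrt(10)) == 4, so the sprite is a 4x4 grid of 28x28 tiles.
  assert sprite.shape == (4 * 28, 4 * 28)
  return sprite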
| apache-2.0 |
pagea/unstyle | classifyTest.py | 1 | 3980 | # Load, train, and classify documents.
# This serves as a prototype for experimenting with document classification. This
# module should NOT be referred to by any other classes.
from unstyle.dochandler import DocumentExtractor
from unstyle.featuresets.basic9 import Basic9Extractor
from sklearn import svm
from sklearn import preprocessing
from sklearn import cross_validation
from sklearn.grid_search import GridSearchCV
from sklearn.feature_selection import chi2, SelectKBest
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.cross_validation import train_test_split
import codecs
import numpy as np
import matplotlib.pyplot as plt
# load our list of training document paths (original author)
labels = []
paths = []
with open("trainingDocs.txt", "r") as trainingDocs:
for line in trainingDocs.readlines():
paths.append(line.split(':')[0])
labels.append(line.split(':')[1].replace('\n', ''))
#print("Training set with label: " + line)
# load our list of test document paths
testlabels = []
testpaths = []
with open("testDocs.txt", "r") as testDocs:
for line in testDocs.readlines():
testpaths.append(line.split(':')[0])
testlabels.append(line.split(':')[1].replace('\n', ''))
# load each file in our list of paths
trainingStrings = []
for path in paths:
with codecs.open(path, "r", encoding='utf8', errors='replace') as docString:
document = docString.read()
trainingStrings.append(document)
# load each of our testfile path list
testStrings = []
for path in testpaths:
with codecs.open(path, "r", encoding='utf8', errors='replace') as docString:
document = docString.read()
testStrings.append(document)
# extract features from each document
print("Extracting features...")
extractedFeats = DocumentExtractor(Basic9Extractor(), trainingStrings)
extracted = extractedFeats.docExtract()
# extract features from our test documents:
testFeats = DocumentExtractor(Basic9Extractor(), testStrings)
testvecs = testFeats.docExtract()
# scale our feature vectors to make them suitable for SVM input
print("Scaling feature vectors...")
extracted = preprocessing.scale(extracted)
# instantiate classifier and train it
print("Instantiating classifier...")
clf = svm.SVC(probability=True, kernel='linear', class_weight='auto')
print("Fitting dataset to classifier...")
clf.fit(extracted, labels)
# do some predictions, again with test vectors scaled
print("Computing predictions...")
normalizedpredic = clf.predict(preprocessing.scale(testvecs))
# compute number of authors
authorSet = set()
for label in labels:
authorSet.add(label)
print("Comparing authors:")
for n in authorSet:
print(n)
#print(str(clf.score(preprocessing.scale(testvecs), testlabels) * 100) + "% score on this dataset.")
testvecsscaled = preprocessing.scale(testvecs)
# Cross-validation
print("Computing cross validation...")
cvIterations = 7
scores = cross_validation.cross_val_score(clf, extracted, labels,
cv=cvIterations)
print("Accuracy by " + str(cvIterations) + "-fold CV: %0.2f (+/- %0.2f)" %
(scores.mean(), scores.std() * 2))
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(authorSet))
plt.xticks(tick_marks, list(authorSet), rotation=45)
plt.yticks(tick_marks, list(authorSet))
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
y_pred = clf.predict(testvecsscaled)
print("Predictions: ", y_pred)
cm = confusion_matrix(labels, y_pred)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
np.set_printoptions(precision=2)
plt.figure()
plot_confusion_matrix(cm)
print(cm)
report = classification_report(
    labels, y_pred, target_names=[str(x) for x in range(13)])
print(report)
plt.show()
| mit |
negrinho/deep_architect | dev/hyperband/test_hyperband.py | 1 | 6905 | # Test with mnist
# Using TensorFlow Backend
# Search Space
import keras
import numpy as np
import deep_architect.modules as mo
import deep_architect.hyperparameters as hp
from deep_architect.contrib.misc.search_spaces.tensorflow.common import siso_tfm
D = hp.Discrete # Discrete Hyperparameter
def flatten():
def compile_fn(di, dh):
Flatten = keras.layers.Flatten()
def fn(di):
return {'out': Flatten(di['in'])}
return fn
return siso_tfm('Flatten', compile_fn, {}) # use siso_tfm for now
def dense(h_units):
def compile_fn(di, dh):
Dense = keras.layers.Dense(dh['units'])
def fn(di):
return {'out' : Dense(di['in'])}
return fn
return siso_tfm('Dense', compile_fn, {'units' : h_units})
def nonlinearity(h_nonlin_name):
def compile_fn(di, dh):
def fn(di):
nonlin_name = dh['nonlin_name']
if nonlin_name == 'relu':
Out = keras.layers.Activation('relu')(di['in'])
elif nonlin_name == 'tanh':
Out = keras.layers.Activation('tanh')(di['in'])
elif nonlin_name == 'elu':
Out = keras.layers.Activation('elu')(di['in'])
else:
raise ValueError
return {"out" : Out}
return fn
return siso_tfm('Nonlinearity', compile_fn, {'nonlin_name' : h_nonlin_name})
def dropout(h_keep_prob):
def compile_fn(di, dh):
Dropout = keras.layers.Dropout(dh['keep_prob'])
def fn(di):
return {'out' : Dropout(di['in'])}
return fn
return siso_tfm('Dropout', compile_fn, {'keep_prob' : h_keep_prob})
def batch_normalization():
def compile_fn(di, dh):
bn = keras.layers.BatchNormalization()
def fn(di):
return {'out' : bn(di['in'])}
return fn
return siso_tfm('BatchNormalization', compile_fn, {})
def dnn_net_simple(num_classes):
# defining hyperparameter
h_num_hidden = D([64, 128, 256, 512, 1024]) # number of hidden units for affine transform module
h_nonlin_name = D(['relu', 'tanh', 'elu']) # nonlinearity function names to choose from
h_opt_drop = D([0, 1]) # dropout optional hyperparameter; 0 is exclude, 1 is include
h_drop_keep_prob = D([0.25, 0.5, 0.75]) # dropout probability to choose from
h_opt_bn = D([0, 1]) # batch_norm optional hyperparameter
h_swap = D([0, 1]) # order of swapping for permutation
h_num_repeats = D([1, 2]) # 1 is appearing once, 2 is appearing twice
# defining search space topology
model = mo.siso_sequential([
flatten(),
mo.siso_repeat(lambda: mo.siso_sequential([
dense(h_num_hidden),
nonlinearity(h_nonlin_name),
mo.siso_permutation([
lambda: mo.siso_optional(lambda: dropout(h_drop_keep_prob), h_opt_drop),
lambda: mo.siso_optional(batch_normalization, h_opt_bn),
], h_swap)
]), h_num_repeats),
dense(D([num_classes]))
])
return model
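# Quick sketch of how this search space would be instantiated; it mirrors
# get_search_space() defined further below and assumes the deep_architect
# convention that siso_sequential returns an (inputs, outputs) pair of module
# ports. Illustrative only; not called in this test.
def _example_sample_dnn_net_simple():
    co.Scope.reset_default_scope()
    inputs, outputs = dnn_net_simple(num_classes=10)
    return inputs, outputs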
def dnn_cell(h_num_hidden, h_nonlin_name, h_swap, h_opt_drop, h_opt_bn, h_drop_keep_prob):
return mo.siso_sequential([
dense(h_num_hidden),
nonlinearity(h_nonlin_name),
mo.siso_permutation([
lambda: mo.siso_optional(lambda: dropout(h_drop_keep_prob), h_opt_drop),
lambda: mo.siso_optional(batch_normalization, h_opt_bn),
], h_swap)])
def dnn_net(num_classes):
h_nonlin_name = D(['relu', 'tanh', 'elu'])
h_swap = D([0, 1])
h_opt_drop = D([0, 1])
h_opt_bn = D([0, 1])
return mo.siso_sequential([
flatten(),
mo.siso_repeat(lambda: dnn_cell(
D([64, 128, 256, 512, 1024]),
h_nonlin_name, h_swap, h_opt_drop, h_opt_bn,
D([0.25, 0.5, 0.75])), D([1, 2])),
dense(D([num_classes]))])
# Evaluator
class SimpleClassifierEvaluator:
def __init__(self, train_dataset, num_classes, batch_size=256,
learning_rate=1e-3, metric='val_loss', resource_type='epoch'):
self.train_dataset = train_dataset
self.num_classes = num_classes
self.learning_rate = learning_rate
self.batch_size = batch_size
self.val_split = 0.1 # 10% of dataset for validation
self.metric = metric
self.resource_type = resource_type
def evaluate(self, inputs, outputs, resource):
keras.backend.clear_session()
(x_train, y_train) = self.train_dataset
X = keras.layers.Input(x_train[0].shape)
co.forward({inputs['in'] : X})
logits = outputs['out'].val
probs = keras.layers.Softmax()(logits)
model = keras.models.Model(inputs=[inputs['in'].val], outputs=[probs])
optimizer = keras.optimizers.Adam(lr=self.learning_rate)
model.compile(optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.summary()
history = model.fit(x_train, y_train,
batch_size=self.batch_size,
epochs=resource,
validation_split=self.val_split)
final_val_acc = history.history['val_acc'][-1]
metric_values = history.history['val_acc'] if self.metric == 'val_accuracy' else history.history['loss']
info = {self.resource_type: [i for i in range(resource)],
self.metric: metric_values}
results = {'val_accuracy': final_val_acc,
'history': info}
return results
# Main/Searcher
import deep_architect.searchers.random as se
import deep_architect.core as co
from keras.datasets import mnist
from hyperband import SimpleArchitectureSearchHyperBand
def get_search_space(num_classes):
def fn():
co.Scope.reset_default_scope()
inputs, outputs = dnn_net(num_classes)
return inputs, outputs, {}
return fn
def main():
num_classes = 10
num_samples = 3 # number of architecture to sample
metric = 'val_accuracy' # evaluation metric
resource_type = 'epoch'
max_resource = 81 # max resource that a configuration can have
# load and normalize data
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# defining searcher and evaluator
    evaluator = SimpleClassifierEvaluator((x_train, y_train), num_classes,
                                          metric=metric, resource_type=resource_type)
searcher = se.RandomSearcher(get_search_space(num_classes))
    hyperband = SimpleArchitectureSearchHyperBand(searcher, evaluator, metric, resource_type)
(best_config, best_perf) = hyperband.evaluate(max_resource)
print("Best %s is %f with architecture %d" % (metric, best_perf[0], best_config[0]))
if __name__ == "__main__":
main() | mit |
heli522/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 352 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
ifuding/Kaggle | PMRCN/Code/siamese_net.py | 1 | 22230 |
from sklearn import *
import sklearn
import pandas as pd
import numpy as np
import xgboost as xgb
import lightgbm as lgb
from time import gmtime, strftime
import numpy.random as rng
from multiprocessing.dummy import Pool
import h5py
import concurrent.futures
import sys
import tensorflow as tf
import multiprocessing as mp
from sklearn.cross_validation import KFold
from keras.models import Sequential, Model
from keras.layers.core import Dense, Dropout, Flatten, Reshape
from keras.layers.normalization import BatchNormalization
from keras.layers.embeddings import Embedding
from keras.layers import Input, concatenate, merge
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
from keras.optimizers import SGD, RMSprop, Adam
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
from keras import backend as K
from sklearn.metrics import log_loss
from keras import __version__ as keras_version
graph = tf.get_default_graph()
HIDDEN_UNITS = [64, 16, 8]
DNN_EPOCHS = 40
BATCH_SIZE = 5
DNN_BN = True
DROPOUT_RATE = 0.5
SIAMESE_PAIR_SIZE = 100000
MAX_WORKERS = 8
EMBEDDING_SIZE = 6
full_feature = True
data_folder = '../Data/'
train = pd.read_csv(data_folder + 'training_variants')
#print train.dtypes
test = pd.read_csv(data_folder + 'test_variants')
trainx = pd.read_csv(data_folder + 'training_text', sep="\|\|", engine='python', header=None, skiprows=1, names=["ID","Text"])
#print trainx.dtypes
testx = pd.read_csv(data_folder + 'test_text', sep="\|\|", engine='python', header=None, skiprows=1, names=["ID","Text"])
train = pd.merge(train, trainx, how='left', on='ID').fillna('')
#train = train.iloc[1:1000]
y = train['Class'].values
train = train.drop(['Class'], axis=1)
test = pd.merge(test, testx, how='left', on='ID').fillna('')
pid = test['ID'].values
#df_all = pd.concat((train, test), axis=0, ignore_index=True)
#df_all['Gene_Share'] = df_all.apply(lambda r: sum([1 for w in r['Gene'].split(' ') if w in r['Text'].split(' ')]), axis=1).astype(np.int8)
#df_all['Variation_Share'] = df_all.apply(lambda r: sum([1 for w in r['Variation'].split(' ') if w in r['Text'].split(' ')]), axis=1).astype(np.int8)
#
#print df_all[['Gene_Share', 'Variation_Share']].max()
## exit(0)
#if full_feature:
# #commented for Kaggle Limits
# for i in range(5):
# df_all['Gene_'+str(i)] = df_all['Gene'].map(lambda x: str(x[i]) if len(x)>i else '')
# df_all['Variation'+str(i)] = df_all['Variation'].map(lambda x: str(x[i]) if len(x)>i else '')
# print df_all.dtypes
#
# gen_var_lst = sorted(list(train.Gene.unique()) + list(train.Variation.unique()))
# print(len(gen_var_lst))
# gen_var_lst = [x for x in gen_var_lst if len(x.split(' '))==1]
# print(len(gen_var_lst))
# i_ = 0
# #commented for Kaggle Limits
# for gen_var_lst_itm in gen_var_lst:
# if i_ % 100 == 0: print(i_)
# df_all['GV_'+str(gen_var_lst_itm)] = df_all['Text'].map(lambda x: str(x).count(str(gen_var_lst_itm))).astype(np.int8)
# i_ += 1
# if i_ == 5:
# break
#
#for c in df_all.columns:
# if df_all[c].dtype == 'object':
# if c in ['Gene','Variation']:
# lbl = preprocessing.LabelEncoder()
# df_all[c+'_lbl_enc'] = lbl.fit_transform(df_all[c].values)
# df_all[c+'_len'] = df_all[c].map(lambda x: len(str(x)))
# df_all[c+'_words'] = df_all[c].map(lambda x: len(str(x).split(' ')))
# elif c != 'Text':
# lbl = preprocessing.LabelEncoder()
# df_all[c] = lbl.fit_transform(df_all[c].values)
# if c=='Text':
# df_all[c+'_len'] = df_all[c].map(lambda x: len(str(x)))
# df_all[c+'_words'] = df_all[c].map(lambda x: len(str(x).split(' ')))
#
#train = df_all.iloc[:len(train)]
#print "... train dtypes before svd ..."
#print train.dtypes
#print train.head()
#exit(0)
#test = df_all.iloc[len(train):]
#
#class cust_regression_vals(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
# def fit(self, x, y=None):
# return self
# def transform(self, x):
# x = x.drop(['Gene', 'Variation','ID','Text'],axis=1).values
# return x
#
#class cust_txt_col(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
# def __init__(self, key):
# self.key = key
# def fit(self, x, y=None):
# return self
# def transform(self, x):
# return x[self.key].apply(str)
#
#print('Pipeline...')
#fp = pipeline.Pipeline([
# ('union', pipeline.FeatureUnion(
# n_jobs = -1,
# transformer_list = [
# ('standard', cust_regression_vals()),
# ('pi1', pipeline.Pipeline([('Gene', cust_txt_col('Gene')), ('count_Gene', feature_extraction.text.CountVectorizer(analyzer=u'char', ngram_range=(1, 8))), ('tsvd1', decomposition.TruncatedSVD(n_components=20, n_iter=25, random_state=12))])),
# ('pi2', pipeline.Pipeline([('Variation', cust_txt_col('Variation')), ('count_Variation', feature_extraction.text.CountVectorizer(analyzer=u'char', ngram_range=(1, 8))), ('tsvd2', decomposition.TruncatedSVD(n_components=20, n_iter=25, random_state=12))])),
# #commented for Kaggle Limits
# ('pi3', pipeline.Pipeline([('Text', cust_txt_col('Text')), ('tfidf_Text', feature_extraction.text.TfidfVectorizer(ngram_range=(1, 2))), ('tsvd3', decomposition.TruncatedSVD(n_components=50, n_iter=25, random_state=12))]))
# ])
# )])
#
#train = fp.fit_transform(train);
#print type(train)
#print(train.shape)
#print (train.nbytes)
#np.save("train_array", train)
## print(df.dtypes)
## print(df.memory_usage())
#test = fp.transform(test); print(test.shape)
#np.save("test_array", test)
#exit(0)
train = np.load("./train_array.npy")
test = np.load("./test_array.npy")
# siamese_features_array = np.load("./siamese_features_array_2017_09_15_07_57_44.npy")
y = y - 1 #fix for zero bound array
CONTINUOUS_INDICES = []
SPARSE_INDICES = []
for i in range((train.shape)[1]):
if (i >= 3205 and i <= 3212):
pass
elif (i >= 2 and i <= 113): # or (i >= 114 and i <= 3204):
SPARSE_INDICES.append(i)
else:
CONTINUOUS_INDICES.append(i)
#train = train[:, CONTINUOUS_INDICES]
#test = test[:, CONTINUOUS_INDICES]
print('train shape after loading and selecting trainging columns: %s' % str(train.shape))
siamese_train_len = len(train) // 3
print('siamese_train_len is %d' % (siamese_train_len))
siamese_train_data = train[:siamese_train_len]
siamese_train_label = y[:siamese_train_len]
lgbm_train_data = train[siamese_train_len:]
lgbm_train_label = y[siamese_train_len:]
#train = train[:200]
#y = y[:200]
#test = test[:200]
#pid = pid[:200]
def xgbTrain(train_data, train_label, fold = 5):
"""
"""
denom = 0
fold = 5 #Change to 5, 1 for Kaggle Limits
models = []
for i in range(fold):
params = {
'eta': 0.03333,
'max_depth': 4,
'objective': 'multi:softprob',
'eval_metric': 'mlogloss',
'num_class': 9,
'seed': i,
'silent': True
}
x1, x2, y1, y2 = model_selection.train_test_split(train_data, train_label, test_size=0.18, random_state=i)
watchlist = [(xgb.DMatrix(x1, y1), 'train'), (xgb.DMatrix(x2, y2), 'valid')]
model = xgb.train(params, xgb.DMatrix(x1, y1), 1000, watchlist, verbose_eval=50, early_stopping_rounds=100)
score1 = metrics.log_loss(y2, model.predict(xgb.DMatrix(x2), ntree_limit=model.best_ntree_limit), labels = list(range(9)))
#print(score1)
models.append((model, 'x'))
return models
def lgbm_train(train_data, train_label, fold = 5):
"""
LGB Training
"""
# print train.shape
# print siamese_features_array.shape
# train_merge = siamese_features_array #np.concatenate((train, siamese_features_array), axis = 1)
# print train_merge.shape
# # exit(0)
print("Over all training size:")
print(train_data.shape)
# train_data = train_merge#[:train_len * 3 / 10]
# train_label = lgbm_train_label#[:train_len * 3 / 10]
#valide_data = train_merge[train_len * 9 / 10:]
#valide_label = y[train_len * 9 / 10:]
models = []
for i in range(fold):
d_train = lgb.Dataset(train_data, train_label) #, categorical_feature = SPARCE_INDICES)
#d_valide = lgb.Dataset(valide_data, valide_label)
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'multiclass',
'metric': {'multi_logloss'},
'num_class': 9,
# 'num_leaves': 256,
# 'max_depth': 12,
# 'feature_fraction': 0.9,
# 'bagging_fraction': 0.95,
# 'bagging_freq': 5,
'num_leaves': 60, # 60,
# 'min_sum_hessian_in_leaf': 20,
'max_depth': 10, # 10,
'learning_rate': 0.02, # 0.02,
'feature_fraction': 0.5,
'verbose': 0,
# 'valid_sets': [d_valide],
'num_boost_round': 327,
'feature_fraction_seed': i,
# 'bagging_fraction': 0.9,
# 'bagging_freq': 15,
# 'bagging_seed': i,
# 'early_stopping_round': 10
# 'random_state': 10
# 'verbose_eval': 20
#'min_data_in_leaf': 665
}
# ROUNDS = 1
print('fold: %d th light GBM train :-)' % (i))
# params['feature_fraction_seed'] = i
#bst = lgb.train(
# params ,
# d_train,
# verbose_eval = False
# # valid_sets = [d_valide]
# #num_boost_round = 1
# )
cv_result = lgb.cv(params, d_train, nfold=10)
pd.DataFrame(cv_result).to_csv('cv_result', index = False)
exit(0)
# pred = model_eval(bst, 'l', test)
#print pred.shape
#print pred[0, :]
models.append((bst, 'l'))
return models
def create_model(input_len):
model = Sequential()
model.add(Dense(HIDDEN_UNITS[0], activation='sigmoid', input_dim = input_len))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
model.add(Dense(HIDDEN_UNITS[1], activation='sigmoid'))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
# model.add(Dropout(0.1))
#model.add(Dense(32, activation='relu'))
#model.add(Dropout(0.2))
model.add(Dense(9, activation='softmax'))
# optimizer = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
optimizer = RMSprop(lr=1e-3, rho = 0.9, epsilon = 1e-8)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics = ['accuracy'])
return model
def create_embedding_model(CONTINUE_SIZE, SPARSE_SIZE):
"""
"""
print('CONTINUOUS_SIZE = %d' % CONTINUE_SIZE)
print('SPARSE_SIZE = %d' % SPARSE_SIZE)
sparse_feature = Input(shape=(SPARSE_SIZE,))
sparse_embedding = Embedding(55, EMBEDDING_SIZE, input_length = SPARSE_SIZE)(sparse_feature)
sparse_embedding = Reshape((EMBEDDING_SIZE * SPARSE_SIZE,))(sparse_embedding)
# print "model input size: %d" % CONTINUOUS_COLUMNS
dense_input = Input(shape=(CONTINUE_SIZE,))
merge_input = concatenate([dense_input, sparse_embedding], axis = 1)
merge_len = CONTINUE_SIZE + EMBEDDING_SIZE * SPARSE_SIZE
output = create_model(merge_len)(merge_input)
model = Model([dense_input, sparse_feature], output)
optimizer = RMSprop(lr=1e-3, rho = 0.9, epsilon = 1e-8)
# optimizer = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer = Adam(),
loss='categorical_crossentropy', metrics = ['accuracy'])
return model
def keras_train(train_data, train_target, nfolds = 10):
"""
Detect Fish or noFish
"""
print("Start gen training data, shuffle and normalize!")
#train_data = train
train_target = np_utils.to_categorical(train_target)
# train_data, train_target, siamese_data_loader = siamese_train(siamese_train_data, siamese_train_label)
kf = KFold(len(train_target), n_folds=nfolds, shuffle=True)
num_fold = 0
models = []
for train_index, test_index in kf:
# model = create_model(classes = 2)
model = create_embedding_model(len(CONTINUOUS_INDICES), len(SPARSE_INDICES))
# model = create_siamese_net((train.shape)[1])
X_train = train_data[train_index]
Y_train = train_target[train_index]
print('Positive samples in train: %d' % np.sum(Y_train))
print('Negative samples in train: %d' % (len(Y_train) - np.sum(Y_train)))
X_valid = train_data[test_index]
Y_valid = train_target[test_index]
print('Positive samples in valide: %d' % np.sum(Y_valid))
print('Negative samples in valide: %d' % (len(Y_valid) - np.sum(Y_valid)))
num_fold += 1
print('Start KFold number {} from {}'.format(num_fold, nfolds))
print('Split train: ', len(X_train), len(Y_train))
print('Split valid: ', len(X_valid), len(Y_valid))
callbacks = [
EarlyStopping(monitor='val_loss', patience=5, verbose=0),
]
model.fit([X_train[:, CONTINUOUS_INDICES], X_train[:, SPARSE_INDICES]],
Y_train, batch_size=BATCH_SIZE, epochs=DNN_EPOCHS,
shuffle=True, verbose=2,
validation_data=([X_valid[:, CONTINUOUS_INDICES], X_valid[:, SPARSE_INDICES]], Y_valid)
, callbacks=callbacks)
model_name = 'keras' + strftime('_%Y_%m_%d_%H_%M_%S', gmtime())
#model.save_weights(model_name)
#siamese_features_array = gen_siamese_features(model, lgbm_train_data, siamese_train_data, siamese_train_label)
models.append((model, 'k'))
break
return models #, siamese_features_array
def model_eval(model, model_type, data_frame):
"""
"""
if model_type == 'l':
preds = model.predict(data_frame)
elif model_type == 'k':
preds = model.predict(data_frame, batch_size=BATCH_SIZE, verbose=2)
elif model_type == 't':
print("ToDO")
elif model_type == 'x':
preds = model.predict(xgb.DMatrix(data_frame), ntree_limit=model.best_ntree_limit+80)
return preds
def gen_sub(models, merge_features):
"""
Evaluate single Type model
"""
print('Start generate submission!')
preds = None
for (model, model_type) in models:
pred = model_eval(model, model_type, merge_features)
#print pred.shape
#print pred[0, :]
if preds is None:
preds = pred.copy()
else:
preds += pred
preds /= len(models)
submission = pd.DataFrame(preds, columns=['class'+str(c+1) for c in range(9)])
submission['ID'] = pid
sub_name = "submission" + strftime('_%Y_%m_%d_%H_%M_%S', gmtime()) + ".csv"
print('Output to ' + sub_name)
submission.to_csv(sub_name, index=False)
def create_siamese_net(input_size):
"""
"""
input_shape = (input_size, )
left_input = Input(input_shape)
right_input = Input(input_shape)
#build model to use in each siamese 'leg'
model = Sequential()
model.add(Dense(HIDDEN_UNITS[0], activation='sigmoid', input_dim = input_size))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
model.add(Dense(HIDDEN_UNITS[1], activation='sigmoid'))
if DNN_BN:
model.add(BatchNormalization())
if DROPOUT_RATE > 0:
model.add(Dropout(DROPOUT_RATE))
#encode each of the two inputs into a vector with the convnet
encoded_l = model(left_input)
encoded_r = model(right_input)
#merge two encoded inputs with the l1 distance between them
L1_distance = lambda x: K.abs(x[0]-x[1])
both = merge([encoded_l,encoded_r], mode = L1_distance, output_shape=lambda x: x[0])
merge_layer = Dense(HIDDEN_UNITS[2],activation='sigmoid')(both)
prediction = Dense(1,activation='sigmoid')(merge_layer)
siamese_net = Model(input=[left_input,right_input],output=prediction)
#optimizer = SGD(0.0004,momentum=0.6,nesterov=True,decay=0.0003)
optimizer = Adam()
    #//TODO: get layerwise learning rates and momentum annealing scheme described in the paper
siamese_net.compile(loss="binary_crossentropy",optimizer=optimizer)
# print siamese_net.count_params()
return siamese_net
class Siamese_Loader:
#For loading batches and testing tasks to a siamese net
def __init__(self,Xtrain,Xval = None):
self.Xval = Xval
self.Xtrain = Xtrain
self.n_classes = Xtrain.shape[0]
self.feature_size = (Xtrain[0].shape)[1]
self.n_examples = np.array([x.shape[0] for x in Xtrain])
self.n_tot_examples = np.sum(self.n_examples)
print('examples of different classes: %s' % str(self.n_examples))
# self.n_val,self.n_ex_val,_,_ = Xval.shape
def get_batch(self,n):
#Create batch of pairs, half same class, half different class
categories = rng.choice(self.n_classes,size=(n,),replace=True)
pairs=np.zeros((2, n, self.feature_size))
targets=np.zeros((n,))
positive_begin_pos = n * 1 // 2
targets[positive_begin_pos:] = 1
categories_list = []
for i in range(n):
category = categories[i]
idx_1 = rng.randint(0, self.n_examples[category])
pairs[0][i] = self.Xtrain[category][idx_1] #.reshape(self.feature_size)
#pick images of same class for 1st half, different for 2nd
category_2 = category if i >= positive_begin_pos else (category + rng.randint(1,self.n_classes)) % self.n_classes
idx_2 = rng.randint(0,self.n_examples[category_2])
while i >= positive_begin_pos and idx_2 == idx_1:
idx_2 = rng.randint(0,self.n_examples[category_2])
pairs[1][i] = self.Xtrain[category_2][idx_2] #.reshape(self.w,self.h,1)
categories_list.append((category, category_2))
#pd.DataFrame(categories_list).to_csv('categories', index=False)
#exit(0)
        # shuffle pairs (and targets in lockstep) to mix positive and negative examples
        perm = rng.permutation(n)
        pairs = pairs[:, perm]
        targets = targets[perm]
return pairs, targets
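# Illustrative use of the loader above, assuming the per-class feature arrays
# built by siamese_train() further below; the resulting pairs are what
# create_siamese_net() consumes as its [left, right] inputs. Not called here.
def _example_siamese_batch(per_class_features):
    loader = Siamese_Loader(per_class_features)
    pairs, targets = loader.get_batch(BATCH_SIZE)
    # pairs has shape (2, batch, feature_size); targets[i] is 1 when the i-th
    # pair comes from the same class and 0 otherwise.
    return list(pairs), targets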
def gen_test_on_support_data(Xsupport, Xtest):
"""
"""
n_support, feature_size = Xsupport.shape
pairs = np.zeros((2, n_support, feature_size))
pairs[0] = Xtest
pairs[1] = Xsupport
return list(pairs)
def siamese_train(siamese_train_data, siamese_train_label):
"""
"""
train_data = [[] for i in range(9)]
label_ind = 0
for feature in siamese_train_data:
train_data[siamese_train_label[label_ind]].append(feature)
label_ind += 1
train_data = np.array([np.array(xi) for xi in train_data])
print("train data shape before gen pair")
print(train_data.shape)
siamese_data_loader = Siamese_Loader(train_data, test)
pairs, targets = siamese_data_loader.get_batch(SIAMESE_PAIR_SIZE)
return pairs, targets, siamese_data_loader
def gen_siamese_features_meta(model, Xsupport_label, Xsupport, Xtest):
"""
"""
siamese_pair = gen_test_on_support_data(Xsupport, Xtest)
global graph
with graph.as_default():
preds = model.predict(siamese_pair, batch_size=BATCH_SIZE, verbose=2)
preds = np.insert(preds, 1, Xsupport_label, axis = 1)
preds = pd.DataFrame(preds, columns = ['sim', 'class'])
siamese_features = preds.groupby('class', sort = False) \
.agg({'sim': ['max', 'min', 'median', 'mean', 'std']})
max_class = siamese_features['sim']['max'].idxmax()
siamese_features = np.insert(siamese_features.values.flatten(), 0, max_class, axis = 0)
return siamese_features
def gen_siamese_features(siamese_model, Xtest, Xsupport, Xsupport_label):
"""
"""
if MAX_WORKERS <= 0:
print("MAX_WORKERS should >= 1", file=sys.stderr)
exit(1)
siamese_features_array = list(range(len(Xtest)))
test_begin = 0
while test_begin < len(Xtest):
test_end = min(test_begin + MAX_WORKERS, len(Xtest))
with concurrent.futures.ThreadPoolExecutor(max_workers = MAX_WORKERS) as executor:
future_predict = {executor.submit(gen_siamese_features_meta, siamese_model,
Xsupport_label,
Xsupport,
Xtest[ind]): ind for ind in range(test_begin, test_end)}
for future in concurrent.futures.as_completed(future_predict):
ind = future_predict[future]
try:
siamese_features = future.result()
siamese_features_array[ind] = siamese_features
except Exception as exc:
print('%dth feature generated an exception: %s' % (ind, exc))
test_begin = test_end
if test_begin % 100 == 0:
print('Gen %d siamsese features' % test_begin)
if test_begin != len(Xtest):
print("Only gen %d siamese features" % test_begin, file=sys.stderr)
exit(1)
siamese_features_array = np.array(siamese_features_array)
pd.DataFrame(siamese_features_array[:, 0]).astype(np.int8).to_csv('pred_label', index = False)
return siamese_features_array
if __name__ == "__main__":
model_k = keras_train(train, y, 10)
#np.save("siamese_features_array" + \
# strftime('_%Y_%m_%d_%H_%M_%S', gmtime()) , siamese_features_array)
# gen_sub(model_k, 'k', th, F1)
# ind = np.array([i * 5 for i in range(9)])
# xgbTrain(siamese_features_array[:, ind], lgbm_train_label);
#lgbm_features = siamese_features_array #np.concatenate((lgbm_train_data, siamese_features_array),
# model_l = lgbm_train(train, y, 10) #lgbm_features, lgbm_train_label, 10)#model_k)
# siamese_features_test_array = siamese_test(model_k[0][0], test)
#np.save("siamese_features_test_array" + \
# strftime('_%Y_%m_%d_%H_%M_%S', gmtime()) , siamese_features_test_array)
##model_x = xgbTrain(5)#model_k)
#gen_sub(model_l, siamese_features_test_array) #model_k)
| apache-2.0 |
DonBeo/scikit-learn | sklearn/tests/test_kernel_approximation.py | 242 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
    # abbreviations for easier formulas
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
    # abbreviations for easier formulas
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
assert_true(np.all(np.isfinite(Y)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
heli522/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 341 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the last 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
miguelcleon/ODM2-Admin | templatesAndSettings/urls.py | 1 | 14286 | # from ajax_select import urls as ajax_select_urls
from ajax_select import views as ajax_select_views
from django.core.management import settings
from django.urls import include, re_path
from django.contrib import admin
from django.http.response import HttpResponseRedirect
from django.conf.urls.static import static
from django.views.decorators.cache import cache_page
# from ODM2CZOData import views # How can I use config file for this??
from django.urls import reverse
import importlib
views = importlib.import_module("{}.views".format(settings.APP_NAME))
admin.autodiscover()
admin.site.site_header = settings.SITE_HEADER
admin.site.site_title = settings.SITE_TITLE
app_name="odm2admin"
# admin_site.admin_view()
urlpatterns = [re_path(r'^' + '', admin.site.urls),
#ajax_select_views.ajax_lookup, name='ajax_lookup'
               # The line below is needed instead of including ajax_select.urls, which hasn't been updated for Django 2.0.
re_path(r'^lookups/(?P<channel>[-\w]+)$',ajax_select_views.ajax_lookup,name='ajax_lookup'),
# url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
# url(r'^oauthview$', views.oauth_view, name='oauth_view'),
# re_path('', include('social_django.urls', namespace='social')),
# url(r'^login/$', auth_views.login, name='login'),
# url(r'^logout/$', auth_views.logout, name='logout'),
re_path(r'^oauth/', include('social_django.urls', namespace='social')),
re_path(r'^$', lambda r: HttpResponseRedirect('{}/'.format(settings.APP_NAME))),
re_path(r'^' + 'AddSensor', views.AddSensor, name="AddSensor"),
re_path(r'^' + 'chartIndex', views.chartIndex,
name="chartIndex"),
re_path(r'^' + 'AddProfile', views.AddProfile,
name="AddProfile"),
re_path(r'^' + 'RecordAction', views.RecordAction,
name="RecordAction"),
re_path(r'^' + 'ManageCitations', views.ManageCitations,
name="ManageCitations"),
re_path(r'^' + 'chart', views.TimeSeriesGraphing,
name="TimeSeriesGraphing"),
re_path(r'^' + 'mapdata', views.web_map, name="WebMap"),
               # url(r'^login/$', login, {'template_name': 'login.html'}),
re_path(r'^' + 'graph/$', views.TimeSeriesGraphing),
re_path(r'^' + 'graph/featureaction=(?P<feature_action>(\d+))/$',
views.TimeSeriesGraphing, name="TimeSeriesGraphing"),
# (?:featureaction-(?P<featureaction>\d+)/)?$ (?P<variable_a>(\d+)
# (?P<feature_action>(\d+))
re_path(r'^' + 'emaildata/$', views.email_data_from_graph),
re_path(r'^' + 'export_to_hydroshare/$', views.export_to_hydroshare),
re_path(r'^' + 'addannotation/$', views.add_annotation),
re_path(r'^' + 'addoffset/$', views.add_offset),
re_path(r'^' + 'addshiftvals/$', views.add_shiftvalues),
re_path(r'^' + 'addL1timeseries/$', views.addL1timeseries),
re_path(r'^' + 'processdlfile/$', views.procDataLoggerFile),
re_path(r'^' + 'preprocessdlfile/$', views.preProcDataLoggerFile),
re_path(r'^' + 'sensordashboard/featureaction=(?P<feature_action>(\d+))/', views.sensor_dashboard),
re_path(r'^' + 'sensordashboard/samplingfeature=(?P<sampling_feature>(\d+))/', views.sensor_dashboard),
re_path(r'^' + 'sensordashboard/$', views.sensor_dashboard),
re_path(r'^' + 'graphfa/featureaction=(?P<feature_action>(\d+))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/featureaction=(?P<feature_action>(\d+))/'
'startdate=(?P<startdate>(\d{4}-\d{2}-\d{2}))/'
'enddate=(?P<enddate>(\d{4}-\d{2}-\d{2}))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/featureaction=(?P<feature_action>(\d+))/'
'resultidu=(?P<resultidu>(\d+))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/samplingfeature=(?P<samplingfeature>(\d+))/'
'resultidu=\[(?P<resultidu>\d+(, \d+)*\])/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/featureaction=(?P<feature_action>(\d+))/'
'resultidu=(?P<resultidu>(\d+))/'
'startdate=(?P<startdate>(\d{4}-\d{2}-\d{2}))/'
'enddate=(?P<enddate>(\d{4}-\d{2}-\d{2}))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(r'^' + 'graph/samplingfeature=(?P<samplingfeature>(\d+))/$',
views.TimeSeriesGraphing, name="TimeSeriesGraphing"),
# (?:featureaction-(?P<featureaction>\d+)/)?$ (?P<variable_a>(\d+)
# (?P<feature_action>(\d+))
re_path(r'^' + 'graphfa/samplingfeature=(?P<samplingfeature>(\d+))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/samplingfeature=(?P<samplingfeature>(\d+))/'
'startdate=(?P<startdate>(\d{4}-\d{2}-\d{2}))/'
'enddate=(?P<enddate>(\d{4}-\d{2}-\d{2}))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/samplingfeature=(?P<samplingfeature>(\d+))/'
'resultidu=(?P<resultidu>(\d+))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/samplingfeature=(?P<samplingfeature>(\d+))/'
'resultidu=(?P<resultidu>(\d+))/'
'startdate=(?P<startdate>(\d{4}-\d{2}-\d{2}))/'
'enddate=(?P<enddate>(\d{4}-\d{2}-\d{2}))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(r'^' + 'mappopup/featureaction=(?P<feature_action>(\d+))/$',
views.mappopuploader,
name="mappopuploader"),
re_path(
r'^' + 'mappopup/samplingfeature=(?P<samplingfeature>(\d+))/'
'popup=(?P<popup>(([a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z])))/$',
views.mappopuploader, name="mappopuploader"),
re_path(
r'^' + 'graphfa/samplingfeature=(?P<samplingfeature>(\d+))/'
'popup=(?P<popup>(([a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z])))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/samplingfeature=(?P<samplingfeature>(\d+))/'
'startdate=(?P<startdate>(\d{4}-\d{2}-\d{2}))/'
'enddate=(?P<enddate>(\d{4}-\d{2}-\d{2}))/'
'popup=(?P<popup>(([a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z])))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/samplingfeature=(?P<samplingfeature>(\d+))/'
'resultidu=(?P<resultidu>(\d+))/'
'popup=(?P<popup>(([a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z])))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/samplingfeature=(?P<samplingfeature>(\d+))/'
'resultidu=(?P<resultidu>(\d+))/'
'startdate=(?P<startdate>(\d{4}-\d{2}-\d{2}))/'
'enddate=(?P<enddate>(\d{4}-\d{2}-\d{2}))/'
'popup=(?P<popup>(([a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z])))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/samplingfeature=(?P<samplingfeature>(\d+))/'
'resultidu=(?P<resultidu>(\d+))/'
'dischargeresult=(?P<dischargeresult>(\d+))/'
'startdate=(?P<startdate>(\d{4}-\d{2}-\d{2}))/'
'enddate=(?P<enddate>(\d{4}-\d{2}-\d{2}))/'
'popup=(?P<popup>(([a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z])))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/samplingfeature=(?P<samplingfeature>(\d+))/'
'resultidu=(?P<resultidu>(\d+))/'
'startdate=(?P<startdate>(\d{4}-\d{2}-\d{2}\s+\d+:\d+))/'
'enddate=(?P<enddate>(\d{4}-\d{2}-\d{2}\s+\d+:\d+))/'
'popup=(?P<popup>(([a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z])))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/samplingfeature=(?P<samplingfeature>(\d+))/'
'resultidu=(?P<resultidu>(\d+))/'
'dischargeresult=(?P<dischargeresult>(\d+))/'
'startdate=(?P<startdate>(\d{4}-\d{2}-\d{2}\s+\d+:\d+))/'
'enddate=(?P<enddate>(\d{4}-\d{2}-\d{2}\s+\d+:\d+))/'
'popup=(?P<popup>(([a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z])))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(r'^' + 'graphfa/dataset=(?P<dataset>(\d+))/$',
views.TimeSeriesGraphingShort,
name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/dataset=(?P<dataset>(\d+))/'
'resultidu=(?P<resultidu>(\d+))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/dataset=(?P<dataset>(\d+))/'
'startdate=(?P<startdate>(\d{4}-\d{2}-\d{2}))/'
'enddate=(?P<enddate>(\d{4}-\d{2}-\d{2}))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'graphfa/dataset=(?P<dataset>(\d+))/'
'resultidu=(?P<resultidu>(\d+))/'
'startdate=(?P<startdate>(\d{4}-\d{2}-\d{2}))/'
'enddate=(?P<enddate>(\d{4}-\d{2}-\d{2}))/$',
views.TimeSeriesGraphingShort, name="TimeSeriesGraphingShort"),
re_path(
r'^' + 'profilegraph/'
'samplingfeature=(?P<samplingfeature>(\d+))/$',
views.graph_data, name="graph_data"),
re_path(
r'^' + 'profilegraph/'
'samplingfeature=(?P<samplingfeature>(\d+))/'
'popup=(?P<popup>(([a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z])))/$',
views.graph_data, name="graph_data"),
re_path(
r'^' + 'profilegraph/'
'selectedrelatedfeature='
'(?P<selectedrelatedfeature>(\d+))/'
'popup=(?P<popup>(([a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z])))/$',
views.graph_data, name="graph_data"),
re_path(
r'^' + 'profilegraph/'
'selectedrelatedfeature='
'(?P<selectedrelatedfeature>(\d+))/'
'samplingfeature=(?P<samplingfeature>(\d+))/'
'popup=(?P<popup>(([a-zA-Z][a-zA-Z][a-zA-Z][a-zA-Z])))/$',
views.graph_data, name="graph_data"),
# /(?P<startdate>(\d+))/(?P<enddate>(\d+))
re_path(r'^' + 'chartVariableAndFeature.html', views.graph_data,
name="graph_data"),
re_path(r'^' + 'soilsscatterplot.html', views.scatter_plot,
name="scatter_plot"),
re_path(r'^' + 'publications.html', views.publications,
name="publications"),
re_path(r'^' + 'features/type=(?P<sf_type>([\w\s]+,?)+)&'
'datasets=(?P<ds_ids>([a-zA-Z]+)?(([0-9]+,?)+)?)', cache_page(settings.CACHE_TTL)(views.get_features)),
# url(r'^' + 'pubview/citationid=(?P<citationid>(\d+))/$',
# views.add_pub,
# name="add_pub"),
# url(r'^' + 'pubview', views.add_pub),
# for uploaded files like dataloggerfiles
# url(r'^' + MEDIA_URL +'(?P<path>.*)$', 'django.views.static.serve', {
# 'document_root': MEDIA_ROOT,
# }),
# url(r'^' + 'upfiles/(?P<path>.*)$', 'django.views.static.serve',
# {'document_root': MEDIA_ROOT}),
# url(r'^admin/DataloggerfilecolumnsDisplay.html',
# views.dataloggercolumnView,
# name="dataloggercolumnView"),
# url(r'^contas_pagar/pagamento/(?P<id_parcela>\d+)/$',
# 'contas_pagar.views.retorna_pagamentos_parcela')
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| mit |
openai/triton | python/test/unit/language/test_core.py | 1 | 59255 | # flake8: noqa: F821,F841
import itertools
import re
from typing import Optional, Union
import numpy as np
import pytest
import torch
from numpy.random import RandomState
import triton
import triton._C.libtriton.triton as _triton
import triton.language as tl
from triton.runtime.jit import JITFunction, TensorWrapper, reinterpret
int_dtypes = ['int8', 'int16', 'int32', 'int64']
uint_dtypes = ['uint8', 'uint16', 'uint32', 'uint64']
float_dtypes = ['float16', 'float32', 'float64']
dtypes = int_dtypes + uint_dtypes + float_dtypes
dtypes_with_bfloat16 = dtypes + ['bfloat16']
torch_dtypes = ['bool'] + int_dtypes + ['uint8'] + float_dtypes + ['bfloat16']
def _bitwidth(dtype: str) -> int:
# ex.: "int64" -> 64
return int(re.search(r'(\d+)$', dtype).group(1))
def numpy_random(shape, dtype_str, rs: Optional[RandomState] = None, low=None, high=None):
"""
Override `rs` if you're calling this function twice and don't want the same
result for both calls.
"""
if isinstance(shape, int):
shape = (shape, )
if rs is None:
rs = RandomState(seed=17)
if dtype_str in int_dtypes + uint_dtypes:
iinfo = np.iinfo(getattr(np, dtype_str))
low = iinfo.min if low is None else max(low, iinfo.min)
high = iinfo.max if high is None else min(high, iinfo.max)
dtype = getattr(np, dtype_str)
x = rs.randint(low, high, shape, dtype=dtype)
x[x == 0] = 1 # Hack. Never return zero so tests of division don't error out.
return x
elif dtype_str in float_dtypes:
return rs.normal(0, 1, shape).astype(dtype_str)
elif dtype_str == 'bfloat16':
return (rs.normal(0, 1, shape).astype('float32').view('uint32')
& np.uint32(0xffff0000)).view('float32')
elif dtype_str in ['bool', 'int1', 'bool_']:
return rs.normal(0, 1, shape) > 0.0
else:
raise RuntimeError(f'Unknown dtype {dtype_str}')
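# Usage sketch for the docstring note above: pass a fresh RandomState when two
# draws must differ; with the default seed, repeated calls return identical data.
def _example_numpy_random_rs():
    rs = RandomState(7)
    a = numpy_random((4, 4), 'float32', rs=rs)
    b = numpy_random((4, 4), 'float32', rs=rs)  # differs from `a`
    c = numpy_random((4, 4), 'float32')         # deterministic seed-17 draw
    return a, b, c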
def to_triton(x: np.ndarray, device='cuda', dst_type=None) -> Union[TensorWrapper, torch.Tensor]:
'''
Note: We need dst_type because the type of x can be different from dst_type.
For example: x is of type `float32`, dst_type is `bfloat16`.
If dst_type is None, we infer dst_type from x.
'''
t = x.dtype.name
if t in uint_dtypes:
signed_type_name = t.lstrip('u') # e.g. "uint16" -> "int16"
x_signed = x.astype(getattr(np, signed_type_name))
return reinterpret(torch.tensor(x_signed, device=device), getattr(tl, t))
else:
if t == 'float32' and dst_type == 'bfloat16':
return torch.tensor(x, device=device).bfloat16()
return torch.tensor(x, device=device)
def torch_dtype_name(dtype) -> str:
if isinstance(dtype, triton.language.dtype):
return dtype.name
elif isinstance(dtype, torch.dtype):
# 'torch.int64' -> 'int64'
m = re.match(r'^torch\.(\w+)$', str(dtype))
return m.group(1)
else:
raise TypeError(f'not a triton or torch dtype: {type(dtype)}')
def to_numpy(x):
if isinstance(x, TensorWrapper):
return x.base.cpu().numpy().astype(getattr(np, torch_dtype_name(x.dtype)))
elif isinstance(x, torch.Tensor):
if x.dtype is torch.bfloat16:
return x.cpu().float().numpy()
return x.cpu().numpy()
else:
raise ValueError(f"Not a triton-compatible tensor: {x}")
def patch_kernel(template, to_replace):
kernel = triton.JITFunction(template.fn)
for key, value in to_replace.items():
kernel.src = kernel.src.replace(key, value)
return kernel
def check_type_supported(dtype):
'''
skip test if dtype is not supported on the current device
'''
cc = _triton.runtime.cc(_triton.runtime.backend.CUDA, torch.cuda.current_device())
if cc < 80 and (dtype is tl.bfloat16 or dtype == "bfloat16" or dtype is torch.bfloat16):
pytest.skip("bfloat16 is only supported on NVGPU with cc >= 80")
@pytest.mark.parametrize("dtype_x", [dtype_x for dtype_x in dtypes] + ["bfloat16"])
def test_empty_kernel(dtype_x, device='cuda'):
SIZE = 128
@triton.jit
def kernel(X, SIZE: tl.constexpr):
pass
check_type_supported(dtype_x)
x = to_triton(numpy_random(SIZE, dtype_str=dtype_x), device=device, dst_type=dtype_x)
kernel[(1, )](x, SIZE=SIZE, num_warps=4)
# generic test functions
def _test_unary(dtype_x, expr, numpy_expr=None, device='cuda'):
check_type_supported(dtype_x) # early return if dtype_x is not supported
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, SIZE: tl.constexpr):
off = tl.arange(0, SIZE)
x = tl.load(X + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
x = numpy_random(SIZE, dtype_str=dtype_x)
if 'log' in expr:
x = np.abs(x) + 0.01
# reference result
z_ref = eval(expr if numpy_expr is None else numpy_expr)
# triton result
x_tri = to_triton(x, device=device, dst_type=dtype_x)
z_tri = to_triton(np.empty_like(z_ref), device=device, dst_type=dtype_x)
kernel[(1, )](z_tri, x_tri, SIZE=SIZE, num_warps=4)
# compare
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01)
def _binary_op_dtype_override(a: str, b: str) -> Optional[np.dtype]:
"""
Given two dtype strings, returns the numpy dtype Triton thinks binary
operations on the two types should return. Returns None if the return value
matches numpy. This is generally needed because Triton and pytorch return
narrower floating point types than numpy in mixed operations, and because
Triton follows C/C++ semantics around mixed signed/unsigned operations, and
numpy/pytorch do not.
"""
overrides = {
('float16', 'int16'): np.float16,
('float16', 'int32'): np.float16,
('float16', 'int64'): np.float16,
('float16', 'uint16'): np.float16,
('float16', 'uint32'): np.float16,
('float16', 'uint64'): np.float16,
('int8', 'uint8'): np.uint8,
('int8', 'uint16'): np.uint16,
('int8', 'uint32'): np.uint32,
('int8', 'uint64'): np.uint64,
('int16', 'uint16'): np.uint16,
('int16', 'uint32'): np.uint32,
('int16', 'uint64'): np.uint64,
('int32', 'uint32'): np.uint32,
('int32', 'uint64'): np.uint64,
('int64', 'uint64'): np.uint64,
}
key = (a, b) if a < b else (b, a)
return overrides.get(key)
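# Hedged illustration of how the override above is applied by _test_binary
# below: when Triton's C-style promotion differs from numpy's, the numpy
# reference result is recast before comparison. The dtype pair is arbitrary.
def _example_dtype_override():
    a = np.int32(-1)
    b = np.uint32(0)
    z_ref = np.add(a, b)                                   # numpy promotes int32 + uint32 to int64 (-1)
    forced = _binary_op_dtype_override('int32', 'uint32')  # Triton follows C: result stays uint32
    if forced is not None:
        z_ref = z_ref.astype(forced)                       # -1 wraps to 4294967295
    return z_ref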
def _test_binary(dtype_x, dtype_y, expr, numpy_expr=None, mode_x='real', mode_y='real', device='cuda', y_low=None, y_high=None):
check_type_supported(dtype_x) # early return if dtype_x is not supported
check_type_supported(dtype_y)
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, Y, SIZE: tl.constexpr):
off = tl.arange(0, SIZE)
x = tl.load(X + off)
y = tl.load(Y + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
rs = RandomState(17)
x = numpy_random(SIZE, dtype_str=dtype_x, rs=rs)
y = numpy_random(SIZE, dtype_str=dtype_y, rs=rs, low=y_low, high=y_high)
if mode_x == 'nan':
x[:] = float('nan')
if mode_y == 'nan':
y[:] = float('nan')
# reference result
z_ref = eval(expr if numpy_expr is None else numpy_expr)
dtype_z = _binary_op_dtype_override(dtype_x, dtype_y)
if dtype_z is not None:
z_ref = z_ref.astype(dtype_z)
# triton result
x_tri = to_triton(x, device=device, dst_type=dtype_x)
y_tri = to_triton(y, device=device, dst_type=dtype_y)
z_tri = to_triton(np.empty(SIZE, dtype=z_ref.dtype), device=device)
kernel[(1, )](z_tri, x_tri, y_tri, SIZE=SIZE, num_warps=4)
np.testing.assert_allclose(z_ref, to_numpy(z_tri), err_msg=expr, rtol=0.01)
def _mod_operation_ill_conditioned(dtype_x, dtype_y) -> bool:
# The result of x % y is ill-conditioned if x % y is much smaller than x.
# pytorch/CUDA has slightly different (probably better) rounding on
# remainders than stock LLVM. We currently don't expect to match it
# bit-for-bit.
return (dtype_x, dtype_y) in [
('int32', 'bfloat16'),
('int32', 'float16'),
('int32', 'float32'),
('int64', 'bfloat16'),
('int64', 'float16'),
('int64', 'float32'),
('int64', 'float64'),
('uint16', 'bfloat16'),
('uint16', 'float16'),
('uint16', 'float32'),
('uint32', 'bfloat16'),
('uint32', 'float16'),
('uint32', 'float32'),
('uint64', 'bfloat16'),
('uint64', 'float16'),
('uint64', 'float32'),
('uint64', 'float64'),
]
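# Rough numeric sketch of the ill-conditioning described above (the values are
# arbitrary): the remainder is many orders of magnitude smaller than the
# operands, so a one-ulp difference in the intermediate division can change it
# completely, which is why test_bin_op only asserts that the comparison fails
# for these pairs.
def _example_mod_ill_conditioning():
    x = np.float64(2**40 + 1)              # stands in for a large integer operand
    y = np.float64(np.float16(3.140625))   # a float16-representable divisor
    r = x % y                              # O(1) remainder computed from O(1e12) inputs
    return r, r / x                        # the tiny ratio shows how badly scaled this is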
# ---------------
# test binary ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, op", [
(dtype_x, dtype_y, op)
for op in ['+', '-', '*', '/', '%']
for dtype_x in dtypes_with_bfloat16
for dtype_y in dtypes_with_bfloat16
])
def test_bin_op(dtype_x, dtype_y, op, device='cuda'):
expr = f' x {op} y'
if op == '%' and dtype_x in int_dtypes + uint_dtypes and dtype_y in int_dtypes + uint_dtypes:
# LLVM has 'numpy.fmod', not 'numpy.remainder', semantics on integer remainders.
numpy_expr = 'np.fmod(x, y)'
elif op in ('/', '%') and dtype_x in ('int16', 'float16', 'bfloat16') and dtype_y in ('int16', 'float16', 'bfloat16'):
# Triton promotes 16-bit floating-point / and % to 32-bit because there
# are no native div or FRem operations on float16. Since we have to
# convert anyway, we may as well take the accuracy bump.
numpy_expr = f'x.astype(np.float32) {op} y.astype(np.float32)'
elif (dtype_x in uint_dtypes and dtype_y in int_dtypes and _bitwidth(dtype_x) >= _bitwidth(dtype_y)):
numpy_expr = f'x.astype(np.{dtype_x}) {op} y.astype(np.{dtype_x})'
elif (dtype_y in uint_dtypes and dtype_x in int_dtypes and _bitwidth(dtype_y) >= _bitwidth(dtype_x)):
numpy_expr = f'x.astype(np.{dtype_y}) {op} y.astype(np.{dtype_y})'
else:
numpy_expr = None
if op == '%' and _mod_operation_ill_conditioned(dtype_x, dtype_y):
with pytest.raises(AssertionError, match='Not equal to tolerance'):
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
elif (op in ('%', '/') and
((dtype_x in int_dtypes and dtype_y in uint_dtypes) or
(dtype_x in uint_dtypes and dtype_y in int_dtypes))):
with pytest.raises(triton.CompilationError) as exc_info:
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
assert re.match('Cannot use .* because they have different signedness', str(exc_info.value.__cause__))
else:
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
@pytest.mark.parametrize("dtype_x, dtype_y",
[(dtype_x, dtype_y) for dtype_x in int_dtypes for dtype_y in int_dtypes] +
[(dtype_x, dtype_y) for dtype_x in uint_dtypes for dtype_y in uint_dtypes]
)
def test_floordiv(dtype_x, dtype_y, device='cuda'):
# Triton has IEEE, not numpy/torch, semantics for %, and those carry
# through to //, so we have to use a nonstandard expression to get a
# reference result for //.
expr = 'x // y'
numpy_expr = '((x - np.fmod(x, y)) / y)'
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
# ---------------
# test bitwise ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, op", [
(dtype_x, dtype_y, op)
for op in ['&', '|', '^']
for dtype_x in dtypes + dtypes_with_bfloat16
for dtype_y in dtypes + dtypes_with_bfloat16
])
def test_bitwise_op(dtype_x, dtype_y, op, device='cuda'):
expr = f'x {op} y'
if (dtype_x in uint_dtypes and dtype_y in int_dtypes and _bitwidth(dtype_x) >= _bitwidth(dtype_y)):
numpy_expr = f'x.astype(np.{dtype_x}) {op} y.astype(np.{dtype_x})'
elif (dtype_y in uint_dtypes and dtype_x in int_dtypes and _bitwidth(dtype_y) >= _bitwidth(dtype_x)):
numpy_expr = f'x.astype(np.{dtype_y}) {op} y.astype(np.{dtype_y})'
else:
numpy_expr = None
if 'float' in dtype_x + dtype_y:
with pytest.raises(triton.CompilationError) as exc_info:
_test_binary(dtype_x, dtype_y, expr, numpy_expr='np.array([])', device=device)
# The CompilationError must have been caused by a C++ exception with this text.
assert re.match('invalid operands of type', str(exc_info.value.__cause__))
else:
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
@pytest.mark.parametrize("dtype_x, dtype_y, op", [
(dtype_x, dtype_y, op)
for op in ['<<', '>>']
for dtype_x in int_dtypes + uint_dtypes
for dtype_y in int_dtypes + uint_dtypes
])
def test_shift_op(dtype_x, dtype_y, op, device='cuda'):
expr = f'x {op} y'
bw = max(_bitwidth(dtype_x), _bitwidth(dtype_y))
dtype_z = f'uint{bw}'
numpy_expr = f'x.astype(np.{dtype_z}) {op} y.astype(np.{dtype_z})'
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device, y_low=0, y_high=65)
# ---------------
# test compare ops
# ---------------
ops = ['==', '!=', '>', '<', '>=', '<=']
@pytest.mark.parametrize("dtype_x, dtype_y, op, mode_x, mode_y",
# real
[
(dtype_x, dtype_y, op, 'real', 'real')
for op in ops
for dtype_x in dtypes
for dtype_y in dtypes
] +
# NaNs
[('float32', 'float32', op, mode_x, mode_y)
for op in ops
for mode_x, mode_y in [('nan', 'real'),
('real', 'nan'),
('nan', 'nan')]
])
def test_compare_op(dtype_x, dtype_y, op, mode_x, mode_y, device='cuda'):
expr = f'x {op} y'
if (dtype_x in uint_dtypes and dtype_y in int_dtypes and _bitwidth(dtype_x) >= _bitwidth(dtype_y)):
numpy_expr = f'x.astype(np.{dtype_x}) {op} y.astype(np.{dtype_x})'
elif (dtype_y in uint_dtypes and dtype_x in int_dtypes and _bitwidth(dtype_y) >= _bitwidth(dtype_x)):
numpy_expr = f'x.astype(np.{dtype_y}) {op} y.astype(np.{dtype_y})'
else:
numpy_expr = None
_test_binary(dtype_x, dtype_y, expr, numpy_expr, mode_x=mode_x, mode_y=mode_y, device=device)
# ---------------
# test where
# ---------------
@pytest.mark.parametrize("dtype", dtypes_with_bfloat16 + ["*int32"])
def test_where(dtype):
select_ptrs = False
if dtype == "*int32":
dtype = "int64"
select_ptrs = True
check_type_supported(dtype)
@triton.jit
def where_kernel(cond_ptr, a_ptr, b_ptr, output_ptr, n_elements,
BLOCK_SIZE: tl.constexpr,
TEST_POINTERS: tl.constexpr):
offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
decide = tl.load(cond_ptr + offsets, mask=mask)
if TEST_POINTERS:
a = tl.load(a_ptr + offsets, mask=mask).to(tl.pi32_t)
b = tl.load(b_ptr + offsets, mask=mask).to(tl.pi32_t)
else:
a = tl.load(a_ptr + offsets, mask=mask)
b = tl.load(b_ptr + offsets, mask=mask)
output = tl.where(decide, a, b)
tl.store(output_ptr + offsets, output, mask=mask)
SIZE = 1_000
rs = RandomState(17)
cond = numpy_random(SIZE, 'bool', rs)
x = numpy_random(SIZE, dtype_str=dtype, rs=rs)
y = numpy_random(SIZE, dtype_str=dtype, rs=rs)
z = np.where(cond, x, y)
cond_tri = to_triton(cond, device='cuda')
x_tri = to_triton(x, device='cuda', dst_type=dtype)
y_tri = to_triton(y, device='cuda', dst_type=dtype)
z_tri = to_triton(np.empty(SIZE, dtype=z.dtype), device='cuda', dst_type=dtype)
grid = lambda meta: (triton.cdiv(SIZE, meta['BLOCK_SIZE']),)
where_kernel[grid](cond_tri, x_tri, y_tri, z_tri, SIZE, BLOCK_SIZE=1024, TEST_POINTERS=select_ptrs)
assert (z == to_numpy(z_tri)).all()
def test_where_broadcast():
@triton.jit
def where_kernel(cond_ptr, a_ptr, out_ptr, BLOCK_SIZE: tl.constexpr):
xoffsets = tl.reshape(tl.arange(0, BLOCK_SIZE), [BLOCK_SIZE, 1])
yoffsets = tl.reshape(tl.arange(0, BLOCK_SIZE), [1, BLOCK_SIZE])
mask = tl.load(cond_ptr + yoffsets)
vals = tl.load(a_ptr + yoffsets + BLOCK_SIZE * xoffsets)
res = tl.where(mask, vals, 0.)
tl.store(out_ptr + yoffsets + BLOCK_SIZE * xoffsets, res)
@triton.jit
def where_scalar_condition(a_ptr, out_ptr, BLOCK_SIZE: tl.constexpr):
xoffsets = tl.reshape(tl.arange(0, BLOCK_SIZE), [BLOCK_SIZE, 1])
yoffsets = tl.reshape(tl.arange(0, BLOCK_SIZE), [1, BLOCK_SIZE])
mask = 0
vals = tl.load(a_ptr + yoffsets + BLOCK_SIZE * xoffsets)
res = tl.where(mask, vals, 0.)
tl.store(out_ptr + yoffsets + BLOCK_SIZE * xoffsets, res)
SIZE = 32
dtype = 'float32'
rs = RandomState(17)
x = numpy_random((SIZE, SIZE), dtype_str=dtype, rs=rs)
mask = numpy_random(SIZE, 'bool', rs=rs)
z = np.where(mask, x, 0)
cond_tri = to_triton(mask, device="cuda")
x_tri = to_triton(x, device='cuda', dst_type=dtype)
z_tri = to_triton(np.empty((SIZE, SIZE), dtype=z.dtype), device='cuda', dst_type=dtype)
where_kernel[(1,)](cond_tri, x_tri, z_tri, SIZE)
assert (z == to_numpy(z_tri)).all()
where_scalar_condition[(1,)](x_tri, z_tri, SIZE)
z = np.where(0, x, 0)
assert (z == to_numpy(z_tri)).all()
# ---------------
# test unary ops
# ---------------
@pytest.mark.parametrize("dtype_x, expr", [
(dtype_x, ' -x') for dtype_x in dtypes_with_bfloat16
] + [
(dtype_x, ' ~x') for dtype_x in int_dtypes
])
def test_unary_op(dtype_x, expr, device='cuda'):
_test_unary(dtype_x, expr, device=device)
# ----------------
# test math ops
# ----------------
@pytest.mark.parametrize("expr", [
'exp', 'log', 'cos', 'sin'
])
def test_math_op(expr, device='cuda'):
_test_unary('float32', f'tl.{expr}(x)', f'np.{expr}(x) ', device=device)
# ----------------
# test indexing
# ----------------
def make_ptr_str(name, shape):
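    # build a pointer-arithmetic expression addressing a row-major block of
    # `shape`, e.g. make_ptr_str('X', [4, 8]) returns
    # "X + tl.arange(0, 8)[None, :]*1 + tl.arange(0, 4)[:, None]*8"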
rank = len(shape)
offsets = []
stride = 1
for i in reversed(range(rank)):
idx = ', '.join([':' if ii == i else 'None' for ii in range(rank)])
offsets += [f'tl.arange(0, {shape[i]})[{idx}]*{stride}']
stride *= shape[i]
return f"{name} + {' + '.join(offsets)}"
@pytest.mark.parametrize("expr, dtype_str", [
(f'x[{s}]', d)
for s in ['None, :', ':, None', 'None, :, :', ':, :, None']
for d in ['int32', 'uint32', 'uint16']
])
def test_index1d(expr, dtype_str, device='cuda'):
rank_x = expr.count(':')
rank_y = expr.count(',') + 1
shape_x = [32 for _ in range(rank_x)]
shape_z = [32 for _ in range(rank_y)]
shape_z_rank_mismatch = [32 for _ in range(rank_y + 1)]
shape_z_dim_mismatch = [64 for _ in range(rank_y)]
# Triton kernel
@triton.jit
def kernel(Z, X, SIZE: tl.constexpr):
m = tl.arange(0, SIZE)
n = tl.arange(0, SIZE)
x = tl.load(X_PTR_EXPR)
z = GENERATE_TEST_HERE
tl.store(Z_PTR_EXPR, z)
def generate_kernel(shape_x, shape_z):
to_replace = {
'X_PTR_EXPR': make_ptr_str('X', shape_x),
'Z_PTR_EXPR': make_ptr_str('Z', shape_z),
'GENERATE_TEST_HERE': expr,
}
return patch_kernel(kernel, to_replace)
kernel_match = generate_kernel(shape_x, shape_z)
kernel_dim_mismatch = generate_kernel(shape_x, shape_z_dim_mismatch)
kernel_rank_mismatch = generate_kernel(shape_x, shape_z_rank_mismatch)
# torch result
x = numpy_random(shape_x, dtype_str=dtype_str)
y = np.zeros(shape_z, dtype=getattr(np, dtype_str))
z_ref = eval(expr) + y
# triton result
z_tri = to_triton(np.empty_like(z_ref), device=device)
x_tri = to_triton(x)
kernel_match[(1, )](z_tri, x_tri, num_warps=1, SIZE=shape_x[0])
# compare
assert (z_ref == to_numpy(z_tri)).all()
def catch_compilation_error(kernel):
try:
kernel[(1, )](z_tri, x_tri, num_warps=1, SIZE=shape_x[0])
except triton.CompilationError as e:
np.testing.assert_(True)
except BaseException:
np.testing.assert_(False)
catch_compilation_error(kernel_dim_mismatch)
catch_compilation_error(kernel_rank_mismatch)
# ---------------
# test tuples
# ---------------
@triton.jit
def fn(a, b):
return a + b, \
a - b, \
a * b
def test_tuples():
device = 'cuda'
@triton.jit
def with_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = fn(x, y)
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
@triton.jit
def without_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = x + y, x - y, x * y
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
x = torch.tensor([1.3], device=device, dtype=torch.float32)
y = torch.tensor([1.9], device=device, dtype=torch.float32)
a_tri = torch.tensor([0], device=device, dtype=torch.float32)
b_tri = torch.tensor([0], device=device, dtype=torch.float32)
c_tri = torch.tensor([0], device=device, dtype=torch.float32)
for kernel in [with_fn, without_fn]:
kernel[(1, )](x, y, a_tri, b_tri, c_tri, num_warps=1)
a_ref, b_ref, c_ref = x + y, x - y, x * y
assert a_tri == a_ref
assert b_tri == b_ref
assert c_tri == c_ref
# ---------------
# test atomics
# ---------------
@pytest.mark.parametrize("op, dtype_x_str, mode", itertools.chain.from_iterable([
[
('add', 'float16', mode),
('add', 'uint32', mode), ('add', 'int32', mode), ('add', 'float32', mode),
('max', 'uint32', mode), ('max', 'int32', mode), ('max', 'float32', mode),
('min', 'uint32', mode), ('min', 'int32', mode), ('min', 'float32', mode),
]
for mode in ['all_neg', 'all_pos', 'min_neg', 'max_pos']]))
def test_atomic_rmw(op, dtype_x_str, mode, device='cuda'):
cc = _triton.runtime.cc(_triton.runtime.backend.CUDA, torch.cuda.current_device())
if cc < 70:
if dtype_x_str == 'float16':
pytest.skip("Only test atomic float16 ops on devices with sm >= 70")
n_programs = 5
# triton kernel
@triton.jit
def kernel(X, Z):
pid = tl.program_id(0)
x = tl.load(X + pid)
old = GENERATE_TEST_HERE
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.atomic_{op}(Z, x)'})
numpy_op = {'add': np.sum, 'max': np.max, 'min': np.min}[op]
max_neutral = float('-inf') if dtype_x_str in float_dtypes else np.iinfo(getattr(np, dtype_x_str)).min
min_neutral = float('inf') if dtype_x_str in float_dtypes else np.iinfo(getattr(np, dtype_x_str)).max
neutral = {'add': 0, 'max': max_neutral, 'min': min_neutral}[op]
# triton result
rs = RandomState(17)
x = numpy_random((n_programs, ), dtype_str=dtype_x_str, rs=rs)
if mode == 'all_neg':
x = -np.abs(x)
if mode == 'all_pos':
x = np.abs(x)
if mode == 'min_neg':
idx = rs.randint(n_programs, size=(1, )).item()
x[idx] = -np.max(np.abs(x)) - 1
if mode == 'max_pos':
idx = rs.randint(n_programs, size=(1, )).item()
x[idx] = np.max(np.abs(x)) + 1
x_tri = to_triton(x, device=device)
z_tri = to_triton(np.array([neutral], dtype=getattr(np, dtype_x_str)), device=device)
kernel[(n_programs, )](x_tri, z_tri)
# torch result
z_ref = numpy_op(x).astype(getattr(np, dtype_x_str))
# compare
exact = op not in ['add']
if exact:
assert z_ref.item() == to_numpy(z_tri).item()
else:
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01)
@pytest.mark.parametrize("axis", [0, 1])
def test_tensor_atomic_rmw(axis, device="cuda"):
shape0, shape1 = 8, 8
# triton kernel
@triton.jit
def kernel(Z, X, AXIS: tl.constexpr, SHAPE0: tl.constexpr, SHAPE1: tl.constexpr):
off0 = tl.arange(0, SHAPE0)
off1 = tl.arange(0, SHAPE1)
x = tl.load(X + off0[:, None] * SHAPE1 + off1[None, :])
z = tl.sum(x, axis=AXIS)
tl.atomic_add(Z + off0, z)
rs = RandomState(17)
x = numpy_random((shape0, shape1), dtype_str="float32", rs=rs)
# reference result
z_ref = np.sum(x, axis=axis)
# triton result
x_tri = to_triton(x, device=device)
z_tri = to_triton(np.zeros((shape0,), dtype="float32"), device=device)
kernel[(1,)](z_tri, x_tri, axis, shape0, shape1)
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=1e-4)
def test_atomic_cas():
# 1. make sure that atomic_cas changes the original value (Lock)
@triton.jit
def change_value(Lock):
tl.atomic_cas(Lock, 0, 1)
Lock = torch.zeros((1,), device='cuda', dtype=torch.int32)
change_value[(1,)](Lock)
assert (Lock[0] == 1)
# 2. only one block enters the critical section
@triton.jit
def serialized_add(data, Lock):
ptrs = data + tl.arange(0, 128)
while tl.atomic_cas(Lock, 0, 1) == 1:
pass
tl.store(ptrs, tl.load(ptrs) + 1.0)
# release lock
tl.atomic_xchg(Lock, 0)
Lock = torch.zeros((1,), device='cuda', dtype=torch.int32)
data = torch.zeros((128,), device='cuda', dtype=torch.float32)
ref = torch.full((128,), 64.0)
serialized_add[(64,)](data, Lock)
triton.testing.assert_almost_equal(data, ref)
# ---------------
# test cast
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_z, bitcast", [
(dtype_x, dtype_z, False)
for dtype_x in dtypes
for dtype_z in dtypes
] + [
('float32', 'bfloat16', False),
('bfloat16', 'float32', False),
('float32', 'int32', True),
('float32', 'int1', False),
] + [
(f'uint{x}', f'int{x}', True) for x in [8, 16, 32, 64]
] + [
(f'int{x}', f'uint{x}', True) for x in [8, 16, 32, 64]
])
def test_cast(dtype_x, dtype_z, bitcast, device='cuda'):
# This is tricky because numpy doesn't have bfloat, and torch doesn't have uints.
x0 = 43 if dtype_x in int_dtypes else 43.5
if dtype_x in float_dtypes and dtype_z == 'int1':
x0 = 0.5
if dtype_x.startswith('bfloat'):
x_tri = torch.tensor([x0], dtype=getattr(torch, dtype_x), device=device)
else:
x = np.array([x0], dtype=getattr(np, dtype_x))
x_tri = to_triton(x)
# triton kernel
@triton.jit
def kernel(X, Z, BITCAST: tl.constexpr):
x = tl.load(X)
z = x.to(Z.dtype.element_ty, bitcast=BITCAST)
tl.store(Z, z)
dtype_z_np = dtype_z if dtype_z != 'int1' else 'bool_'
# triton result
if dtype_z.startswith('bfloat'):
z_tri = torch.empty((1,), dtype=getattr(torch, dtype_z), device=device)
else:
z_tri = to_triton(np.empty((1, ), dtype=getattr(np, dtype_z_np)), device=device)
kernel[(1, )](x_tri, z_tri, BITCAST=bitcast)
# torch result
if dtype_z.startswith('bfloat') or dtype_x.startswith('bfloat'):
assert bitcast is False
z_ref = x_tri.to(z_tri.dtype)
assert z_tri == z_ref
else:
if bitcast:
z_ref = x.view(getattr(np, dtype_z_np))
else:
z_ref = x.astype(getattr(np, dtype_z_np))
assert to_numpy(z_tri) == z_ref
def test_store_bool():
"""Tests that boolean True is stored as 1"""
@triton.jit
def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
input = tl.load(input_ptr + offsets, mask=mask)
output = input
tl.store(output_ptr + offsets, output, mask=mask)
src = torch.tensor([True, False], dtype=torch.bool, device='cuda')
n_elements = src.numel()
dst = torch.empty_like(src)
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
copy_kernel[grid](src, dst, n_elements, BLOCK_SIZE=1024)
assert (to_numpy(src).view('uint8') == to_numpy(dst).view('uint8')).all()
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
def test_f8_xf16_roundtrip(dtype):
"""Tests that converting an f8 to f16 and back to f8 doesn't change its value"""
check_type_supported(dtype)
@triton.jit
def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
input = tl.load(input_ptr + offsets, mask=mask)
output = input
tl.store(output_ptr + offsets, output, mask=mask)
f8_tensor = torch.tensor(range(-128, 128), dtype=torch.int8, device='cuda')
f8 = triton.reinterpret(f8_tensor, tl.float8)
n_elements = f8_tensor.numel()
xf16 = torch.empty_like(f8_tensor, dtype=dtype)
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
copy_kernel[grid](f8, xf16, n_elements, BLOCK_SIZE=1024)
f8_output_tensor = torch.empty_like(xf16, dtype=torch.int8)
f8_output = triton.reinterpret(f8_output_tensor, tl.float8)
copy_kernel[grid](xf16, f8_output, n_elements, BLOCK_SIZE=1024)
assert torch.all(f8_tensor == f8_output_tensor)
def test_f16_to_f8_rounding():
"""Takes all float16s, converts them to float8 and back to float16. Checks that the absolute
error is the minimum over all float8.
Or the same explanation a bit mathier:
for all f16 |f16 - fromf8(tof8(f16))| == min over all f8 |f16 - fromf8(f8)|"""
@triton.jit
def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
input = tl.load(input_ptr + offsets, mask=mask)
output = input
tl.store(output_ptr + offsets, output, mask=mask)
    # torch.view with a dtype isn't supported by the torch version triton pins yet, so use numpy's view
f16_input_np = (
np.array(
range(-int(2 ** (16 - 1)), int(2 ** (16 - 1))), dtype=np.int16,
)
.view(np.float16)
)
f16_input = torch.tensor(f16_input_np, dtype=torch.float16, device='cuda')
n_elements = f16_input.numel()
f8_output_tensor = torch.empty_like(f16_input, dtype=torch.int8)
f8_output = triton.reinterpret(f8_output_tensor, tl.float8)
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
copy_kernel[grid](f16_input, f8_output, n_elements, BLOCK_SIZE=1024)
f16_output = torch.empty_like(f16_input, dtype=torch.float16)
copy_kernel[grid](f8_output, f16_output, n_elements, BLOCK_SIZE=1024)
abs_error = torch.abs(f16_input - f16_output)
all_f8_vals_tensor = torch.tensor(range(2 ** 8), dtype=torch.uint8, device='cuda')
all_f8_vals = triton.reinterpret(all_f8_vals_tensor, tl.float8)
all_f8_vals_in_f16 = torch.empty_like(all_f8_vals_tensor, dtype=torch.float16)
copy_kernel[grid](all_f8_vals, all_f8_vals_in_f16, n_elements=256, BLOCK_SIZE=1024)
all_finite_f8_vals_in_f16 = all_f8_vals_in_f16[
torch.isfinite(all_f8_vals_in_f16)
]
min_error = torch.min(
torch.abs(
f16_input.reshape((-1, 1))
- all_finite_f8_vals_in_f16.reshape((1, -1))
),
dim=1,
)[0]
# 1.9375 is float8 max
mismatch = torch.logical_and(
abs_error != min_error, torch.logical_and(torch.isfinite(f16_input), torch.abs(f16_input) < 1.9375)
)
assert torch.all(
torch.logical_not(mismatch)
), f"f16_input[mismatch]={f16_input[mismatch]} f16_output[mismatch]={f16_output[mismatch]} abs_error[mismatch]={abs_error[mismatch]} min_error[mismatch]={min_error[mismatch]}"
# ---------------
# test reduce
# ---------------
@pytest.mark.parametrize("op, dtype_str, shape",
[(op, dtype, shape)
for op in ['min', 'max', 'argmin', 'argmax', 'sum']
for dtype in dtypes_with_bfloat16
for shape in [32, 64, 128, 512]])
def test_reduce1d(op, dtype_str, shape, device='cuda'):
check_type_supported(dtype_str) # bfloat16 on cc < 80 will not be tested
# triton kernel
@triton.jit
def kernel(X, Z, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
tl.store(Z, GENERATE_TEST_HERE)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.{op}(x, axis=0)'})
# input
rs = RandomState(17)
# limit the range of integers so that the sum does not overflow
x = numpy_random((shape,), dtype_str=dtype_str, rs=rs)
x_tri = to_triton(x, device=device)
numpy_op = {'sum': np.sum, 'max': np.max, 'min': np.min,
'argmin': np.argmin, 'argmax': np.argmax}[op]
# numpy result
z_dtype_str = 'int32' if op == 'argmin' or op == 'argmax' else dtype_str
z_tri_dtype_str = z_dtype_str
if op not in ['argmin', 'argmax'] and dtype_str == 'bfloat16':
z_dtype_str = 'float32'
z_ref = numpy_op(x).astype(getattr(np, z_dtype_str))
# trunc mantissa for a fair comparison of accuracy
z_ref = (z_ref.view('uint32') & np.uint32(0xffff0000)).view('float32')
z_tri_dtype_str = 'bfloat16'
else:
z_ref = numpy_op(x).astype(getattr(np, z_dtype_str))
# triton result
z_tri = to_triton(numpy_random((1,), dtype_str=z_dtype_str, rs=rs),
device=device, dst_type=z_tri_dtype_str)
kernel[(1,)](x_tri, z_tri, BLOCK=shape)
z_tri = to_numpy(z_tri)
# compare
if op == 'sum':
np.testing.assert_allclose(z_ref, z_tri, rtol=0.01)
else:
if op == 'argmin' or op == 'argmax':
# argmin and argmax can have multiple valid indices.
# so instead we compare the values pointed by indices
np.testing.assert_equal(x[z_ref], x[z_tri])
else:
np.testing.assert_equal(z_ref, z_tri)
reduce_configs1 = [
(op, dtype, (1, 1024), axis) for dtype in dtypes_with_bfloat16
for op in ['min', 'max', 'argmin', 'argmax', 'sum']
for axis in [1]
]
reduce_configs2 = [
(op, 'float32', shape, axis)
for op in ['min', 'max', 'argmin', 'argmax', 'sum']
for shape in [(2, 32), (4, 32), (4, 128), (32, 64), (64, 128), (128, 256), (32, 1024)]
for axis in [0, 1]
]
@pytest.mark.parametrize("op, dtype_str, shape, axis", reduce_configs1 + reduce_configs2)
def test_reduce2d(op, dtype_str, shape, axis, device='cuda'):
# triton kernel
@triton.jit
def kernel(X, Z, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, AXIS: tl.constexpr):
range_m = tl.arange(0, BLOCK_M)
range_n = tl.arange(0, BLOCK_N)
x = tl.load(X + range_m[:, None] * BLOCK_N + range_n[None, :])
z = GENERATE_TEST_HERE
if AXIS == 1:
tl.store(Z + range_m, z)
else:
tl.store(Z + range_n, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.{op}(x, axis=AXIS)'})
# input
rs = RandomState(17)
# limit the range of integers so that the sum does not overflow
x = numpy_random(shape, dtype_str=dtype_str, rs=rs)
x_tri = to_triton(x)
numpy_op = {'sum': np.sum, 'max': np.max, 'min': np.min,
'argmin': np.argmin, 'argmax': np.argmax}[op]
z_dtype_str = 'int32' if op == 'argmin' or op == 'argmax' else dtype_str
z_tri_dtype_str = z_dtype_str
# numpy result
if op not in ['argmin', 'argmax'] and dtype_str == 'bfloat16':
z_dtype_str = 'float32'
z_tri_dtype_str = 'bfloat16'
z_ref = numpy_op(x, axis=axis).astype(getattr(np, z_dtype_str))
# trunc mantissa for a fair comparison of accuracy
z_ref = (z_ref.view('uint32') & np.uint32(0xffff0000)).view('float32')
else:
z_ref = numpy_op(x, axis=axis).astype(getattr(np, z_dtype_str))
# triton result
z_tri = to_triton(numpy_random((shape[1 - axis],), dtype_str=z_dtype_str, rs=rs),
device=device, dst_type=z_tri_dtype_str)
kernel[(1,)](x_tri, z_tri, BLOCK_M=shape[0], BLOCK_N=shape[1], AXIS=axis)
z_tri = to_numpy(z_tri)
# compare
if op == 'sum':
np.testing.assert_allclose(z_ref, z_tri, rtol=0.01)
else:
if op == 'argmin' or op == 'argmax':
# argmin and argmax can have multiple valid indices.
# so instead we compare the values pointed by indices
z_ref_index = np.expand_dims(z_ref, axis=axis)
z_tri_index = np.expand_dims(z_tri, axis=axis)
z_ref_value = np.take_along_axis(x, z_ref_index, axis=axis)
z_tri_value = np.take_along_axis(x, z_tri_index, axis=axis)
np.testing.assert_equal(z_ref_value, z_tri_value)
else:
np.testing.assert_equal(z_ref, z_tri)
# ---------------
# test permute
# ---------------
@pytest.mark.parametrize("dtype_str, shape, perm",
[(dtype, shape, perm)
for dtype in ['bfloat16', 'float16', 'float32']
for shape in [(64, 64), (128, 128)]
for perm in [(1, 0)]])
def test_permute(dtype_str, shape, perm, device='cuda'):
check_type_supported(dtype_str) # bfloat16 on cc < 80 will not be tested
# triton kernel
@triton.jit
def kernel(X, stride_xm, stride_xn,
Z, stride_zm, stride_zn,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
off_m = tl.arange(0, BLOCK_M)
off_n = tl.arange(0, BLOCK_N)
Xs = X + off_m[:, None] * stride_xm + off_n[None, :] * stride_xn
Zs = Z + off_m[:, None] * stride_zm + off_n[None, :] * stride_zn
tl.store(Zs, tl.load(Xs))
# input
x = numpy_random(shape, dtype_str=dtype_str)
# triton result
z_tri = to_triton(np.empty_like(x), device=device, dst_type=dtype_str)
z_tri_contiguous = to_triton(np.empty_like(x), device=device, dst_type=dtype_str)
x_tri = to_triton(x, device=device, dst_type=dtype_str)
pgm = kernel[(1, 1)](x_tri, x_tri.stride(0), x_tri.stride(1),
z_tri, z_tri.stride(1), z_tri.stride(0),
BLOCK_M=shape[0], BLOCK_N=shape[1])
pgm_contiguous = kernel[(1, 1)](x_tri, x_tri.stride(1), x_tri.stride(0),
z_tri_contiguous, z_tri_contiguous.stride(0), z_tri_contiguous.stride(1),
BLOCK_M=shape[0], BLOCK_N=shape[1])
# numpy result
z_ref = x.transpose(*perm)
# compare
triton.testing.assert_almost_equal(z_tri, z_ref)
triton.testing.assert_almost_equal(z_tri_contiguous, z_ref)
# parse ptx to make sure ld/st are vectorized
ptx = pgm.asm['ptx']
assert 'ld.global.v4' in ptx
assert 'st.global.v4' in ptx
ptx = pgm_contiguous.asm['ptx']
assert 'ld.global.v4' in ptx
assert 'st.global.v4' in ptx
# ---------------
# test dot
# ---------------
@pytest.mark.parametrize("epilogue, allow_tf32, dtype",
[(epilogue, allow_tf32, dtype)
for epilogue in ['none', 'trans', 'add-matrix', 'add-rows', 'add-cols', 'softmax', 'chain-dot']
for allow_tf32 in [True, False]
for dtype in ['float16']
if not (allow_tf32 and (dtype in ['float16']))])
def test_dot(epilogue, allow_tf32, dtype, device='cuda'):
cc = _triton.runtime.cc(_triton.runtime.backend.CUDA, torch.cuda.current_device())
if cc < 70:
pytest.skip("Only test tl.dot() on devices with sm >= 70")
if cc < 80:
if dtype == 'int8':
pytest.skip("Only test int8 on devices with sm >= 80")
elif dtype == 'float32' and allow_tf32:
pytest.skip("Only test tf32 on devices with sm >= 80")
M, N, K = 128, 128, 64
num_warps = 8
trans_a, trans_b = False, False
# triton kernel
@triton.jit
def kernel(X, stride_xm, stride_xk,
Y, stride_yk, stride_yn,
W, stride_wn, stride_wl,
Z, stride_zm, stride_zn,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr,
ADD_MATRIX: tl.constexpr, ADD_ROWS: tl.constexpr, ADD_COLS: tl.constexpr,
ALLOW_TF32: tl.constexpr,
DO_SOFTMAX: tl.constexpr, CHAIN_DOT: tl.constexpr,
TRANS_A: tl.constexpr, TRANS_B: tl.constexpr):
off_m = tl.arange(0, BLOCK_M)
off_n = tl.arange(0, BLOCK_N)
off_l = tl.arange(0, BLOCK_N)
off_k = tl.arange(0, BLOCK_K)
Xs = X + off_m[:, None] * stride_xm + off_k[None, :] * stride_xk
Ys = Y + off_k[:, None] * stride_yk + off_n[None, :] * stride_yn
Ws = W + off_n[:, None] * stride_wn + off_l[None, :] * stride_wl
Zs = Z + off_m[:, None] * stride_zm + off_n[None, :] * stride_zn
z = tl.dot(tl.load(Xs), tl.load(Ys), trans_a=TRANS_A, trans_b=TRANS_B, allow_tf32=ALLOW_TF32)
if ADD_MATRIX:
z += tl.load(Zs)
if ADD_ROWS:
ZRs = Z + off_m * stride_zm
z += tl.load(ZRs)[:, None]
if ADD_COLS:
ZCs = Z + off_n * stride_zn
z += tl.load(ZCs)[None, :]
if DO_SOFTMAX:
max = tl.max(z, 1)
z = z - max[:, None]
num = tl.exp(z)
den = tl.sum(num, 1)
z = num / den[:, None]
if CHAIN_DOT:
# tl.store(Zs, z)
# tl.debug_barrier()
z = tl.dot(z.to(tl.float16), tl.load(Ws), trans_a=TRANS_A)
tl.store(Zs, z)
# input
rs = RandomState(17)
x = numpy_random((K, M) if trans_a else (M, K), dtype_str=dtype, rs=rs) * .1
y = numpy_random((N, K) if trans_b else (K, N), dtype_str=dtype, rs=rs) * .1
w = numpy_random((N, N), dtype_str=dtype, rs=rs) * .1
if allow_tf32:
x = (x.view('uint32') & np.uint32(0xffffe000)).view('float32')
y = (y.view('uint32') & np.uint32(0xffffe000)).view('float32')
w = (w.view('uint32') & np.uint32(0xffffe000)).view('float32')
x_tri = to_triton(x, device=device)
y_tri = to_triton(y, device=device)
w_tri = to_triton(w, device=device)
# triton result
z = 1 + numpy_random((M, N), dtype_str=dtype, rs=rs) * .1
z_tri = to_triton(z, device=device)
if epilogue == 'trans':
z_tri = torch.as_strided(z_tri, (M, N), z_tri.stride()[::-1])
pgm = kernel[(1, 1)](x_tri, x_tri.stride(0), x_tri.stride(1),
y_tri, y_tri.stride(0), y_tri.stride(1),
w_tri, w_tri.stride(0), w_tri.stride(1),
z_tri, z_tri.stride(0), z_tri.stride(1),
TRANS_A=trans_a, TRANS_B=trans_b,
BLOCK_M=M, BLOCK_K=K, BLOCK_N=N,
ADD_MATRIX=epilogue == 'add-matrix',
ADD_ROWS=epilogue == 'add-rows',
ADD_COLS=epilogue == 'add-cols',
DO_SOFTMAX=epilogue == 'softmax',
CHAIN_DOT=epilogue == 'chain-dot',
ALLOW_TF32=allow_tf32,
num_warps=num_warps)
# torch result
x_ref = x.T if trans_a else x
y_ref = y.T if trans_b else y
z_ref = np.matmul(x_ref, y_ref)
if epilogue == 'add-matrix':
z_ref += z
if epilogue == 'add-rows':
z_ref += z[:, 0][:, None]
if epilogue == 'add-cols':
z_ref += z[0, :][None, :]
if epilogue == 'softmax':
num = np.exp(z_ref - np.max(z_ref, axis=-1, keepdims=True))
denom = np.sum(num, axis=-1, keepdims=True)
z_ref = num / denom
if epilogue == 'chain-dot':
z_ref = np.matmul(z_ref.T if trans_a else z_ref, w)
# compare
# print(z_ref[:,0], z_tri[:,0])
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01)
# make sure ld/st are vectorized
ptx = pgm.asm['ptx']
assert 'ld.global.v4' in ptx
assert 'st.global.v4' in ptx
if allow_tf32:
assert 'mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32' in ptx
elif dtype == 'float32':
assert 'mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32' not in ptx
elif dtype == 'int8':
assert 'mma.sync.aligned.m16n8k32.row.col.satfinite.s32.s8.s8.s32' in ptx
def test_dot_without_load():
@triton.jit
def kernel(out):
pid = tl.program_id(axis=0)
a = tl.zeros((32, 32), tl.float32)
b = tl.zeros((32, 32), tl.float32)
c = tl.zeros((32, 32), tl.float32)
c = tl.dot(a, b)
pout = out + tl.arange(0, 32)[:, None] * 32 + tl.arange(0, 32)[None, :]
tl.store(pout, c)
out = torch.ones((32, 32), dtype=torch.float32, device="cuda")
kernel[(1,)](out)
# ---------------
# test arange
# ---------------
@pytest.mark.parametrize("start", [0, 1, 7, 16])
def test_arange(start, device='cuda'):
BLOCK = 128
z_tri = torch.empty(BLOCK, dtype=torch.int32, device=device)
@triton.jit
def _kernel(z, BLOCK: tl.constexpr,
START: tl.constexpr, END: tl.constexpr):
off = tl.arange(0, BLOCK)
val = tl.arange(START, END)
tl.store(z + off, val)
_kernel[(1,)](z_tri, START=start, END=start + BLOCK, BLOCK=BLOCK)
z_ref = torch.arange(start, BLOCK + start, dtype=torch.int32, device=device)
triton.testing.assert_almost_equal(z_tri, z_ref)
# ---------------
# test load
# ---------------
@pytest.mark.parametrize("dtype_str, size, size_diff", [(dtype_str, size, size_diff) for dtype_str in torch_dtypes for size in [128, 512] for size_diff in [1, 2, 3, 4]])
def test_masked_load(dtype_str, size, size_diff, device='cuda'):
dtype = getattr(torch, dtype_str)
check_type_supported(dtype) # bfloat16 on cc < 80 will not be tested
input_size = size - size_diff
output_size = size
if dtype_str == 'bool':
input = torch.randint(0, 2, (input_size,), dtype=dtype, device=device)
elif dtype_str in int_dtypes or dtype_str in uint_dtypes:
input = torch.randint(0, 127, (input_size,), dtype=dtype, device=device)
else:
input = torch.rand(input_size, dtype=dtype, device=device)
output = torch.zeros((output_size,), dtype=dtype, device=device)
@triton.jit
def _kernel(in_ptr, out_ptr, in_size: tl.constexpr, out_size: tl.constexpr):
in_offsets = tl.arange(0, out_size)
# Load inputs.
x = tl.load(in_ptr + in_offsets, mask=in_offsets < in_size, other=1.0)
# Store output
output_offsets = tl.arange(0, out_size)
tl.store(out_ptr + output_offsets, x)
_kernel[(1,)](input, output, input_size, output_size)
reference_out = input
reference_out = torch.cat((reference_out, torch.ones((size_diff,), dtype=dtype, device=device)))
    assert triton.testing.allclose(output, reference_out)
# 'bfloat16': torch.bfloat16,
# Testing masked loads with an intermediate copy to shared memory.
@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float16, torch.float32])
def test_masked_load_shared_memory(dtype, device='cuda'):
cc = _triton.runtime.cc(_triton.runtime.backend.CUDA, torch.cuda.current_device())
if cc < 70:
pytest.skip("Only test tl.dot() on devices with sm >= 70")
check_type_supported(dtype) # bfloat16 on cc < 80 will not be tested
M = 32
N = 32
K = 16
in1 = torch.rand((M, K), dtype=dtype, device=device)
in2 = torch.rand((K, N), dtype=dtype, device=device)
out = torch.zeros((M, N), dtype=dtype, device=device)
@triton.jit
def _kernel(in1_ptr, in2_ptr, output_ptr,
in_stride, in2_stride, out_stride,
in_numel, in2_numel, out_numel,
M: tl.constexpr, N: tl.constexpr, K: tl.constexpr):
M_offsets = tl.arange(0, M)
N_offsets = tl.arange(0, N)
K_offsets = tl.arange(0, K)
in_offsets = M_offsets[:, None] * in_stride + K_offsets[None, :]
in2_offsets = K_offsets[:, None] * in2_stride + N_offsets[None, :]
# Load inputs.
x = tl.load(in1_ptr + in_offsets, mask=in_offsets < in_numel)
w = tl.load(in2_ptr + in2_offsets, mask=in2_offsets < in2_numel)
# Without a dot product the memory doesn't get promoted to shared.
o = tl.dot(x, w)
# Store output
output_offsets = M_offsets[:, None] * out_stride + N_offsets[None, :]
tl.store(output_ptr + output_offsets, o, mask=output_offsets < in2_numel)
pgm = _kernel[(1,)](in1, in2, out,
in1.stride()[0],
in2.stride()[0],
out.stride()[0],
in1.numel(),
in2.numel(),
out.numel(),
M=M, N=N, K=K)
reference_out = torch.matmul(in1, in2)
    assert triton.testing.allclose(out, reference_out)
@pytest.mark.parametrize("cache", ["", ".ca", ".cg"])
def test_load_cache_modifier(cache):
src = torch.empty(128, device='cuda')
dst = torch.empty(128, device='cuda')
@triton.jit
def _kernel(dst, src, CACHE: tl.constexpr):
offsets = tl.arange(0, 128)
x = tl.load(src + offsets, cache_modifier=CACHE)
tl.store(dst + offsets, x)
pgm = _kernel[(1,)](dst, src, CACHE=cache)
ptx = pgm.asm['ptx']
if cache == '':
assert 'ld.global.ca' not in ptx
assert 'ld.global.cg' not in ptx
if cache == '.cg':
assert 'ld.global.cg' in ptx
assert 'ld.global.ca' not in ptx
if cache == '.ca':
assert 'ld.global.ca' in ptx
assert 'ld.global.cg' not in ptx
@pytest.mark.parametrize("N", [16, 10, 11, 1024])
def test_vectorization(N):
src = torch.empty(1024, device='cuda')
dst = torch.empty(1024, device='cuda')
@triton.jit
def _kernel(dst, src, N, BLOCK_SIZE: tl.constexpr):
offsets = tl.program_id(0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
x = tl.load(src + offsets, mask=offsets < N)
tl.store(dst + offsets, x, mask=offsets < N)
pgm = _kernel[(1,)](dst, src, N=N, BLOCK_SIZE=src.shape[0])
ptx = pgm.asm["ptx"]
if N % 16 == 0:
assert "ld.global.v4.b32" in ptx
else:
assert "ld.global.b32" in ptx
# triton.testing.assert_almost_equal(dst, src[:N])
# ---------------
# test store
# ---------------
# ---------------
# test if
# ---------------
# ---------------
# test for
# ---------------
# ---------------
# test while
# ---------------
# ---------------
# test default
# ---------------
# TODO: can't be local to test_default
@triton.jit
def _impl(value=10):
return value
def test_default():
value = 5
ret0 = torch.zeros(1, dtype=torch.int32, device='cuda')
ret1 = torch.zeros(1, dtype=torch.int32, device='cuda')
@triton.jit
def _kernel(ret0, ret1, value):
tl.store(ret0, _impl())
tl.store(ret1, _impl(value))
_kernel[(1,)](ret0, ret1, value)
assert ret0.item() == 10
assert ret1.item() == value
# ---------------
# test noop
# ----------------
def test_noop(device='cuda'):
@triton.jit
def kernel(x):
pass
x = to_triton(numpy_random((1,), dtype_str='int32'), device=device)
kernel[(1, )](x)
@pytest.mark.parametrize("value, value_type", [
(-1, 'i32'), (0, 'i32'), (-2**31, 'i32'), (2**31 - 1, 'i32'),
(2**31, 'u32'), (2**32 - 1, 'u32'), (2**32, 'i64'), (2**63 - 1, 'i64'),
(-2**63, 'i64'), (2**63, 'u64'), (2**64 - 1, 'u64')
])
def test_value_specialization(value: int, value_type: str, device='cuda') -> None:
spec_type = None
def cache_hook(*args, **kwargs):
nonlocal spec_type
spec_type = kwargs["compile"]["signature"][0]
JITFunction.cache_hook = cache_hook
@triton.jit
def kernel(VALUE, X):
pass
x = torch.tensor([3.14159], device='cuda')
pgm = kernel[(1, )](value, x)
JITFunction.cache_hook = None
assert spec_type == value_type
@pytest.mark.parametrize(
"value, overflow",
[(2**64 - 1, False), (2**64, True), (-2**63, False), (-2**63 - 1, True)]
)
def test_value_specialization_overflow(value: int, overflow: bool, device='cuda') -> None:
@triton.jit
def kernel(VALUE, X):
pass
x = torch.tensor([3.14159], device='cuda')
if overflow:
with pytest.raises(OverflowError):
kernel[(1, )](value, x)
else:
kernel[(1, )](value, x)
# ----------------
# test constexpr
# ----------------
@pytest.mark.parametrize("op", ['+', '-', '*', '/', '%', '<', '>'])
@pytest.mark.parametrize("is_lhs_constexpr", [False, True])
@pytest.mark.parametrize("is_rhs_constexpr", [True, False])
def test_bin_op_constexpr(op, is_lhs_constexpr, is_rhs_constexpr):
@triton.jit
def kernel(Z, X, Y):
x = tl.load(X)
y = tl.load(Y)
z = GENERATE_TEST_HERE
tl.store(Z, z)
x_str = "3.14" if is_lhs_constexpr else "x"
y_str = "4.13" if is_rhs_constexpr else "y"
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f"{x_str} {op} {y_str}"})
x = numpy_random((1,), dtype_str="float32")
y = numpy_random((1,), dtype_str="float32")
z = np.array(eval(f"{x_str} {op} {y_str}"))
x_tri = to_triton(x)
y_tri = to_triton(y)
z_tri = to_triton(np.empty((1,), dtype=z.dtype))
kernel[(1,)](z_tri, x_tri, y_tri)
np.testing.assert_allclose(z, to_numpy(z_tri))
def test_constexpr_shape():
@triton.jit
def kernel(X):
off = tl.arange(0, 128 + 128)
tl.store(X + off, off)
x_tri = to_triton(np.empty((256, ), dtype=np.int32))
kernel[(1,)](x_tri)
np.testing.assert_equal(to_numpy(x_tri), np.arange(0, 256))
def test_constexpr_scalar_shape():
@triton.jit
def kernel(X, s):
off = tl.arange(0, 256)
val = off % (256 // s)
tl.store(X + off, val)
x_tri = to_triton(np.empty((256, ), dtype=np.int32))
kernel[(1,)](x_tri, 32)
np.testing.assert_equal(to_numpy(x_tri), np.arange(0, 256) % 8)
# -------------
# test call
# -------------
@triton.jit
def val_multiplier(val, i):
return val * i
@triton.jit
def vecmul_kernel(ptr, n_elements, rep):
pid = tl.program_id(axis=0)
offsets = pid * 128 + tl.arange(0, 128)
mask = offsets < n_elements
vec = tl.load(ptr + offsets, mask=mask)
for i in range(1, rep):
vec = val_multiplier(vec, i)
tl.store(ptr + offsets, vec, mask=mask)
def test_call():
@triton.jit
def kernel(ptr, n_elements, num1, num2):
vecmul_kernel(ptr, n_elements, num1)
vecmul_kernel(ptr, n_elements, num2)
size = 1024
rand_val = numpy_random((size,), dtype_str="float32")
rand_val_tri = to_triton(rand_val, device='cuda')
kernel[(size // 128,)](rand_val_tri, size, 3, 5)
ans = rand_val * 1 * 2 * 1 * 2 * 3 * 4
np.testing.assert_equal(to_numpy(rand_val_tri), ans)
# -------------
# test if
# -------------
def test_if():
@triton.jit
def kernel(Cond, XTrue, XFalse, Ret):
pid = tl.program_id(0)
cond = tl.load(Cond)
if pid % 2:
tl.store(Ret, tl.load(XTrue))
else:
tl.store(Ret, tl.load(XFalse))
cond = torch.ones(1, dtype=torch.int32, device='cuda')
x_true = torch.tensor([3.14], dtype=torch.float32, device='cuda')
x_false = torch.tensor([1.51], dtype=torch.float32, device='cuda')
ret = torch.empty(1, dtype=torch.float32, device='cuda')
kernel[(1,)](cond, x_true, x_false, ret)
def test_num_warps_pow2():
dst = torch.empty(128, device='cuda')
@triton.jit
def _kernel(dst):
pass
with pytest.raises(AssertionError, match='must be a power of 2'):
_kernel[(1,)](dst=dst, num_warps=3)
_kernel[(1,)](dst=dst, num_warps=1)
_kernel[(1,)](dst=dst, num_warps=2)
_kernel[(1,)](dst=dst, num_warps=4)
# -------------
# test extern
# -------------
@pytest.mark.parametrize("dtype_str, expr, lib_path",
[('int32', 'libdevice.ffs', ''),
('float32', 'libdevice.pow', '/usr/local/cuda/nvvm/libdevice/libdevice.10.bc'),
('float64', 'libdevice.norm4d', '')])
def test_libdevice_tensor(dtype_str, expr, lib_path):
@triton.jit
def kernel(X, Y, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
y = GENERATE_TEST_HERE
tl.store(Y + tl.arange(0, BLOCK), y)
shape = (128, )
rs = RandomState(17)
    # generate test inputs with a fixed seed for reproducibility
x = numpy_random(shape, dtype_str=dtype_str, rs=rs)
if expr == 'libdevice.ffs':
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': 'tl.libdevice.ffs(x)'})
y_ref = np.zeros(shape, dtype=x.dtype)
for i in range(shape[0]):
y_ref[i] = (int(x[i]) & int(-x[i])).bit_length()
elif expr == 'libdevice.pow':
# numpy does not allow negative factors in power, so we use abs()
x = np.abs(x)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': 'tl.libdevice.pow(x, x)'})
y_ref = np.power(x, x)
elif expr == 'libdevice.norm4d':
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': 'tl.libdevice.norm4d(x, x, x, x)'})
y_ref = np.sqrt(4 * np.power(x, 2))
x_tri = to_triton(x)
# triton result
y_tri = to_triton(numpy_random((shape[0],), dtype_str=dtype_str, rs=rs), device='cuda')
kernel[(1,)](x_tri, y_tri, BLOCK=shape[0], extern_libs={'libdevice': lib_path})
# compare
if expr == 'libdevice.ffs':
np.testing.assert_equal(y_ref, to_numpy(y_tri))
else:
np.testing.assert_allclose(y_ref, to_numpy(y_tri), rtol=0.01)
@pytest.mark.parametrize("dtype_str, expr, lib_path",
[('float32', 'libdevice.pow', '')])
def test_libdevice_scalar(dtype_str, expr, lib_path):
@triton.jit
def kernel(X, Y, BLOCK: tl.constexpr):
x = X
y = GENERATE_TEST_HERE
tl.store(Y + tl.arange(0, BLOCK), y)
shape = (128, )
rs = RandomState(17)
    # generate test inputs with a fixed seed for reproducibility
x = numpy_random((1,), dtype_str=dtype_str, rs=rs)
y_ref = np.zeros(shape, dtype=x.dtype)
# numpy does not allow negative factors in power, so we use abs()
x = np.abs(x)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': 'tl.libdevice.pow(x, x)'})
y_ref[:] = np.power(x, x)
# triton result
x_tri = to_triton(x)[0].item()
y_tri = to_triton(numpy_random((shape[0],), dtype_str=dtype_str, rs=rs), device='cuda')
kernel[(1,)](x_tri, y_tri, BLOCK=shape[0], extern_libs={'libdevice': lib_path})
# compare
np.testing.assert_allclose(y_ref, to_numpy(y_tri), rtol=0.01)
| mit |
KellyChan/Python | python/sklearn/examples/general/recognizing_hand-written_digits.py | 3 | 1991 | #---------------------------------------------------------------#
# Project: Recognizing hand-written digits
# Author: Kelly Chan
# Date: Apr 23 2014
#---------------------------------------------------------------#
print(__doc__)
import pylab as pl
from sklearn import datasets, svm, metrics
def loadData():
digits = datasets.load_digits()
return digits
def loadPic(digits):
    for index, (image, label) in enumerate(list(zip(digits.images, digits.target))[:4]):
pl.subplot(2, 4, index+1)
pl.axis('off')
pl.imshow(image, cmap=pl.cm.gray_r, interpolation='nearest')
pl.title('Training: %i' % label)
def reshapeData(digits):
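    # flatten each 8x8 digit image into a 64-element feature vector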
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
return data, n_samples
def createSVM():
classifier = svm.SVC(gamma=0.001)
return classifier
def classify(classifier, digits, data, n_samples):
    expected = digits.target[n_samples//2:]
    classifier.fit(data[:n_samples//2], digits.target[:n_samples//2])
    predicted = classifier.predict(data[n_samples//2:])
print("Classification report for classifier %s: \n%s\n" \
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" \
% metrics.confusion_matrix(expected, predicted))
return expected, predicted
def plotPredict(digits, predicted, n_samples):
    for index, (image, prediction) in enumerate(list(zip(digits.images[n_samples//2:], predicted))[:4]):
pl.subplot(2, 4, index+5)
pl.axis('off')
pl.imshow(image, cmap=pl.cm.gray_r, interpolation='nearest')
pl.title('Prediction %i' % prediction)
pl.show()
def test():
digits = loadData()
loadPic(digits)
data, n_samples = reshapeData(digits)
classifier = createSVM()
expected, predicted = classify(classifier, digits, data, n_samples)
plotPredict(digits, predicted, n_samples)
if __name__ == '__main__':
test()
| mit |
tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Feature_Comparison/multiple_features/results/test10_cross_validate_categories_1200ms_scaled_method_v_force_area.py | 1 | 4711 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 82:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
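# Hedged usage sketch of pca() above (the component count and call pattern are
# assumptions for illustration only): project the centered data matrix onto
# its leading eigenvectors, mirroring the steps taken in __main__ below.
def example_project(X, n_components=20):
    vec, val, mean_X, M, Mcov = pca(X)
    W = vec[:, 0:n_components] # keep the leading eigenvectors as columns
    return np.dot(W.T, M) # projected data, one column per sample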
if __name__ == '__main__':
Fmat = np.row_stack([Fmat_original[0:41,:], Fmat_original[41:82,:]])
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:20]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but sometimes useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
print 'Z-Score Shape:', m_Z, n_Z
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True')
#show()
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,30.3,0,1.2])
grid('True')
show()
| mit |
elpaso/QGIS | python/plugins/processing/tests/GdalAlgorithmsRasterTest.py | 4 | 122173 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GdalAlgorithmRasterTest.py
---------------------
Date : January 2016
Copyright : (C) 2016 by Matthias Kuhn
Email : matthias@opengis.ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'January 2016'
__copyright__ = '(C) 2016, Matthias Kuhn'
import nose2
import os
import shutil
import tempfile
from qgis.core import (QgsProcessingContext,
QgsProcessingFeedback,
QgsRectangle)
from qgis.testing import (start_app,
unittest)
import AlgorithmsTestBase
from processing.algs.gdal.AssignProjection import AssignProjection
from processing.algs.gdal.ClipRasterByExtent import ClipRasterByExtent
from processing.algs.gdal.ClipRasterByMask import ClipRasterByMask
from processing.algs.gdal.ColorRelief import ColorRelief
from processing.algs.gdal.GridAverage import GridAverage
from processing.algs.gdal.GridDataMetrics import GridDataMetrics
from processing.algs.gdal.GridInverseDistance import GridInverseDistance
from processing.algs.gdal.GridInverseDistanceNearestNeighbor import GridInverseDistanceNearestNeighbor
from processing.algs.gdal.GridLinear import GridLinear
from processing.algs.gdal.GridNearestNeighbor import GridNearestNeighbor
from processing.algs.gdal.gdal2tiles import gdal2tiles
from processing.algs.gdal.gdalcalc import gdalcalc
from processing.algs.gdal.gdaltindex import gdaltindex
from processing.algs.gdal.contour import contour
from processing.algs.gdal.gdalinfo import gdalinfo
from processing.algs.gdal.hillshade import hillshade
from processing.algs.gdal.aspect import aspect
from processing.algs.gdal.buildvrt import buildvrt
from processing.algs.gdal.proximity import proximity
from processing.algs.gdal.rasterize import rasterize
from processing.algs.gdal.retile import retile
from processing.algs.gdal.translate import translate
from processing.algs.gdal.warp import warp
from processing.algs.gdal.fillnodata import fillnodata
from processing.algs.gdal.rearrange_bands import rearrange_bands
from processing.algs.gdal.gdaladdo import gdaladdo
from processing.algs.gdal.sieve import sieve
from processing.algs.gdal.gdal2xyz import gdal2xyz
from processing.algs.gdal.polygonize import polygonize
from processing.algs.gdal.pansharp import pansharp
from processing.algs.gdal.merge import merge
from processing.algs.gdal.nearblack import nearblack
from processing.algs.gdal.slope import slope
from processing.algs.gdal.rasterize_over import rasterize_over
from processing.algs.gdal.rasterize_over_fixed_value import rasterize_over_fixed_value
from processing.algs.gdal.viewshed import viewshed
testDataPath = os.path.join(os.path.dirname(__file__), 'testdata')
class TestGdalRasterAlgorithms(unittest.TestCase, AlgorithmsTestBase.AlgorithmsTest):
@classmethod
def setUpClass(cls):
start_app()
from processing.core.Processing import Processing
Processing.initialize()
cls.cleanup_paths = []
@classmethod
def tearDownClass(cls):
for path in cls.cleanup_paths:
shutil.rmtree(path)
def test_definition_file(self):
return 'gdal_algorithm_raster_tests.yaml'
def testAssignProjection(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = AssignProjection()
alg.initAlgorithm()
# with target srs
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'CRS': 'EPSG:3111'}, context, feedback),
['gdal_edit.py',
'-a_srs EPSG:3111 ' +
source])
# with target using proj string
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'CRS': custom_crs}, context, feedback),
['gdal_edit.py',
'-a_srs EPSG:20936 ' +
source])
# with target using custom projection
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'CRS': custom_crs}, context, feedback),
['gdal_edit.py',
'-a_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" ' +
source])
# with non-EPSG crs code
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'CRS': 'POSTGIS:3111'}, context, feedback),
['gdal_edit.py',
'-a_srs EPSG:3111 ' +
source])
def testGdalTranslate(self):
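        """Check gdal_translate commands for nodata, data type, target CRS, subdatasets and extra parameters."""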
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
translate_alg = translate()
translate_alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# without NODATA value
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with None NODATA value
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'NODATA': None,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-a_nodata 9999.0 ' +
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-a_nodata 0.0 ' +
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value and custom data type
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'DATA_TYPE': 6,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-a_nodata 0.0 ' +
'-ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with target srs
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'TARGET_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-a_srs EPSG:3111 ' +
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with target using proj string
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'TARGET_CRS': custom_crs,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-a_srs EPSG:20936 ' +
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with target using custom projection
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'TARGET_CRS': custom_crs,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-a_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" ' +
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with non-EPSG crs code
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'TARGET_CRS': 'POSTGIS:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-a_srs EPSG:3111 ' +
'-of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with copy subdatasets
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'COPY_SUBDATASETS': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdal_translate',
'-sds ' +
'-of GTiff ' +
source + ' ' +
outdir + '/check.tif'])
# additional parameters
self.assertEqual(
translate_alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-strict -unscale -epo',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-of JPEG -strict -unscale -epo ' +
source + ' ' +
outdir + '/check.jpg'])
def testClipRasterByExtent(self):
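        """Check the gdal_translate -projwin commands built when clipping a raster by an extent."""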
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = ClipRasterByExtent()
alg.initAlgorithm()
extent = QgsRectangle(1, 2, 3, 4)
with tempfile.TemporaryDirectory() as outdir:
# with no NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTENT': extent,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-projwin 0.0 0.0 0.0 0.0 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTENT': extent,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-projwin 0.0 0.0 0.0 0.0 -a_nodata 9999.0 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTENT': extent,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-projwin 0.0 0.0 0.0 0.0 -a_nodata 0.0 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value and custom data type
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTENT': extent,
'NODATA': 0,
'DATA_TYPE': 6,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-projwin 0.0 0.0 0.0 0.0 -a_nodata 0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with creation options
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTENT': extent,
'OPTIONS': 'COMPRESS=DEFLATE|PREDICTOR=2|ZLEVEL=9',
'DATA_TYPE': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-projwin 0.0 0.0 0.0 0.0 -of JPEG -co COMPRESS=DEFLATE -co PREDICTOR=2 -co ZLEVEL=9 ' +
source + ' ' +
outdir + '/check.jpg'])
# with additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTENT': extent,
'EXTRA': '-s_srs EPSG:4326 -tps -tr 0.1 0.1',
'DATA_TYPE': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_translate',
'-projwin 0.0 0.0 0.0 0.0 -of JPEG -s_srs EPSG:4326 -tps -tr 0.1 0.1 ' +
source + ' ' +
outdir + '/check.jpg'])
def testClipRasterByMask(self):
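        """Check the gdalwarp -cutline commands built when clipping a raster by a mask layer."""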
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
mask = os.path.join(testDataPath, 'polys.gml')
alg = ClipRasterByMask()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# with no NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MASK': mask,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-of JPEG -cutline ' +
mask + ' -cl polys2 -crop_to_cutline ' + source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MASK': mask,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-of JPEG -cutline ' +
mask + ' -cl polys2 -crop_to_cutline -dstnodata 9999.0 ' + source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MASK': mask,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-of JPEG -cutline ' +
mask + ' -cl polys2 -crop_to_cutline -dstnodata 0.0 ' + source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value and custom data type
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MASK': mask,
'NODATA': 0,
'DATA_TYPE': 6,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-ot Float32 -of JPEG -cutline ' +
mask + ' -cl polys2 -crop_to_cutline -dstnodata 0.0 ' + source + ' ' +
outdir + '/check.jpg'])
# with creation options
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MASK': mask,
'OPTIONS': 'COMPRESS=DEFLATE|PREDICTOR=2|ZLEVEL=9',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-of JPEG -cutline ' +
mask + ' -cl polys2 -crop_to_cutline -co COMPRESS=DEFLATE -co PREDICTOR=2 -co ZLEVEL=9 ' +
source + ' ' +
outdir + '/check.jpg'])
            # with multithreading and additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MASK': mask,
'MULTITHREADING': True,
'EXTRA': '-nosrcalpha -wm 2048 -nomd',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-of JPEG -cutline ' +
mask + ' -cl polys2 -crop_to_cutline -multi -nosrcalpha -wm 2048 -nomd ' +
source + ' ' +
outdir + '/check.jpg'])
def testContour(self):
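        """Check gdal_contour commands for interval, nodata, 3D output, offset and extra parameters."""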
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = contour()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# with no NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD_NAME': 'elev',
'INTERVAL': 5,
'OUTPUT': outdir + '/check.shp'}, context, feedback),
['gdal_contour',
'-b 1 -a elev -i 5.0 -f "ESRI Shapefile" ' +
source + ' ' +
outdir + '/check.shp'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD_NAME': 'elev',
'INTERVAL': 5,
'NODATA': 9999,
'OUTPUT': outdir + '/check.shp'}, context, feedback),
['gdal_contour',
'-b 1 -a elev -i 5.0 -snodata 9999.0 -f "ESRI Shapefile" ' +
source + ' ' +
outdir + '/check.shp'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD_NAME': 'elev',
'INTERVAL': 5,
'NODATA': 0,
'OUTPUT': outdir + '/check.gpkg'}, context, feedback),
['gdal_contour',
'-b 1 -a elev -i 5.0 -snodata 0.0 -f "GPKG" ' +
source + ' ' +
outdir + '/check.gpkg'])
# with CREATE_3D
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'CREATE_3D': True,
'OUTPUT': outdir + '/check.shp'}, context, feedback),
['gdal_contour',
'-b 1 -a ELEV -i 10.0 -3d -f "ESRI Shapefile" ' +
source + ' ' +
outdir + '/check.shp'])
# with IGNORE_NODATA and OFFSET
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'IGNORE_NODATA': True,
'OFFSET': 100,
'OUTPUT': outdir + '/check.shp'}, context, feedback),
['gdal_contour',
'-b 1 -a ELEV -i 10.0 -inodata -off 100.0 -f "ESRI Shapefile" ' +
source + ' ' +
outdir + '/check.shp'])
# with additional command line parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'EXTRA': '-e 3 -amin MIN_H',
'OUTPUT': outdir + '/check.shp'}, context, feedback),
['gdal_contour',
'-b 1 -a ELEV -i 10.0 -f "ESRI Shapefile" -e 3 -amin MIN_H ' +
source + ' ' +
outdir + '/check.shp'])
# obsolete OPTIONS param
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'OPTIONS': '-fl 100 125 150 200',
'OUTPUT': outdir + '/check.shp'}, context, feedback),
['gdal_contour',
'-b 1 -a ELEV -i 10.0 -f "ESRI Shapefile" -fl 100 125 150 200 ' +
source + ' ' +
outdir + '/check.shp'])
def testGdal2Tiles(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = gdal2tiles()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# with no NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/'}, context, feedback),
['gdal2tiles.py',
'-p mercator -w all -r average ' +
source + ' ' +
outdir + '/'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': -9999,
'OUTPUT': outdir + '/'}, context, feedback),
['gdal2tiles.py',
'-p mercator -w all -r average -a -9999.0 ' +
source + ' ' +
outdir + '/'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/'}, context, feedback),
['gdal2tiles.py',
'-p mercator -w all -r average -a 0.0 ' +
source + ' ' +
outdir + '/'])
# with input srs
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/'}, context, feedback),
['gdal2tiles.py',
'-p mercator -w all -r average -s EPSG:3111 ' +
source + ' ' +
outdir + '/'])
            # with source CRS given as a proj string
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': custom_crs,
'OUTPUT': outdir + '/'}, context, feedback),
['gdal2tiles.py',
'-p mercator -w all -r average -s EPSG:20936 ' +
source + ' ' +
outdir + '/'])
            # with source CRS using a custom projection
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': custom_crs,
'OUTPUT': outdir + '/'}, context, feedback),
['gdal2tiles.py',
'-p mercator -w all -r average -s "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" ' +
source + ' ' +
outdir + '/'])
# with non-EPSG crs code
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'POSTGIS:3111',
'OUTPUT': outdir + '/'}, context, feedback),
['gdal2tiles.py',
'-p mercator -w all -r average -s EPSG:3111 ' +
source + ' ' +
outdir + '/'])
def testGdalCalc(self):
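        """Check gdal_calc.py commands, in particular that the formula is passed through unescaped."""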
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = gdalcalc()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
output = outdir + '/check.jpg'
# default execution
formula = 'A*2' # default formula
self.assertEqual(
alg.getConsoleCommands({'INPUT_A': source,
'BAND_A': 1,
'FORMULA': formula,
'OUTPUT': output}, context, feedback),
['gdal_calc.py',
'--calc "{}" --format JPEG --type Float32 -A {} --A_band 1 --outfile {}'.format(formula, source, output)])
            # check that the formula is not escaped and is passed through as-is
formula = 'A * 2' # <--- add spaces in the formula
self.assertEqual(
alg.getConsoleCommands({'INPUT_A': source,
'BAND_A': 1,
'FORMULA': formula,
'OUTPUT': output}, context, feedback),
['gdal_calc.py',
'--calc "{}" --format JPEG --type Float32 -A {} --A_band 1 --outfile {}'.format(formula, source, output)])
# additional creation options
formula = 'A*2'
self.assertEqual(
alg.getConsoleCommands({'INPUT_A': source,
'BAND_A': 1,
'FORMULA': formula,
'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75',
'OUTPUT': output}, context, feedback),
['gdal_calc.py',
'--calc "{}" --format JPEG --type Float32 -A {} --A_band 1 --co COMPRESS=JPEG --co JPEG_QUALITY=75 --outfile {}'.format(formula, source, output)])
# additional parameters
formula = 'A*2'
self.assertEqual(
alg.getConsoleCommands({'INPUT_A': source,
'BAND_A': 1,
'FORMULA': formula,
'EXTRA': '--debug --quiet',
'OUTPUT': output}, context, feedback),
['gdal_calc.py',
'--calc "{}" --format JPEG --type Float32 -A {} --A_band 1 --debug --quiet --outfile {}'.format(formula, source, output)])
def testGdalInfo(self):
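        """Check gdalinfo commands, including the quoting of paths that contain spaces."""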
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = gdalinfo()
alg.initAlgorithm()
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MIN_MAX': False,
'NOGCP': False,
'NO_METADATA': False,
'STATS': False}, context, feedback),
['gdalinfo',
source])
source = os.path.join(testDataPath, 'raster with spaces.tif')
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MIN_MAX': False,
'NOGCP': False,
'NO_METADATA': False,
'STATS': False}, context, feedback),
['gdalinfo',
'"' + source + '"'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MIN_MAX': True,
'NOGCP': False,
'NO_METADATA': False,
'STATS': False}, context, feedback),
['gdalinfo',
'-mm "' + source + '"'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MIN_MAX': False,
'NOGCP': True,
'NO_METADATA': False,
'STATS': False}, context, feedback),
['gdalinfo',
'-nogcp "' + source + '"'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MIN_MAX': False,
'NOGCP': False,
'NO_METADATA': True,
'STATS': False}, context, feedback),
['gdalinfo',
'-nomd "' + source + '"'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MIN_MAX': False,
'NOGCP': False,
'NO_METADATA': False,
'STATS': True}, context, feedback),
['gdalinfo',
'-stats "' + source + '"'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MIN_MAX': False,
'NOGCP': False,
'NO_METADATA': False,
'STATS': False,
'EXTRA': '-proj4 -listmdd -checksum'}, context, feedback),
['gdalinfo',
'-proj4 -listmdd -checksum "' + source + '"'])
def testGdalTindex(self):
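        """Check gdaltindex commands; inputs go through a temporary --optfile, so only the stable part of the command is asserted."""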
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = gdaltindex()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
commands = alg.getConsoleCommands({'LAYERS': [source],
'OUTPUT': outdir + '/test.shp'}, context, feedback)
self.assertEqual(len(commands), 2)
self.assertEqual(commands[0], 'gdaltindex')
self.assertIn('-tileindex location -f "ESRI Shapefile" ' + outdir + '/test.shp', commands[1])
self.assertIn('--optfile ', commands[1])
            # with target srs
commands = alg.getConsoleCommands({'LAYERS': [source],
'TARGET_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/test.shp'}, context, feedback)
self.assertEqual(len(commands), 2)
self.assertEqual(commands[0], 'gdaltindex')
self.assertIn('-tileindex location -t_srs EPSG:3111 -f "ESRI Shapefile" ' + outdir + '/test.shp', commands[1])
self.assertIn('--optfile ', commands[1])
# with target using proj string
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
commands = alg.getConsoleCommands({'LAYERS': [source],
'TARGET_CRS': custom_crs,
'OUTPUT': outdir + '/test.shp'}, context, feedback)
self.assertEqual(len(commands), 2)
self.assertEqual(commands[0], 'gdaltindex')
self.assertIn('-tileindex location -t_srs EPSG:20936 -f "ESRI Shapefile" ' + outdir + '/test.shp', commands[1])
self.assertIn('--optfile ', commands[1])
# with target using custom projection
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
commands = alg.getConsoleCommands({'LAYERS': [source],
'TARGET_CRS': custom_crs,
'OUTPUT': outdir + '/test.shp'}, context, feedback)
self.assertEqual(len(commands), 2)
self.assertEqual(commands[0], 'gdaltindex')
self.assertIn('-tileindex location -t_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" -f "ESRI Shapefile" ' + outdir + '/test.shp', commands[1])
self.assertIn('--optfile ', commands[1])
# with non-EPSG crs code
commands = alg.getConsoleCommands({'LAYERS': [source],
'TARGET_CRS': 'POSTGIS:3111',
'OUTPUT': outdir + '/test.shp'}, context, feedback)
self.assertEqual(len(commands), 2)
self.assertEqual(commands[0], 'gdaltindex')
self.assertIn(
'-tileindex location -t_srs EPSG:3111 -f "ESRI Shapefile" ' + outdir + '/test.shp',
commands[1])
self.assertIn('--optfile ', commands[1])
def testGridAverage(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'points.gml')
alg = GridAverage()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# with no NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a average:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a average:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a average:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-z_multiply 1.5 -outsize 1754 1394',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a average:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG -z_multiply 1.5 -outsize 1754 1394 ' +
source + ' ' +
outdir + '/check.jpg'])
def testGridDataMetrics(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'points.gml')
alg = GridDataMetrics()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# without NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a minimum:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a minimum:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a minimum:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
            # non-default data metric (average_distance)
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'METRIC': 4,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a average_distance:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-z_multiply 1.5 -outsize 1754 1394',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdal_grid',
'-l points -a minimum:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 ' +
'-ot Float32 -of GTiff -z_multiply 1.5 -outsize 1754 1394 ' +
source + ' ' +
outdir + '/check.tif'])
def testGridInverseDistance(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'points.gml')
alg = GridInverseDistance()
alg.initAlgorithm()
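        # the 'smothing' spelling in the expected strings below matches the
        # parameter name emitted by the algorithm, so it is asserted as-is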
with tempfile.TemporaryDirectory() as outdir:
# without NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a invdist:power=2.0:smothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a invdist:power=2.0:smothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a invdist:power=2.0:smothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-z_multiply 1.5 -outsize 1754 1394',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdal_grid',
'-l points -a invdist:power=2.0:smothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=0.0 ' +
'-ot Float32 -of GTiff -z_multiply 1.5 -outsize 1754 1394 ' +
source + ' ' +
outdir + '/check.tif'])
def testGridInverseDistanceNearestNeighbour(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'points.gml')
alg = GridInverseDistanceNearestNeighbor()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# without NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a invdistnn:power=2.0:smothing=0.0:radius=1.0:max_points=12:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a invdistnn:power=2.0:smothing=0.0:radius=1.0:max_points=12:min_points=0:nodata=9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a invdistnn:power=2.0:smothing=0.0:radius=1.0:max_points=12:min_points=0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-z_multiply 1.5 -outsize 1754 1394',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdal_grid',
'-l points -a invdistnn:power=2.0:smothing=0.0:radius=1.0:max_points=12:min_points=0:nodata=0.0 ' +
'-ot Float32 -of GTiff -z_multiply 1.5 -outsize 1754 1394 ' +
source + ' ' +
outdir + '/check.tif'])
def testGridLinear(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'points.gml')
alg = GridLinear()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# without NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a linear:radius=-1.0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a linear:radius=-1.0:nodata=9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a linear:radius=-1.0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-z_multiply 1.5 -outsize 1754 1394',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdal_grid',
'-l points -a linear:radius=-1.0:nodata=0.0 -ot Float32 -of GTiff ' +
'-z_multiply 1.5 -outsize 1754 1394 ' +
source + ' ' +
outdir + '/check.tif'])
def testGridNearestNeighbour(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'points.gml')
alg = GridNearestNeighbor()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# without NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_grid',
'-l points -a nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-z_multiply 1.5 -outsize 1754 1394',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdal_grid',
'-l points -a nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=0.0 -ot Float32 -of GTiff ' +
'-z_multiply 1.5 -outsize 1754 1394 ' +
source + ' ' +
outdir + '/check.tif'])
def testHillshade(self):
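        """Check gdaldem hillshade commands; in multidirectional mode the azimuth argument must be omitted."""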
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = hillshade()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'Z_FACTOR': 5,
'SCALE': 2,
'AZIMUTH': 90,
'ALTITUDE': 20,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'hillshade ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -z 5.0 -s 2.0 -az 90.0 -alt 20.0'])
            # paths with spaces
source_with_space = os.path.join(testDataPath, 'raster with spaces.tif')
self.assertEqual(
alg.getConsoleCommands({'INPUT': source_with_space,
'BAND': 1,
'Z_FACTOR': 5,
'SCALE': 2,
'AZIMUTH': 90,
'ALTITUDE': 20,
'OUTPUT': outdir + '/check out.tif'}, context, feedback),
['gdaldem',
'hillshade ' +
'"' + source_with_space + '" ' +
'"{}/check out.tif" -of GTiff -b 1 -z 5.0 -s 2.0 -az 90.0 -alt 20.0'.format(outdir)])
# compute edges
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'Z_FACTOR': 5,
'SCALE': 2,
'AZIMUTH': 90,
'ALTITUDE': 20,
'COMPUTE_EDGES': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'hillshade ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -z 5.0 -s 2.0 -az 90.0 -alt 20.0 -compute_edges'])
# with ZEVENBERGEN
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'Z_FACTOR': 5,
'SCALE': 2,
'AZIMUTH': 90,
'ALTITUDE': 20,
'ZEVENBERGEN': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'hillshade ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -z 5.0 -s 2.0 -az 90.0 -alt 20.0 -alg ZevenbergenThorne'])
# with COMBINED
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'Z_FACTOR': 5,
'SCALE': 2,
'AZIMUTH': 90,
'ALTITUDE': 20,
'COMBINED': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'hillshade ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -z 5.0 -s 2.0 -az 90.0 -alt 20.0 -combined'])
            # with multidirectional - the "-az" argument is not allowed and must be dropped
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'Z_FACTOR': 5,
'SCALE': 2,
'AZIMUTH': 90,
'ALTITUDE': 20,
'MULTIDIRECTIONAL': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'hillshade ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -z 5.0 -s 2.0 -alt 20.0 -multidirectional'])
# defaults with additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'EXTRA': '-q',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'hillshade ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -z 1.0 -s 1.0 -az 315.0 -alt 45.0 -q'])
def testAspect(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = aspect()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'TRIG_ANGLE': False,
'ZERO_FLAT': False,
'COMPUTE_EDGES': False,
'ZEVENBERGEN': False,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'aspect ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1'])
            # paths with spaces
source_with_space = os.path.join(testDataPath, 'raster with spaces.tif')
self.assertEqual(
alg.getConsoleCommands({'INPUT': source_with_space,
'BAND': 1,
'TRIG_ANGLE': False,
'ZERO_FLAT': False,
'COMPUTE_EDGES': False,
'ZEVENBERGEN': False,
'OUTPUT': outdir + '/check out.tif'}, context, feedback),
['gdaldem',
'aspect ' +
'"' + source_with_space + '" ' +
'"{}/check out.tif" -of GTiff -b 1'.format(outdir)])
# compute edges
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'TRIG_ANGLE': False,
'ZERO_FLAT': False,
'COMPUTE_EDGES': True,
'ZEVENBERGEN': False,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'aspect ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -compute_edges'])
# with ZEVENBERGEN
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'TRIG_ANGLE': False,
'ZERO_FLAT': False,
'COMPUTE_EDGES': False,
'ZEVENBERGEN': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'aspect ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -alg ZevenbergenThorne'])
# with ZERO_FLAT
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'TRIG_ANGLE': False,
'ZERO_FLAT': True,
'COMPUTE_EDGES': False,
'ZEVENBERGEN': False,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'aspect ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -zero_for_flat'])
# with TRIG_ANGLE
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'TRIG_ANGLE': True,
'ZERO_FLAT': False,
'COMPUTE_EDGES': False,
'ZEVENBERGEN': False,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'aspect ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -trigonometric'])
# with creation options
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'TRIG_ANGLE': False,
'ZERO_FLAT': False,
'COMPUTE_EDGES': False,
'ZEVENBERGEN': False,
'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'aspect ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -co COMPRESS=JPEG -co JPEG_QUALITY=75'])
# with additional parameter
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'TRIG_ANGLE': False,
'ZERO_FLAT': False,
'COMPUTE_EDGES': False,
'ZEVENBERGEN': False,
'EXTRA': '-q',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'aspect ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -q'])
def testSlope(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = slope()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'slope ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -s 1.0'])
# compute edges
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'COMPUTE_EDGES': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'slope ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -s 1.0 -compute_edges'])
# with ZEVENBERGEN
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'ZEVENBERGEN': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'slope ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -s 1.0 -alg ZevenbergenThorne'])
            # custom scale (ratio of vertical to horizontal units)
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'SCALE': 2.0,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'slope ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -s 2.0'])
# with creation options
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'slope ' +
source + ' ' +
outdir + '/check.tif -of GTiff -b 1 -s 1.0 -co COMPRESS=JPEG -co JPEG_QUALITY=75'])
# with additional parameter
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'EXTRA': '-q',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdaldem',
'slope ' +
source + ' ' +
outdir + '/check.jpg -of JPEG -b 1 -s 1.0 -q'])
def testColorRelief(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
colorTable = os.path.join(testDataPath, 'colors.txt')
alg = ColorRelief()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'COLOR_TABLE': colorTable,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'color-relief ' +
source + ' ' +
colorTable + ' ' +
outdir + '/check.tif -of GTiff -b 1'])
            # paths with spaces
source_with_space = os.path.join(testDataPath, 'raster with spaces.tif')
self.assertEqual(
alg.getConsoleCommands({'INPUT': source_with_space,
'BAND': 1,
'COLOR_TABLE': colorTable,
'OUTPUT': outdir + '/check out.tif'}, context, feedback),
['gdaldem',
'color-relief ' +
'"' + source_with_space + '" ' +
colorTable + ' ' +
'"{}/check out.tif" -of GTiff -b 1'.format(outdir)])
# compute edges
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'COLOR_TABLE': colorTable,
'COMPUTE_EDGES': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'color-relief ' +
source + ' ' +
colorTable + ' ' +
outdir + '/check.tif -of GTiff -b 1 -compute_edges'])
# with custom matching mode
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'COLOR_TABLE': colorTable,
'MATCH_MODE': 1,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'color-relief ' +
source + ' ' +
colorTable + ' ' +
outdir + '/check.tif -of GTiff -b 1 -nearest_color_entry'])
# with creation options
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'COLOR_TABLE': colorTable,
'MATCH_MODE': 1,
'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'color-relief ' +
source + ' ' +
colorTable + ' ' +
outdir + '/check.tif -of GTiff -b 1 -nearest_color_entry -co COMPRESS=JPEG -co JPEG_QUALITY=75'])
# with additional parameter
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'COLOR_TABLE': colorTable,
'EXTRA': '-alpha -q',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdaldem',
'color-relief ' +
source + ' ' +
colorTable + ' ' +
outdir + '/check.tif -of GTiff -b 1 -alpha -q'])
def testProximity(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = proximity()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# without NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_proximity.py',
'-srcband 1 -distunits PIXEL -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'BAND': 2,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_proximity.py',
'-srcband 2 -distunits PIXEL -nodata 9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'BAND': 1,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_proximity.py',
'-srcband 1 -distunits PIXEL -nodata 0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'EXTRA': '-dstband 2 -values 3,4,12',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_proximity.py',
'-srcband 1 -distunits PIXEL -ot Float32 -of JPEG -dstband 2 -values 3,4,12 ' +
source + ' ' +
outdir + '/check.jpg'])
def testRasterize(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'polys.gml')
alg = rasterize()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# with no NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'FIELD': 'id',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id -ts 0.0 0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'FIELD': 'id',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id -ts 0.0 0.0 -a_nodata 9999.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" INIT value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'INIT': 0,
'FIELD': 'id',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id -ts 0.0 0.0 -init 0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'FIELD': 'id',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id -ts 0.0 0.0 -a_nodata 0.0 -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
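            # additional parameters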
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'FIELD': 'id',
'EXTRA': '-at -add',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id -ts 0.0 0.0 -ot Float32 -of JPEG -at -add ' +
source + ' ' +
outdir + '/check.jpg'])
def testRasterizeOver(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
raster = os.path.join(testDataPath, 'dem.tif')
vector = os.path.join(testDataPath, 'polys.gml')
alg = rasterize_over()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': vector,
'FIELD': 'id',
'INPUT_RASTER': raster}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id ' +
vector + ' ' + raster])
self.assertEqual(
alg.getConsoleCommands({'INPUT': vector,
'FIELD': 'id',
'ADD': True,
'INPUT_RASTER': raster}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id -add ' +
vector + ' ' + raster])
self.assertEqual(
alg.getConsoleCommands({'INPUT': vector,
'FIELD': 'id',
'EXTRA': '-i',
'INPUT_RASTER': raster}, context, feedback),
['gdal_rasterize',
'-l polys2 -a id -i ' +
vector + ' ' + raster])
def testRasterizeOverFixed(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
raster = os.path.join(testDataPath, 'dem.tif')
vector = os.path.join(testDataPath, 'polys.gml')
alg = rasterize_over_fixed_value()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': vector,
'BURN': 100,
'INPUT_RASTER': raster}, context, feedback),
['gdal_rasterize',
'-l polys2 -burn 100.0 ' +
vector + ' ' + raster])
self.assertEqual(
alg.getConsoleCommands({'INPUT': vector,
'BURN': 100,
'ADD': True,
'INPUT_RASTER': raster}, context, feedback),
['gdal_rasterize',
'-l polys2 -burn 100.0 -add ' +
vector + ' ' + raster])
self.assertEqual(
alg.getConsoleCommands({'INPUT': vector,
'BURN': 100,
'EXTRA': '-i',
'INPUT_RASTER': raster}, context, feedback),
['gdal_rasterize',
'-l polys2 -burn 100.0 -i ' +
vector + ' ' + raster])
def testRetile(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = retile()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
self.assertEqual(
alg.getConsoleCommands({'INPUT': [source],
'OUTPUT': outdir}, context, feedback),
['gdal_retile.py',
'-ps 256 256 -overlap 0 -levels 1 -r near -ot Float32 -targetDir {} '.format(outdir) +
source])
# with input srs
self.assertEqual(
alg.getConsoleCommands({'INPUT': [source],
'SOURCE_CRS': 'EPSG:3111',
'OUTPUT': outdir}, context, feedback),
['gdal_retile.py',
'-ps 256 256 -overlap 0 -levels 1 -s_srs EPSG:3111 -r near -ot Float32 -targetDir {} {}'.format(outdir, source)
])
            # with source CRS given as a proj string
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': [source],
'SOURCE_CRS': custom_crs,
'OUTPUT': outdir}, context, feedback),
['gdal_retile.py',
'-ps 256 256 -overlap 0 -levels 1 -s_srs EPSG:20936 -r near -ot Float32 -targetDir {} {}'.format(outdir, source)
])
            # with source CRS using a custom projection
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': [source],
'SOURCE_CRS': custom_crs,
'OUTPUT': outdir}, context, feedback),
['gdal_retile.py',
'-ps 256 256 -overlap 0 -levels 1 -s_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" -r near -ot Float32 -targetDir {} {}'.format(outdir, source)
])
# with non-EPSG crs code
self.assertEqual(
alg.getConsoleCommands({'INPUT': [source],
'SOURCE_CRS': 'POSTGIS:3111',
'OUTPUT': outdir}, context, feedback),
['gdal_retile.py',
'-ps 256 256 -overlap 0 -levels 1 -s_srs EPSG:3111 -r near -ot Float32 -targetDir {} {}'.format(outdir, source)
])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': [source],
'EXTRA': '-v -tileIndex tindex.shp',
'OUTPUT': outdir}, context, feedback),
['gdal_retile.py',
'-ps 256 256 -overlap 0 -levels 1 -r near -ot Float32 -v -tileIndex tindex.shp -targetDir {} '.format(outdir) +
source])
def testWarp(self):
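        """Check gdalwarp commands for nodata, data type, source/target CRS, target extent, resolution and extra parameters."""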
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = warp()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# with no NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with None NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': None,
'SOURCE_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 9999,
'SOURCE_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -dstnodata 9999.0 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'SOURCE_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -dstnodata 0.0 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with "0" NODATA value and custom data type
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NODATA': 0,
'DATA_TYPE': 6,
'SOURCE_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -dstnodata 0.0 -r near -ot Float32 -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with target using EPSG
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'EPSG:3111',
'TARGET_CRS': 'EPSG:4326',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -t_srs EPSG:4326 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with target using proj string
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': custom_crs,
'TARGET_CRS': custom_crs,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:20936 -t_srs EPSG:20936 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with target using custom projection
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': custom_crs,
'TARGET_CRS': custom_crs,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" -t_srs "+proj=utm +zone=36 +south +a=63785 +b=6357 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs" -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with target using custom projection and user-defined extent
custom_crs2 = 'proj4: +proj=longlat +a=6378388 +b=6356912 +no_defs'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': custom_crs2,
'TARGET_CRS': custom_crs2,
'TARGET_EXTENT': '18.67,18.70,45.78,45.81',
'TARGET_EXTENT_CRS': custom_crs2,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['gdalwarp',
'-s_srs "+proj=longlat +a=6378388 +b=6356912 +no_defs" -t_srs "+proj=longlat +a=6378388 +b=6356912 +no_defs" -r near -te 18.67 45.78 18.7 45.81 -te_srs "+proj=longlat +a=6378388 +b=6356912 +no_defs" -of GTiff ' +
source + ' ' +
outdir + '/check.tif'])
# with non-EPSG crs code
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'POSTGIS:3111',
'TARGET_CRS': 'POSTGIS:3111',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -t_srs EPSG:3111 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
            # with target resolution set to None
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'EPSG:3111',
'TARGET_RESOLUTION': None,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# test target resolution with a valid value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'EPSG:3111',
'TARGET_RESOLUTION': 10.0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -tr 10.0 10.0 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# test target resolution with a value of zero, to be ignored
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'SOURCE_CRS': 'EPSG:3111',
'TARGET_RESOLUTION': 0.0,
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-s_srs EPSG:3111 -r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
# with additional command-line parameter
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-dstalpha',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-r near -of JPEG -dstalpha ' +
source + ' ' +
outdir + '/check.jpg'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-dstalpha -srcnodata -9999',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-r near -of JPEG -dstalpha -srcnodata -9999 ' +
source + ' ' +
outdir + '/check.jpg'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-dstalpha -srcnodata "-9999 -8888"',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-r near -of JPEG -dstalpha -srcnodata "-9999 -8888" ' +
source + ' ' +
outdir + '/check.jpg'])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '',
'OUTPUT': outdir + '/check.jpg'}, context, feedback),
['gdalwarp',
'-r near -of JPEG ' +
source + ' ' +
outdir + '/check.jpg'])
def testMerge(self):
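        """Check gdal_merge.py commands; the input list goes through a temporary --optfile whose path is stripped before comparing."""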
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = [os.path.join(testDataPath, 'dem1.tif'), os.path.join(testDataPath, 'dem1.tif')]
alg = merge()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
            # this algorithm writes the input layers to a temporary text file (--optfile),
            # so we strip its path, leaving only the filename
cmd = alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.tif'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('--optfile') + 10] + t[t.find('mergeInputFiles.txt'):]
self.assertEqual(cmd,
['gdal_merge.py',
'-ot Float32 -of GTiff ' +
'-o ' + outdir + '/check.tif ' +
'--optfile mergeInputFiles.txt'])
# separate
cmd = alg.getConsoleCommands({'INPUT': source,
'SEPARATE': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('--optfile') + 10] + t[t.find('mergeInputFiles.txt'):]
self.assertEqual(cmd,
['gdal_merge.py',
'-separate -ot Float32 -of GTiff ' +
'-o ' + outdir + '/check.tif ' +
'--optfile mergeInputFiles.txt'])
            # additional parameters
cmd = alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-tap -ps 0.1 0.1',
'OUTPUT': outdir + '/check.tif'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('--optfile') + 10] + t[t.find('mergeInputFiles.txt'):]
self.assertEqual(cmd,
['gdal_merge.py',
'-ot Float32 -of GTiff -tap -ps 0.1 0.1 ' +
'-o ' + outdir + '/check.tif ' +
'--optfile mergeInputFiles.txt'])
            # assign nodata
cmd = alg.getConsoleCommands({'INPUT': source,
'NODATA_OUTPUT': -9999,
'OUTPUT': outdir + '/check.tif'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('--optfile') + 10] + t[t.find('mergeInputFiles.txt'):]
self.assertEqual(cmd,
['gdal_merge.py',
'-a_nodata -9999 -ot Float32 -of GTiff ' +
'-o ' + outdir + '/check.tif ' +
'--optfile mergeInputFiles.txt'])
def testNearblack(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = nearblack()
alg.initAlgorithm()
with tempfile.TemporaryDirectory() as outdir:
# defaults
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['nearblack',
source + ' -of GTiff -o ' + outdir + '/check.tif ' +
'-near 15'])
# search white pixels
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'WHITE': True,
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['nearblack',
source + ' -of GTiff -o ' + outdir + '/check.tif ' +
'-near 15 -white'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-nb 5 -setalpha',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['nearblack',
source + ' -of GTiff -o ' + outdir + '/check.tif ' +
'-near 15 -nb 5 -setalpha'])
# additional parameters and creation options
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75',
'EXTRA': '-nb 5 -setalpha',
'OUTPUT': outdir + '/check.tif'}, context, feedback),
['nearblack',
source + ' -of GTiff -o ' + outdir + '/check.tif ' +
'-near 15 -co COMPRESS=JPEG -co JPEG_QUALITY=75 -nb 5 -setalpha'])
def testRearrangeBands(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
with tempfile.TemporaryDirectory() as outdir:
outsource = outdir + '/check.tif'
alg = rearrange_bands()
alg.initAlgorithm()
# single band
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BANDS': 1,
'OUTPUT': outsource}, context, feedback),
['gdal_translate', '-b 1 ' +
'-of GTiff ' +
source + ' ' + outsource])
# three bands, re-ordered
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BANDS': [3, 2, 1],
'OUTPUT': outsource}, context, feedback),
['gdal_translate', '-b 3 -b 2 -b 1 ' +
'-of GTiff ' +
source + ' ' + outsource])
# three bands, re-ordered with custom data type
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BANDS': [3, 2, 1],
'DATA_TYPE': 6,
'OUTPUT': outsource}, context, feedback),
['gdal_translate', '-b 3 -b 2 -b 1 ' +
'-ot Float32 -of GTiff ' +
source + ' ' + outsource])
def testFillnodata(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
mask = os.path.join(testDataPath, 'raster.tif')
with tempfile.TemporaryDirectory() as outdir:
outsource = outdir + '/check.tif'
alg = fillnodata()
alg.initAlgorithm()
# with mask value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'DISTANCE': 10,
'ITERATIONS': 0,
'MASK_LAYER': mask,
'NO_MASK': False,
'OUTPUT': outsource}, context, feedback),
['gdal_fillnodata.py',
'-md 10 -b 1 -mask ' +
mask +
' -of GTiff ' +
source + ' ' +
outsource])
# without mask value
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'DISTANCE': 10,
'ITERATIONS': 0,
'NO_MASK': False,
'OUTPUT': outsource}, context, feedback),
['gdal_fillnodata.py',
'-md 10 -b 1 ' +
'-of GTiff ' +
source + ' ' +
outsource])
# nomask true
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'DISTANCE': 10,
'ITERATIONS': 0,
'NO_MASK': True,
'OUTPUT': outsource}, context, feedback),
['gdal_fillnodata.py',
'-md 10 -b 1 -nomask ' +
'-of GTiff ' +
source + ' ' +
outsource])
# creation options
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'OPTIONS': 'COMPRESS=JPEG|JPEG_QUALITY=75',
'OUTPUT': outsource}, context, feedback),
['gdal_fillnodata.py',
'-md 10 -b 1 -of GTiff -co COMPRESS=JPEG -co JPEG_QUALITY=75 ' +
source + ' ' +
outsource])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'EXTRA': '-q',
'OUTPUT': outsource}, context, feedback),
['gdal_fillnodata.py',
'-md 10 -b 1 -of GTiff -q ' +
source + ' ' +
outsource])
def testGdalAddo(self):
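        """Check gdaladdo commands for overview levels, resampling methods, overview formats and the clean option."""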
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
with tempfile.TemporaryDirectory() as outdir:
alg = gdaladdo()
alg.initAlgorithm()
# defaults
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16',
'CLEAN': False,
'RESAMPLING': 0,
'FORMAT': 0}, context, feedback),
['gdaladdo',
source + ' ' + '-r nearest 2 4 8 16'])
# with "clean" option
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16',
'CLEAN': True,
'RESAMPLING': 0,
'FORMAT': 0}, context, feedback),
['gdaladdo',
source + ' ' + '-r nearest -clean 2 4 8 16'])
            # external overviews (.ovr) format
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16',
'CLEAN': False,
'RESAMPLING': 0,
'FORMAT': 1}, context, feedback),
['gdaladdo',
source + ' ' + '-r nearest -ro 2 4 8 16'])
# Erdas format
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16',
'CLEAN': False,
'RESAMPLING': 0,
'FORMAT': 2}, context, feedback),
['gdaladdo',
source + ' ' + '-r nearest --config USE_RRD YES 2 4 8 16'])
# custom resampling method format
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16',
'CLEAN': False,
'RESAMPLING': 4,
'FORMAT': 0}, context, feedback),
['gdaladdo',
source + ' ' + '-r cubicspline 2 4 8 16'])
# more levels
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16 32 64',
'CLEAN': False,
'RESAMPLING': 0,
'FORMAT': 0}, context, feedback),
['gdaladdo',
source + ' ' + '-r nearest 2 4 8 16 32 64'])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16',
'CLEAN': False,
'EXTRA': '--config COMPRESS_OVERVIEW JPEG'}, context, feedback),
['gdaladdo',
source + ' ' + '-r nearest --config COMPRESS_OVERVIEW JPEG 2 4 8 16'])
# without advanced params
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'LEVELS': '2 4 8 16',
'CLEAN': False}, context, feedback),
['gdaladdo',
source + ' ' + '-r nearest 2 4 8 16'])
def testSieve(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
mask = os.path.join(testDataPath, 'raster.tif')
with tempfile.TemporaryDirectory() as outdir:
outsource = outdir + '/check.tif'
alg = sieve()
alg.initAlgorithm()
# defaults
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'OUTPUT': outsource}, context, feedback),
['gdal_sieve.py',
'-st 10 -4 -of GTiff ' +
source + ' ' +
outsource])
# Eight connectedness and custom threshold
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'THRESHOLD': 16,
'EIGHT_CONNECTEDNESS': True,
'OUTPUT': outsource}, context, feedback),
['gdal_sieve.py',
'-st 16 -8 -of GTiff ' +
source + ' ' +
outsource])
# without default mask layer
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'NO_MASK': True,
'OUTPUT': outsource}, context, feedback),
['gdal_sieve.py',
'-st 10 -4 -nomask -of GTiff ' +
source + ' ' +
outsource])
# defaults with external validity mask
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'MASK_LAYER': mask,
'OUTPUT': outsource}, context, feedback),
['gdal_sieve.py',
'-st 10 -4 -mask ' +
mask +
' -of GTiff ' +
source + ' ' +
outsource])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'EXTRA': '-q',
'OUTPUT': outsource}, context, feedback),
['gdal_sieve.py',
'-st 10 -4 -of GTiff -q ' +
source + ' ' +
outsource])
def testGdal2Xyz(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
with tempfile.TemporaryDirectory() as outdir:
outsource = outdir + '/check.csv'
alg = gdal2xyz()
alg.initAlgorithm()
# defaults
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'CSV': False,
'OUTPUT': outsource}, context, feedback),
['gdal2xyz.py',
'-band 1 ' +
source + ' ' +
outsource])
# csv output
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'CSV': True,
'OUTPUT': outsource}, context, feedback),
['gdal2xyz.py',
'-band 1 -csv ' +
source + ' ' +
outsource])
def testGdalPolygonize(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
with tempfile.TemporaryDirectory() as outdir:
outsource = outdir + '/check.shp'
alg = polygonize()
alg.initAlgorithm()
# defaults
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD': 'DN',
'EIGHT_CONNECTEDNESS': False,
'OUTPUT': outsource}, context, feedback),
['gdal_polygonize.py',
source + ' ' +
outsource + ' ' +
'-b 1 -f "ESRI Shapefile" check DN'
])
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD': 'VAL',
'EIGHT_CONNECTEDNESS': False,
'OUTPUT': outsource}, context, feedback),
['gdal_polygonize.py',
source + ' ' +
outsource + ' ' +
'-b 1 -f "ESRI Shapefile" check VAL'
])
# 8 connectedness
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD': 'DN',
'EIGHT_CONNECTEDNESS': True,
'OUTPUT': outsource}, context, feedback),
['gdal_polygonize.py',
source + ' ' +
outsource + ' ' +
'-8 -b 1 -f "ESRI Shapefile" check DN'
])
# custom output format
outsource = outdir + '/check.gpkg'
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD': 'DN',
'EIGHT_CONNECTEDNESS': False,
'OUTPUT': outsource}, context, feedback),
['gdal_polygonize.py',
source + ' ' +
outsource + ' ' +
'-b 1 -f "GPKG" check DN'
])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'INPUT': source,
'BAND': 1,
'FIELD': 'DN',
'EXTRA': '-nomask -q',
'OUTPUT': outsource}, context, feedback),
['gdal_polygonize.py',
source + ' ' +
outsource + ' ' +
'-b 1 -f "GPKG" -nomask -q check DN'
])
def testGdalPansharpen(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
panchrom = os.path.join(testDataPath, 'dem.tif')
spectral = os.path.join(testDataPath, 'raster.tif')
with tempfile.TemporaryDirectory() as outdir:
outsource = outdir + '/out.tif'
alg = pansharp()
alg.initAlgorithm()
# defaults
self.assertEqual(
alg.getConsoleCommands({'SPECTRAL': spectral,
'PANCHROMATIC': panchrom,
'OUTPUT': outsource}, context, feedback),
['gdal_pansharpen.py',
panchrom + ' ' +
spectral + ' ' +
outsource + ' ' +
'-r cubic -of GTiff'
])
# custom resampling
self.assertEqual(
alg.getConsoleCommands({'SPECTRAL': spectral,
'PANCHROMATIC': panchrom,
'RESAMPLING': 4,
'OUTPUT': outsource}, context, feedback),
['gdal_pansharpen.py',
panchrom + ' ' +
spectral + ' ' +
outsource + ' ' +
'-r lanczos -of GTiff'
])
# additional parameters
self.assertEqual(
alg.getConsoleCommands({'SPECTRAL': spectral,
'PANCHROMATIC': panchrom,
'EXTRA': '-bitdepth 12 -threads ALL_CPUS',
'OUTPUT': outsource}, context, feedback),
['gdal_pansharpen.py',
panchrom + ' ' +
spectral + ' ' +
outsource + ' ' +
'-r cubic -of GTiff -bitdepth 12 -threads ALL_CPUS'
])
def testGdalViewshed(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
dem = os.path.join(testDataPath, 'dem.tif')
with tempfile.TemporaryDirectory() as outdir:
outsource = outdir + '/out.tif'
alg = viewshed()
alg.initAlgorithm()
# defaults
self.assertEqual(
alg.getConsoleCommands({'INPUT': dem,
'BAND': 1,
'OBSERVER': '18.67274,45.80599',
'OUTPUT': outsource}, context, feedback),
['gdal_viewshed',
'-b 1 -ox 18.67274 -oy 45.80599 -oz 1.0 -tz 1.0 -md 100.0 -f GTiff ' +
dem + ' ' + outsource
])
self.assertEqual(
alg.getConsoleCommands({'INPUT': dem,
'BAND': 2,
'OBSERVER': '18.67274,45.80599',
'OBSERVER_HEIGHT': 1.8,
'TARGET_HEIGHT': 20,
'MAX_DISTANCE': 1000,
'OUTPUT': outsource}, context, feedback),
['gdal_viewshed',
'-b 2 -ox 18.67274 -oy 45.80599 -oz 1.8 -tz 20.0 -md 1000.0 -f GTiff ' +
dem + ' ' + outsource
])
self.assertEqual(
alg.getConsoleCommands({'INPUT': dem,
'BAND': 1,
'OBSERVER': '18.67274,45.80599',
'EXTRA': '-a_nodata=-9999 -cc 0.2',
'OUTPUT': outsource}, context, feedback),
['gdal_viewshed',
'-b 1 -ox 18.67274 -oy 45.80599 -oz 1.0 -tz 1.0 -md 100.0 -f GTiff ' +
'-a_nodata=-9999 -cc 0.2 ' + dem + ' ' + outsource
])
self.assertEqual(
alg.getConsoleCommands({'INPUT': dem,
'BAND': 1,
'OBSERVER': '18.67274,45.80599',
'OPTIONS': 'COMPRESS=DEFLATE|PREDICTOR=2|ZLEVEL=9',
'OUTPUT': outsource}, context, feedback),
['gdal_viewshed',
'-b 1 -ox 18.67274 -oy 45.80599 -oz 1.0 -tz 1.0 -md 100.0 -f GTiff ' +
'-co COMPRESS=DEFLATE -co PREDICTOR=2 -co ZLEVEL=9 ' + dem + ' ' + outsource
])
def testBuildVrt(self):
context = QgsProcessingContext()
feedback = QgsProcessingFeedback()
source = os.path.join(testDataPath, 'dem.tif')
alg = buildvrt()
alg.initAlgorithm()
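        # Note: the -input_file_list argument embeds a file inside a random temporary
        # directory, so the checks below trim that path portion out of the command
        # string before comparing against the expected value.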
with tempfile.TemporaryDirectory() as outdir:
# defaults
cmd = alg.getConsoleCommands({'INPUT': [source],
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
# custom resolution
cmd = alg.getConsoleCommands({'INPUT': [source],
'RESOLUTION': 2,
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution lowest -separate -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
# single layer
cmd = alg.getConsoleCommands({'INPUT': [source],
'SEPARATE': False,
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
# projection difference
cmd = alg.getConsoleCommands({'INPUT': [source],
'PROJ_DIFFERENCE': True,
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -allow_projection_difference -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
# add alpha band
cmd = alg.getConsoleCommands({'INPUT': [source],
'ADD_ALPHA': True,
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -addalpha -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
# assign CRS
cmd = alg.getConsoleCommands({'INPUT': [source],
'ASSIGN_CRS': 'EPSG:3111',
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -a_srs EPSG:3111 -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
custom_crs = 'proj4: +proj=utm +zone=36 +south +a=6378249.145 +b=6356514.966398753 +towgs84=-143,-90,-294,0,0,0,0 +units=m +no_defs'
cmd = alg.getConsoleCommands({'INPUT': [source],
'ASSIGN_CRS': custom_crs,
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -a_srs EPSG:20936 -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
# source NODATA
cmd = alg.getConsoleCommands({'INPUT': [source],
'SRC_NODATA': '-9999',
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -r nearest -srcnodata "-9999" ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
cmd = alg.getConsoleCommands({'INPUT': [source],
'SRC_NODATA': '-9999 9999',
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -r nearest -srcnodata "-9999 9999" ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
cmd = alg.getConsoleCommands({'INPUT': [source],
'SRC_NODATA': '',
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -r nearest ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
# additional parameters
cmd = alg.getConsoleCommands({'INPUT': [source],
'EXTRA': '-overwrite -optim RASTER -vrtnodata -9999',
'OUTPUT': outdir + '/check.vrt'}, context, feedback)
t = cmd[1]
cmd[1] = t[:t.find('-input_file_list') + 17] + t[t.find('buildvrtInputFiles.txt'):]
self.assertEqual(cmd,
['gdalbuildvrt',
'-resolution average -separate -r nearest -overwrite -optim RASTER -vrtnodata -9999 ' +
'-input_file_list buildvrtInputFiles.txt ' +
outdir + '/check.vrt'])
if __name__ == '__main__':
nose2.main()
| gpl-2.0 |
heli522/scikit-learn | benchmarks/bench_20newsgroups.py | 370 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
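# Example invocation (a sketch, not part of the original script's docs):
#   python bench_20newsgroups.py -e logistic_regression naive_bayes
# Each named estimator is fitted on the vectorized 20 newsgroups training split
# and then timed and scored on the test split.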
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 220 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
    chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/metrics/__init__.py | 10 | 3328 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
    'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| bsd-3-clause |
hasgeek/funnel | funnel/models/project.py | 1 | 32050 | """Project model."""
from __future__ import annotations
from typing import Iterable, List, Optional
from sqlalchemy.orm.collections import attribute_mapped_collection
from werkzeug.utils import cached_property
from pytz import utc
from baseframe import __, localize_timezone
from coaster.sqlalchemy import LazyRoleSet, StateManager, with_roles
from coaster.utils import LabeledEnum, buid, utcnow
from .. import app
from ..typing import OptionalMigratedTables
from . import (
BaseScopedNameMixin,
JsonDict,
Mapped,
MarkdownColumn,
TimestampMixin,
TimezoneType,
TSVectorType,
UrlType,
UuidMixin,
db,
sa,
)
from .comment import SET_TYPE, Commentset
from .helpers import (
RESERVED_NAMES,
ImgeeType,
add_search_trigger,
markdown_content_options,
reopen,
valid_name,
visual_field_delimiter,
)
from .profile import Profile
from .user import User
__all__ = ['Project', 'ProjectLocation', 'ProjectRedirect']
# --- Constants ---------------------------------------------------------------
class PROJECT_STATE(LabeledEnum): # noqa: N801
DRAFT = (1, 'draft', __("Draft"))
PUBLISHED = (2, 'published', __("Published"))
WITHDRAWN = (3, 'withdrawn', __("Withdrawn"))
DELETED = (4, 'deleted', __("Deleted"))
DELETABLE = {DRAFT, PUBLISHED, WITHDRAWN}
PUBLISHABLE = {DRAFT, WITHDRAWN}
class CFP_STATE(LabeledEnum): # noqa: N801
NONE = (1, 'none', __("None"))
PUBLIC = (2, 'public', __("Public"))
CLOSED = (3, 'closed', __("Closed"))
ANY = {NONE, PUBLIC, CLOSED}
# --- Models ------------------------------------------------------------------
class Project(UuidMixin, BaseScopedNameMixin, db.Model): # type: ignore[name-defined]
__tablename__ = 'project'
reserved_names = RESERVED_NAMES
user_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'), nullable=False)
user = sa.orm.relationship(
User,
primaryjoin=user_id == User.id,
backref=sa.orm.backref('projects', cascade='all'),
)
profile_id = sa.Column(sa.Integer, sa.ForeignKey('profile.id'), nullable=False)
profile: sa.orm.relationship[Profile] = with_roles(
sa.orm.relationship(
Profile, backref=sa.orm.backref('projects', cascade='all', lazy='dynamic')
),
read={'all'},
# If account grants an 'admin' role, make it 'profile_admin' here
grants_via={None: {'admin': 'profile_admin'}},
# `profile` only appears in the 'primary' dataset. It must not be included in
# 'related' or 'without_parent' as it is the parent
datasets={'primary'},
)
parent = sa.orm.synonym('profile')
tagline: sa.Column[str] = with_roles(
sa.Column(sa.Unicode(250), nullable=False),
read={'all'},
datasets={'primary', 'without_parent', 'related'},
)
description = with_roles(
MarkdownColumn(
'description', default='', nullable=False, options=markdown_content_options
),
read={'all'},
)
instructions = with_roles(
MarkdownColumn(
'instructions', default='', nullable=True, options=markdown_content_options
),
read={'all'},
)
location = with_roles(
sa.Column(sa.Unicode(50), default='', nullable=True),
read={'all'},
datasets={'primary', 'without_parent', 'related'},
)
parsed_location = sa.Column(JsonDict, nullable=False, server_default='{}')
website = with_roles(
sa.Column(UrlType, nullable=True),
read={'all'},
datasets={'primary', 'without_parent'},
)
timezone = with_roles(
sa.Column(TimezoneType(backend='pytz'), nullable=False, default=utc),
read={'all'},
datasets={'primary', 'without_parent', 'related'},
)
_state = sa.Column(
'state',
sa.Integer,
StateManager.check_constraint('state', PROJECT_STATE),
default=PROJECT_STATE.DRAFT,
nullable=False,
index=True,
)
state = with_roles(
StateManager('_state', PROJECT_STATE, doc="Project state"), call={'all'}
)
_cfp_state = sa.Column(
'cfp_state',
sa.Integer,
StateManager.check_constraint('cfp_state', CFP_STATE),
default=CFP_STATE.NONE,
nullable=False,
index=True,
)
cfp_state = with_roles(
StateManager('_cfp_state', CFP_STATE, doc="CfP state"), call={'all'}
)
#: Audit timestamp to detect re-publishing to re-surface a project
first_published_at = sa.Column(sa.TIMESTAMP(timezone=True), nullable=True)
#: Timestamp of when this project was most recently published
published_at = with_roles(
sa.Column(sa.TIMESTAMP(timezone=True), nullable=True, index=True),
read={'all'},
write={'promoter'},
datasets={'primary', 'without_parent', 'related'},
)
#: Optional start time for schedule, cached from column property schedule_start_at
start_at = with_roles(
sa.Column(sa.TIMESTAMP(timezone=True), nullable=True, index=True),
read={'all'},
write={'editor'},
datasets={'primary', 'without_parent', 'related'},
)
#: Optional end time for schedule, cached from column property schedule_end_at
end_at = with_roles(
sa.Column(sa.TIMESTAMP(timezone=True), nullable=True, index=True),
read={'all'},
write={'editor'},
datasets={'primary', 'without_parent', 'related'},
)
cfp_start_at = sa.Column(sa.TIMESTAMP(timezone=True), nullable=True, index=True)
cfp_end_at = sa.Column(sa.TIMESTAMP(timezone=True), nullable=True, index=True)
bg_image = with_roles(
sa.Column(ImgeeType, nullable=True),
read={'all'},
datasets={'primary', 'without_parent', 'related'},
)
allow_rsvp: sa.Column[sa.Boolean] = with_roles(
sa.Column(sa.Boolean, default=True, nullable=False),
read={'all'},
datasets={'primary', 'without_parent', 'related'},
)
buy_tickets_url: sa.Column[Optional[str]] = with_roles(
sa.Column(UrlType, nullable=True),
read={'all'},
datasets={'primary', 'without_parent', 'related'},
)
banner_video_url = with_roles(
sa.Column(UrlType, nullable=True),
read={'all'},
datasets={'primary', 'without_parent'},
)
boxoffice_data = with_roles(
sa.Column(JsonDict, nullable=False, server_default='{}'),
# This is an attribute, but we deliberately use `call` instead of `read` to
# block this from dictionary enumeration. FIXME: Break up this dictionary into
# individual columns with `all` access for ticket embed id and `promoter`
# access for ticket sync access token.
call={'all'},
)
hasjob_embed_url = with_roles(sa.Column(UrlType, nullable=True), read={'all'})
hasjob_embed_limit = with_roles(sa.Column(sa.Integer, default=8), read={'all'})
commentset_id = sa.Column(
sa.Integer, sa.ForeignKey('commentset.id'), nullable=False
)
commentset = sa.orm.relationship(
Commentset,
uselist=False,
cascade='all',
single_parent=True,
back_populates='project',
)
parent_id = sa.Column(
sa.Integer, sa.ForeignKey('project.id', ondelete='SET NULL'), nullable=True
)
parent_project: Mapped[Optional[Project]] = sa.orm.relationship(
'Project', remote_side='Project.id', backref='subprojects'
)
#: Featured project flag. This can only be set by website editors, not
#: project editors or account admins.
site_featured = with_roles(
sa.Column(sa.Boolean, default=False, nullable=False),
read={'all'},
write={'site_editor'},
datasets={'primary', 'without_parent'},
)
#: Version number maintained by SQLAlchemy, used for vCal files, starting at 1
versionid = with_roles(sa.Column(sa.Integer, nullable=False), read={'all'})
search_vector = sa.orm.deferred(
sa.Column(
TSVectorType(
'name',
'title',
'description_text',
'instructions_text',
'location',
weights={
'name': 'A',
'title': 'A',
'description_text': 'B',
'instructions_text': 'B',
'location': 'C',
},
regconfig='english',
hltext=lambda: sa.func.concat_ws(
visual_field_delimiter,
Project.title,
Project.location,
Project.description_html,
Project.instructions_html,
),
),
nullable=False,
)
)
livestream_urls = with_roles(
sa.Column(sa.ARRAY(sa.UnicodeText, dimensions=1), server_default='{}'),
read={'all'},
datasets={'primary', 'without_parent'},
)
__table_args__ = (
sa.UniqueConstraint('profile_id', 'name'),
sa.Index('ix_project_search_vector', 'search_vector', postgresql_using='gin'),
sa.CheckConstraint(
sa.or_( # type: ignore[arg-type]
sa.and_(start_at.is_(None), end_at.is_(None)),
sa.and_(start_at.isnot(None), end_at.isnot(None), end_at > start_at),
),
'project_start_at_end_at_check',
),
sa.CheckConstraint(
sa.or_( # type: ignore[arg-type]
sa.and_(cfp_start_at.is_(None), cfp_end_at.is_(None)),
sa.and_(cfp_start_at.isnot(None), cfp_end_at.is_(None)),
sa.and_(
cfp_start_at.isnot(None),
cfp_end_at.isnot(None),
cfp_end_at > cfp_start_at,
),
),
'project_cfp_start_at_cfp_end_at_check',
),
)
__mapper_args__ = {'version_id_col': versionid}
__roles__ = {
'all': {
'read': {
'absolute_url', # From UrlForMixin
'name', # From BaseScopedNameMixin
'short_title', # From BaseScopedNameMixin
'title', # From BaseScopedNameMixin
'urls', # From UrlForMixin
'created_at', # From TimestampMixin, used for ical render timestamp
'updated_at', # From TimestampMixin, used for ical render timestamp
},
'call': {
'features', # From RegistryMixin
'forms', # From RegistryMixin
'url_for', # From UrlForMixin
'view_for', # From UrlForMixin
'views', # From RegistryMixin
},
},
}
__datasets__ = {
'primary': {
'absolute_url', # From UrlForMixin
'name', # From BaseScopedNameMixin
'title', # From BaseScopedNameMixin
'urls', # From UrlForMixin
},
'without_parent': {
'absolute_url', # From UrlForMixin
'name', # From BaseScopedNameMixin
'title', # From BaseScopedNameMixin
},
'related': {
'absolute_url', # From UrlForMixin
'name', # From BaseScopedNameMixin
'title', # From BaseScopedNameMixin
},
}
state.add_conditional_state(
'PAST',
state.PUBLISHED,
lambda project: project.end_at is not None and utcnow() >= project.end_at,
lambda project: sa.func.utcnow() >= project.end_at,
label=('past', __("Past")),
)
state.add_conditional_state(
'LIVE',
state.PUBLISHED,
lambda project: (
project.start_at is not None
and project.start_at <= utcnow() < project.end_at
),
lambda project: sa.and_(
project.start_at <= sa.func.utcnow(),
sa.func.utcnow() < project.end_at,
),
label=('live', __("Live")),
)
state.add_conditional_state(
'UPCOMING',
state.PUBLISHED,
lambda project: project.start_at is not None and utcnow() < project.start_at,
lambda project: sa.func.utcnow() < project.start_at,
label=('upcoming', __("Upcoming")),
)
state.add_conditional_state(
'PUBLISHED_WITHOUT_SESSIONS',
state.PUBLISHED,
lambda project: project.start_at is None,
lambda project: project.start_at.is_(None),
label=('published_without_sessions', __("Published without sessions")),
)
cfp_state.add_conditional_state(
'HAS_PROPOSALS',
cfp_state.ANY,
lambda project: db.session.query( # type: ignore[has-type]
project.proposals.exists()
).scalar(),
label=('has_proposals', __("Has submissions")),
)
cfp_state.add_conditional_state(
'HAS_SESSIONS',
cfp_state.ANY,
lambda project: db.session.query( # type: ignore[has-type]
project.sessions.exists()
).scalar(),
label=('has_sessions', __("Has sessions")),
)
cfp_state.add_conditional_state(
'DRAFT',
cfp_state.NONE,
lambda project: project.instructions_html != '',
lambda project: sa.and_(
project.instructions_html.isnot(None), project.instructions_html != ''
),
label=('draft', __("Draft")),
)
cfp_state.add_conditional_state(
'OPEN',
cfp_state.PUBLIC,
lambda project: project.cfp_end_at is None or (utcnow() < project.cfp_end_at),
lambda project: sa.or_(
project.cfp_end_at.is_(None), sa.func.utcnow() < project.cfp_end_at
),
label=('open', __("Open")),
)
cfp_state.add_conditional_state(
'EXPIRED',
cfp_state.PUBLIC,
lambda project: project.cfp_end_at is not None
and utcnow() >= project.cfp_end_at,
lambda project: sa.and_(
project.cfp_end_at.isnot(None), sa.func.utcnow() >= project.cfp_end_at
),
label=('expired', __("Expired")),
)
cfp_state.add_state_group(
'OPENABLE',
cfp_state.CLOSED,
cfp_state.NONE,
cfp_state.EXPIRED,
)
cfp_state.add_state_group(
'UNAVAILABLE',
cfp_state.CLOSED,
cfp_state.EXPIRED,
)
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self.commentset = Commentset(settype=SET_TYPE.PROJECT)
# Add the creator as editor and promoter
new_membership = ProjectCrewMembership(
parent=self,
user=self.user,
granted_by=self.user,
is_editor=True,
is_promoter=True,
)
db.session.add(new_membership)
def __repr__(self) -> str:
"""Represent :class:`Project` as a string."""
return f'<Project {self.profile.name}/{self.name} "{self.title}">'
@with_roles(call={'editor'})
@cfp_state.transition(
cfp_state.OPENABLE,
cfp_state.PUBLIC,
title=__("Enable submissions"),
message=__("Submissions will be accepted until the optional closing date"),
type='success',
)
def open_cfp(self):
# If closing date is in the past, remove it
if self.cfp_end_at is not None and self.cfp_end_at <= utcnow():
self.cfp_end_at = None
# If opening date is not set, set it
if self.cfp_start_at is None:
self.cfp_start_at = sa.func.utcnow()
@with_roles(call={'editor'}) # skipcq: PTC-W0049
@cfp_state.transition(
cfp_state.PUBLIC,
cfp_state.CLOSED,
title=__("Disable submissions"),
message=__("Submissions will no longer be accepted"),
type='success',
)
def close_cfp(self):
pass
@with_roles(call={'editor'})
@state.transition(
state.PUBLISHABLE,
state.PUBLISHED,
title=__("Publish project"),
message=__("The project has been published"),
type='success',
)
def publish(self) -> bool:
"""Publish a project and return a flag if this is the first publishing."""
first_published = False
if not self.first_published_at:
self.first_published_at = sa.func.utcnow()
first_published = True
self.published_at = sa.func.utcnow()
return first_published
@with_roles(call={'editor'}) # skipcq: PTC-W0049
@state.transition(
state.PUBLISHED,
state.WITHDRAWN,
title=__("Withdraw project"),
message=__("The project has been withdrawn and is no longer listed"),
type='success',
)
def withdraw(self):
pass
@property
def title_inline(self) -> str:
"""Suffix a colon if the title does not end in ASCII sentence punctuation."""
if self.title and self.tagline:
if not self.title[-1] in ('?', '!', ':', ';', '.', ','):
return self.title + ':'
return self.title
with_roles(title_inline, read={'all'}, datasets={'primary', 'without_parent'})
@property
def title_suffix(self) -> str:
"""
Return the profile's title if the project's title doesn't derive from it.
Used in HTML title tags to render <title>{{ project }} - {{ suffix }}</title>.
"""
if not self.title.startswith(self.parent.title):
return self.profile.title
return ''
with_roles(title_suffix, read={'all'})
@property
def title_parts(self) -> List[str]:
"""
Return the hierarchy of titles of this project.
If the project's title is an extension of the account's title, only the
project's title is returned as a single list item. If they are distinct, both
are returned.
This list is used by :prop:`joined_title` to produce a slash-separated title,
but can be used directly when another rendering is required.
"""
if self.short_title == self.title:
# Project title does not derive from account title, so use both
return [self.profile.title, self.title]
# Project title extends account title, so account title is not needed
return [self.title]
with_roles(title_parts, read={'all'})
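    # Illustration (hypothetical titles): a project "Rootconf 2020" under an account
    # titled "Rootconf" yields ["Rootconf 2020"], while "Community mixer" under the
    # same account yields ["Rootconf", "Community mixer"].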
@property
def joined_title(self) -> str:
"""Return the project's title joined with the account's title, if divergent."""
return ' / '.join(self.title_parts)
with_roles(
joined_title, read={'all'}, datasets={'primary', 'without_parent', 'related'}
)
@with_roles(read={'all'}, datasets={'primary', 'without_parent', 'related'})
@cached_property
def datelocation(self) -> str:
"""
Return a date and location string for the project.
The format depends on project dates:
1. If it's a single day event:
> 11 Feb 2018, Bangalore
2. If multi-day event in same month:
> 09–12 Feb 2018, Bangalore
3. If multi-day event across months:
> 27 Feb–02 Mar 2018, Bangalore
4. If multi-day event across years:
> 30 Dec 2018–02 Jan 2019, Bangalore
"""
# FIXME: Replace strftime with Babel formatting
daterange = ''
if self.start_at is not None and self.end_at is not None:
schedule_start_at_date = self.start_at_localized.date()
schedule_end_at_date = self.end_at_localized.date()
daterange_format = '{start_date}–{end_date} {year}'
if schedule_start_at_date == schedule_end_at_date:
# if both dates are same, in case of single day project
strf_date = ''
daterange_format = '{end_date} {year}'
elif schedule_start_at_date.year != schedule_end_at_date.year:
# if the start date and end dates are in different years,
strf_date = '%d %b %Y'
elif schedule_start_at_date.month != schedule_end_at_date.month:
# If multi-day event across months
strf_date = '%d %b'
elif schedule_start_at_date.month == schedule_end_at_date.month:
# If multi-day event in same month
strf_date = '%d'
daterange = daterange_format.format(
start_date=schedule_start_at_date.strftime(strf_date),
end_date=schedule_end_at_date.strftime('%d %b'),
year=schedule_end_at_date.year,
)
return ', '.join([_f for _f in [daterange, self.location] if _f])
# TODO: Removing Delete feature till we figure out siteadmin feature
# @with_roles(call={'editor'})
# @state.transition(
# state.DELETABLE, state.DELETED, title=__("Delete project"),
# message=__("The project has been deleted"), type='success')
# def delete(self):
# pass
@sa.orm.validates('name', 'profile')
def _validate_and_create_redirect(self, key, value):
# TODO: When labels, venues and other resources are relocated from project to
# account, this validator can no longer watch for `profile` change. We'll need a
# more elaborate transfer mechanism that remaps resources to equivalent ones in
# the new `profile`.
if key == 'name':
value = value.strip() if value is not None else None
if not value or (key == 'name' and not valid_name(value)):
raise ValueError(f"Invalid value for {key}: {value!r}")
existing_value = getattr(self, key)
if value != existing_value and existing_value is not None:
ProjectRedirect.add(self)
return value
@with_roles(read={'all'}, datasets={'primary', 'without_parent'})
@cached_property
def cfp_start_at_localized(self):
return (
localize_timezone(self.cfp_start_at, tz=self.timezone)
if self.cfp_start_at
else None
)
@with_roles(read={'all'}, datasets={'primary', 'without_parent'})
@cached_property
def cfp_end_at_localized(self):
return (
localize_timezone(self.cfp_end_at, tz=self.timezone)
if self.cfp_end_at
else None
)
@with_roles(read={'all'}, datasets={'primary', 'without_parent'})
@cached_property
def start_at_localized(self):
"""Return localized start_at timestamp."""
return (
localize_timezone(self.start_at, tz=self.timezone)
if self.start_at
else None
)
@with_roles(read={'all'}, datasets={'primary', 'without_parent'})
@cached_property
def end_at_localized(self):
"""Return localized end_at timestamp."""
return localize_timezone(self.end_at, tz=self.timezone) if self.end_at else None
def update_schedule_timestamps(self):
"""Update cached timestamps from sessions."""
self.start_at = self.schedule_start_at
self.end_at = self.schedule_end_at
def roles_for(
self, actor: Optional[User] = None, anchors: Iterable = ()
) -> LazyRoleSet:
roles = super().roles_for(actor, anchors)
# https://github.com/hasgeek/funnel/pull/220#discussion_r168718052
roles.add('reader')
return roles
def is_safe_to_delete(self) -> bool:
"""Return True if project has no proposals."""
return self.proposals.count() == 0
@classmethod
def order_by_date(cls):
"""
Return an order by clause for the project's start_at or published_at.
param bool desc: Use descending order (default True)
"""
clause = sa.case(
[(cls.start_at.isnot(None), cls.start_at)],
else_=cls.published_at,
)
return clause
@classmethod
def all_unsorted(cls):
"""Return query of all published projects, without ordering criteria."""
return (
cls.query.join(Profile)
.outerjoin(Venue)
.filter(cls.state.PUBLISHED, Profile.is_verified.is_(True))
)
@classmethod
def all(cls): # noqa: A003
"""Return all published projects, ordered by date."""
return cls.all_unsorted().order_by(cls.order_by_date())
# The base class offers `get(parent, name)`. We accept f'{parent}/{name}' here for
# convenience as this is only used in shell access.
@classmethod
def get(cls, profile_project): # pylint: disable=arguments-differ
"""Get a project by its URL slug in the form ``<profile>/<project>``."""
profile_name, project_name = profile_project.split('/')
return (
cls.query.join(Profile)
.filter(Profile.name == profile_name, Project.name == project_name)
.one_or_none()
)
@classmethod
def migrate_profile( # type: ignore[return]
cls, old_profile: Profile, new_profile: Profile
) -> OptionalMigratedTables:
"""Migrate from one account to another when merging users."""
names = {project.name for project in new_profile.projects}
for project in old_profile.projects:
if project.name in names:
app.logger.warning(
"Project %r had a conflicting name in account migration,"
" so renaming by adding adding random value to name",
project,
)
project.name += '-' + buid()
project.profile = new_profile
add_search_trigger(Project, 'search_vector')
@reopen(Profile)
class __Profile:
id: sa.Column # noqa: A003
listed_projects = sa.orm.relationship(
Project,
lazy='dynamic',
primaryjoin=sa.and_(
Profile.id == Project.profile_id,
Project.state.PUBLISHED,
),
viewonly=True,
)
draft_projects = sa.orm.relationship(
Project,
lazy='dynamic',
primaryjoin=sa.and_(
Profile.id == Project.profile_id,
sa.or_(Project.state.DRAFT, Project.cfp_state.DRAFT),
),
viewonly=True,
)
projects_by_name = with_roles(
sa.orm.relationship(
Project, collection_class=attribute_mapped_collection('name'), viewonly=True
),
read={'all'},
)
def draft_projects_for(self, user: Optional[User]) -> List[Project]:
if user is not None:
return [
membership.project
for membership in user.projects_as_crew_active_memberships.join(
Project, Profile
).filter(
# Project is attached to this account
Project.profile_id == self.id,
# Project is in draft state OR has a draft call for proposals
sa.or_(Project.state.DRAFT, Project.cfp_state.DRAFT),
)
]
return []
def unscheduled_projects_for(self, user: Optional[User]) -> List[Project]:
if user is not None:
return [
membership.project
for membership in user.projects_as_crew_active_memberships.join(
Project, Profile
).filter(
# Project is attached to this account
Project.profile_id == self.id,
# Project is in draft state OR has a draft call for proposals
sa.or_(Project.state.PUBLISHED_WITHOUT_SESSIONS),
)
]
return []
@with_roles(read={'all'}, datasets={'primary', 'without_parent', 'related'})
@cached_property
def published_project_count(self) -> int:
return (
self.listed_projects.filter(Project.state.PUBLISHED).order_by(None).count()
)
class ProjectRedirect(TimestampMixin, db.Model): # type: ignore[name-defined]
__tablename__ = 'project_redirect'
profile_id = sa.Column(
sa.Integer, sa.ForeignKey('profile.id'), nullable=False, primary_key=True
)
profile = sa.orm.relationship(
Profile, backref=sa.orm.backref('project_redirects', cascade='all')
)
parent = sa.orm.synonym('profile')
name = sa.Column(sa.Unicode(250), nullable=False, primary_key=True)
project_id = sa.Column(
sa.Integer, sa.ForeignKey('project.id', ondelete='SET NULL'), nullable=True
)
project = sa.orm.relationship(Project, backref='redirects')
def __repr__(self) -> str:
"""Represent :class:`ProjectRedirect` as a string."""
if not self.project:
return f'<ProjectRedirect {self.profile.name}/{self.name}: (none)>'
return (
f'<ProjectRedirect {self.profile.name}/{self.name}'
f' → {self.project.profile.name}/{self.project.name}>'
)
def redirect_view_args(self):
if self.project:
return {'profile': self.profile.name, 'project': self.project.name}
return {}
@classmethod
def add(cls, project, profile=None, name=None):
"""
Add a project redirect in a given profile.
:param project: The project to create a redirect for
:param profile: The profile to place the redirect in, defaulting to existing
:param str name: Name to redirect, defaulting to project's existing name
        Typical use is when a project is renamed, to create a redirect from its previous
        name, or when it's moved to another profile, to create a redirect in the
        previous profile.
"""
if profile is None:
profile = project.profile
if name is None:
name = project.name
redirect = ProjectRedirect.query.get((profile.id, name))
if redirect is None:
redirect = ProjectRedirect(profile=profile, name=name, project=project)
db.session.add(redirect)
else:
redirect.project = project
return redirect
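    # Typical usage (sketch): after renaming a project in a view, call
    # ``ProjectRedirect.add(project, name=old_name)`` so the previous URL keeps
    # resolving to the renamed project.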
@classmethod
def migrate_profile( # type: ignore[return]
cls, old_profile: Profile, new_profile: Profile
) -> OptionalMigratedTables:
"""
Discard redirects when migrating profiles.
Since there is no profile redirect, all project redirects will also be
unreachable and are no longer relevant.
"""
names = {pr.name for pr in new_profile.project_redirects}
for pr in old_profile.project_redirects:
if pr.name not in names:
pr.profile = new_profile
else:
# Discard project redirect since the name is already taken by another
# redirect in the new account
db.session.delete(pr)
class ProjectLocation(TimestampMixin, db.Model): # type: ignore[name-defined]
__tablename__ = 'project_location'
#: Project we are tagging
project_id = sa.Column(
sa.Integer, sa.ForeignKey('project.id'), primary_key=True, nullable=False
)
project = sa.orm.relationship(
Project, backref=sa.orm.backref('locations', cascade='all')
)
#: Geonameid for this project
geonameid = sa.Column(sa.Integer, primary_key=True, nullable=False, index=True)
primary = sa.Column(sa.Boolean, default=True, nullable=False)
def __repr__(self) -> str:
"""Represent :class:`ProjectLocation` as a string."""
pri_sec = 'primary' if self.primary else 'secondary'
return (
f'<ProjectLocation {self.geonameid} {pri_sec} for project {self.project!r}>'
)
@reopen(Commentset)
class __Commentset:
project = with_roles(
sa.orm.relationship(Project, uselist=False, back_populates='commentset'),
grants_via={None: {'editor': 'document_subscriber'}},
)
# Tail imports
# pylint: disable=wrong-import-position
from .project_membership import ProjectCrewMembership # isort:skip
from .venue import Venue # isort:skip # skipcq: FLK-E402
| agpl-3.0 |
kapilrastogi/Impala | tests/benchmark/plugins/vtune_plugin.py | 16 | 5658 | # Copyright (c) 2013 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import environ
from tests.util.cluster_controller import ClusterController
from tests.benchmark.plugins import Plugin
import datetime
import threading
import time
class VTunePlugin(Plugin):
"""
This plugin runs Intel's VTune amplifier
Before the query is executed, the plugin starts VTune collection. After the query has
completed, the plugin stops the collection.
"""
__name__ = "VTunePlugin"
#TODO: We should make these configurable
VTUNE_PATH = '/opt/intel/vtune_amplifier_xe_2013/'
TARGET_PROCESS = 'impalad'
RESULT_DIR_BASE = '/var/log/impala/vtune/' + '%s' + '/db=%s'
RESULT_QUERY_SCOPE = '_query=%s_format=%s_iteration=%i'
KILL_CMD = 'ps aux | grep vtune | grep -v grep | awk \'{print $2}\' | xargs kill -9'
def __init__(self, *args, **kwargs):
self.cluster_controller = ClusterController(*args, **kwargs)
Plugin.__init__(self, *args, **kwargs)
# This is the unique run identifier
self.run_tag = environ.get('BUILD_TAG', datetime.datetime.now())
# This method checks to ensure that VTune is installed in the expected path
self._check_path_on_hosts()
def run_pre_hook(self, context):
# Source VTune variables and build the correct command string. For the workload
# scope, the database name is added to the result path. For the query scope, the
# query name and iteration is also added.
result_dir = self.RESULT_DIR_BASE
    if context.get('scope') == 'Query':
      result_dir = result_dir + self.RESULT_QUERY_SCOPE
pre_cmd = ('echo 0 > /proc/sys/kernel/nmi_watchdog\n'
'source ' + self.VTUNE_PATH + 'amplxe-vars.sh\n'
'amplxe-cl -collect advanced-hotspots '
'-result-dir=' + result_dir + ' -target-process=' + self.TARGET_PROCESS)
table_format_str = context.get('table_format', 'UNKNOWN').replace('/', '-')
pre_cmd = pre_cmd % (self.run_tag, context.get('db_name', 'UNKNOWN'),
context.get('short_query_name', 'UNKNOWN'),
table_format_str,context.get('iteration', 1))
self.thread = threading.Thread(target=self.cluster_controller.deprecated_run_cmd,
args=[pre_cmd])
self.thread.start()
# TODO: Test whether this is a good time to wait
    # Because we start this collection asynchronously, we need to ensure that all the
    # machines are running. For now this is simpler than doing the full check that we
# do in the post hook.
time.sleep(2)
def run_post_hook(self, context):
# Source VTune variables and build the correct command string. This process is
# identical to that in run_pre_hook()
result_dir = self.RESULT_DIR_BASE
    if context.get('scope') == 'Query':
      result_dir = result_dir + self.RESULT_QUERY_SCOPE
post_cmd = ('source ' + self.VTUNE_PATH + 'amplxe-vars.sh \n'
'amplxe-cl -command stop -result-dir=' + result_dir)
table_format_str = context.get('table_format', 'UNKNOWN').replace('/', '-')
# TODO: Fix the context dict to remove the ambiguity of the variable name
# new_query_name
post_cmd = post_cmd % (self.run_tag, context.get('db_name', 'UNKNOWN'),
context.get('short_query_name', 'UNKNOWN'), table_format_str,
context.get('iteration', 1))
self.cluster_controller.deprecated_run_cmd(post_cmd)
# Wait for reports to generate and kill hosts that are hanging around
self._wait_for_completion(2)
def _check_path_on_hosts(self):
path_check_cmd = 'if [ -d "%s" ]; then echo "exists"\nfi' % (self.VTUNE_PATH)
host_check_dict = self.cluster_controller.deprecated_run_cmd(path_check_cmd)
bad_hosts = [k for k in host_check_dict.keys() if host_check_dict[k] != "exists"]
if bad_hosts:
raise RuntimeError('VTune is not installed in the expected path for hosts %s' %
",".join(bad_hosts))
def _wait_for_completion(self, timeout):
"""
Waits for VTune reports to finish generating.
On large datasets it can take time for the reports to generate. This method waits for
a timeout period, checking to see if any machine in the cluster is still running a
VTune command. After the timeout period, _kill_vtune() is called which kills any
unterminated VTune commands.
"""
grep_dict = {}
reports_done = True
finish_time = datetime.datetime.now() + datetime.timedelta(minutes=timeout)
while ((reports_done) and (datetime.datetime.now() < finish_time)):
grep_dict = self.cluster_controller.deprecated_run_cmd(
'ps aux|grep vtune|grep -v grep')
reports_done = any(map(self.__is_not_none_or_empty_str, grep_dict.values()))
# TODO: Investigate a better length of time for the sleep period between checks
time.sleep(5)
self._kill_vtune(grep_dict)
def _kill_vtune(self, host_dict):
# This method kills threads that are still hanging around after timeout
kill_list = filter(self.__is_not_none_or_empty_str, host_dict.keys())
if kill_list:
self.cluster_controller.deprecated_run_cmd(self.KILL_CMD, hosts=kill_list)
def __is_not_none_or_empty_str(self, s):
return s != None and s != ''
| apache-2.0 |
arabenjamin/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 304 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/base.py | 13 | 16713 | """Base classes for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import inspect
import warnings
import numpy as np
from scipy import sparse
from .externals import six
###############################################################################
def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator: estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe: boolean, optional
If safe is false, clone will fall back to a deepcopy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in six.iteritems(new_object_params):
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if isinstance(param1, np.ndarray):
# For most ndarrays, we do not test for complete equality
if not isinstance(param2, type(param1)):
equality_test = False
elif (param1.ndim > 0
and param1.shape[0] > 0
and isinstance(param2, np.ndarray)
and param2.ndim > 0
and param2.shape[0] > 0):
equality_test = (
param1.shape == param2.shape
and param1.dtype == param2.dtype
# We have to use '.flat' for 2D arrays
and param1.flat[0] == param2.flat[0]
and param1.flat[-1] == param2.flat[-1]
)
else:
equality_test = np.all(param1 == param2)
elif sparse.issparse(param1):
# For sparse matrices equality doesn't work
if not sparse.issparse(param2):
equality_test = False
elif param1.size == 0 or param2.size == 0:
equality_test = (
param1.__class__ == param2.__class__
and param1.size == 0
and param2.size == 0
)
else:
equality_test = (
param1.__class__ == param2.__class__
and param1.data[0] == param2.data[0]
and param1.data[-1] == param2.data[-1]
and param1.nnz == param2.nnz
and param1.shape == param2.shape
)
else:
equality_test = new_object_params[name] == params_set[name]
if not equality_test:
raise RuntimeError('Cannot clone object %s, as the constructor '
'does not seem to set parameter %s' %
(estimator, name))
return new_object
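# Example usage (a minimal sketch, assuming sklearn.linear_model.LogisticRegression
# is available):
#
#   est = LogisticRegression(C=2.0).fit(X, y)
#   fresh = clone(est)        # same C=2.0, but no fitted attributes such as coef_
#   fresh.get_params()['C']   # -> 2.0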
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
The offset in characters to add at the begin of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
###############################################################################
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
args, varargs, kw, default = inspect.getargspec(init)
if varargs is not None:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
# Remove 'self'
# XXX: This is going to fail if the init is a staticmethod, but
# who would do this?
args.pop(0)
args.sort()
return args
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
                if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
                if key not in valid_params:
raise ValueError('Invalid parameter %s ' 'for estimator %s'
% (key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
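# Hedged usage sketch (not part of the original module): shows how ``get_params``,
# ``set_params`` and ``clone`` fit together. ``_ToyEstimator`` is a made-up
# minimal estimator used purely for illustration.
def _base_estimator_usage_example():
    class _ToyEstimator(BaseEstimator):
        def __init__(self, alpha=1.0, fit_intercept=True):
            self.alpha = alpha
            self.fit_intercept = fit_intercept
    est = _ToyEstimator(alpha=0.5)
    est.set_params(alpha=0.1)        # keyword names must match __init__ arguments
    duplicate = clone(est)           # new, unfitted object with the same parameters
    assert duplicate.get_params() == {'alpha': 0.1, 'fit_intercept': True}
    assert duplicate is not est
    return duplicate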
###############################################################################
class ClassifierMixin(object):
"""Mixin class for all classifiers in scikit-learn."""
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
"""Mixin class for all regression estimators in scikit-learn."""
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
        The coefficient R^2 is defined as (1 - u/v), where u is the residual
        sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
        sum of squares ((y_true - y_true.mean()) ** 2).sum().
Best possible score is 1.0, lower values are worse.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight)
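# Hedged illustration (not part of the original module): computes R^2 directly from
# the formula in the docstring above, with no estimator involved. Useful as a
# sanity check against ``r2_score``.
def _r2_by_hand_example(y_true, y_pred):
    y_true = np.asarray(y_true, dtype=np.float64)
    y_pred = np.asarray(y_pred, dtype=np.float64)
    u = ((y_true - y_pred) ** 2).sum()          # residual sum of squares
    v = ((y_true - y_true.mean()) ** 2).sum()   # total sum of squares
    return 1.0 - u / v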
###############################################################################
class ClusterMixin(object):
"""Mixin class for all cluster estimators in scikit-learn."""
def fit_predict(self, X, y=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
class BiclusterMixin(object):
"""Mixin class for all bicluster estimators in scikit-learn"""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the i'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Returns
-------
row_ind : np.array, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : np.array, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the i'th bicluster.
Returns
-------
shape : (int, int)
Number of rows and columns (resp.) in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Returns the submatrix corresponding to bicluster `i`.
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .utils.validation import check_array
data = check_array(data, accept_sparse='csr')
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
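# Hedged illustration (not part of the original module): the boolean indicator
# vectors stored in ``rows_`` / ``columns_`` map to integer indices exactly as
# ``get_indices`` computes them; the indicator values below are made up.
def _bicluster_indices_example():
    rows = np.array([True, False, True])     # bicluster covers rows 0 and 2
    columns = np.array([False, True])        # ... and column 1
    return np.nonzero(rows)[0], np.nonzero(columns)[0]   # (array([0, 2]), array([1]))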
###############################################################################
class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
        # method is possible for a given transformer
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
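# Hedged sketch (not part of the original module): a toy transformer showing that
# mixing in ``TransformerMixin`` provides ``fit_transform`` for free once ``fit``
# and ``transform`` are defined. ``_MeanCenterer`` is an illustrative name, not a
# real scikit-learn class.
class _MeanCenterer(BaseEstimator, TransformerMixin):
    def fit(self, X, y=None):
        self.mean_ = np.asarray(X, dtype=np.float64).mean(axis=0)
        return self
    def transform(self, X):
        return np.asarray(X, dtype=np.float64) - self.mean_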
###############################################################################
class MetaEstimatorMixin(object):
"""Mixin class for all meta estimators in scikit-learn."""
# this is just a tag for the moment
###############################################################################
# XXX: Temporary solution to figure out if an estimator is a classifier
def _get_sub_estimator(estimator):
"""Returns the final estimator if there is any."""
if hasattr(estimator, 'estimator'):
# GridSearchCV and other CV-tuned estimators
return _get_sub_estimator(estimator.estimator)
if hasattr(estimator, 'steps'):
# Pipeline
return _get_sub_estimator(estimator.steps[-1][1])
return estimator
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier."""
estimator = _get_sub_estimator(estimator)
return isinstance(estimator, ClassifierMixin)
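# Hedged sketch (not part of the original module): ``is_classifier`` inspects only
# the final, unwrapped estimator, so a wrapper exposing an ``estimator`` attribute
# (as GridSearchCV does) is judged by what it wraps. Both classes below are
# made-up stand-ins.
def _is_classifier_example():
    class _ToyClassifier(ClassifierMixin):
        pass
    class _ToyWrapper(object):
        def __init__(self, estimator):
            self.estimator = estimator
    return is_classifier(_ToyWrapper(_ToyClassifier()))   # True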
| bsd-3-clause |
delhivery/django | tests/generic_views/test_list.py | 306 | 12129 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from django.utils.encoding import force_str
from django.views.generic.base import View
from .models import Artist, Author, Book, Page
@override_settings(ROOT_URLCONF='generic_views.urls')
class ListViewTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.artist1 = Artist.objects.create(name='Rene Magritte')
cls.author1 = Author.objects.create(name='Roberto Bolaño', slug='roberto-bolano')
cls.author2 = Author.objects.create(name='Scott Rosenberg', slug='scott-rosenberg')
cls.book1 = Book.objects.create(name='2066', slug='2066', pages=800, pubdate=datetime.date(2008, 10, 1))
cls.book1.authors.add(cls.author1)
cls.book2 = Book.objects.create(
name='Dreaming in Code', slug='dreaming-in-code', pages=300, pubdate=datetime.date(2006, 5, 1)
)
cls.page1 = Page.objects.create(
content='I was once bitten by a moose.', template='generic_views/page_template.html'
)
def test_items(self):
res = self.client.get('/list/dict/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/list.html')
self.assertEqual(res.context['object_list'][0]['first'], 'John')
def test_queryset(self):
res = self.client.get('/list/authors/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIsInstance(res.context['view'], View)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertIsNone(res.context['paginator'])
self.assertIsNone(res.context['page_obj'])
self.assertFalse(res.context['is_paginated'])
def test_paginated_queryset(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTrue(res.context['is_paginated'])
self.assertEqual(res.context['page_obj'].number, 1)
self.assertEqual(res.context['paginator'].num_pages, 4)
self.assertEqual(res.context['author_list'][0].name, 'Author 00')
self.assertEqual(list(res.context['author_list'])[-1].name, 'Author 29')
def test_paginated_queryset_shortdata(self):
# Test that short datasets ALSO result in a paginated view.
res = self.client.get('/list/authors/paginated/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['page_obj'].number, 1)
self.assertEqual(res.context['paginator'].num_pages, 1)
self.assertFalse(res.context['is_paginated'])
def test_paginated_get_page_by_query_string(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/', {'page': '2'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 30')
self.assertEqual(res.context['page_obj'].number, 2)
def test_paginated_get_last_page_by_query_string(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/', {'page': 'last'})
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.context['object_list']), 10)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 90')
self.assertEqual(res.context['page_obj'].number, 4)
def test_paginated_get_page_by_urlvar(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/3/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 60')
self.assertEqual(res.context['page_obj'].number, 3)
def test_paginated_page_out_of_range(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/42/')
self.assertEqual(res.status_code, 404)
def test_paginated_invalid_page(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/?page=frog')
self.assertEqual(res.status_code, 404)
def test_paginated_custom_paginator_class(self):
self._make_authors(7)
res = self.client.get('/list/authors/paginated/custom_class/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['paginator'].num_pages, 1)
# Custom pagination allows for 2 orphans on a page size of 5
self.assertEqual(len(res.context['object_list']), 7)
def test_paginated_custom_page_kwarg(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/custom_page_kwarg/', {'pagina': '2'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 30')
self.assertEqual(res.context['page_obj'].number, 2)
def test_paginated_custom_paginator_constructor(self):
self._make_authors(7)
res = self.client.get('/list/authors/paginated/custom_constructor/')
self.assertEqual(res.status_code, 200)
# Custom pagination allows for 2 orphans on a page size of 5
self.assertEqual(len(res.context['object_list']), 7)
def test_paginated_orphaned_queryset(self):
self._make_authors(92)
res = self.client.get('/list/authors/paginated-orphaned/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 1)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': 'last'})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 3)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': '3'})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 3)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': '4'})
self.assertEqual(res.status_code, 404)
def test_paginated_non_queryset(self):
res = self.client.get('/list/dict/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.context['object_list']), 1)
def test_verbose_name(self):
res = self.client.get('/list/artists/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/list.html')
self.assertEqual(list(res.context['object_list']), list(Artist.objects.all()))
self.assertIs(res.context['artist_list'], res.context['object_list'])
self.assertIsNone(res.context['paginator'])
self.assertIsNone(res.context['page_obj'])
self.assertFalse(res.context['is_paginated'])
def test_allow_empty_false(self):
res = self.client.get('/list/authors/notempty/')
self.assertEqual(res.status_code, 200)
Author.objects.all().delete()
res = self.client.get('/list/authors/notempty/')
self.assertEqual(res.status_code, 404)
def test_template_name(self):
res = self.client.get('/list/authors/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/list.html')
def test_template_name_suffix(self):
res = self.client.get('/list/authors/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/author_objects.html')
def test_context_object_name(self):
res = self.client.get('/list/authors/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertNotIn('authors', res.context)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/author_list.html')
def test_duplicate_context_object_name(self):
res = self.client.get('/list/authors/dupe_context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertNotIn('authors', res.context)
self.assertNotIn('author_list', res.context)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
def test_missing_items(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/list/authors/invalid/')
def test_paginated_list_view_does_not_load_entire_table(self):
# Regression test for #17535
self._make_authors(3)
# 1 query for authors
with self.assertNumQueries(1):
self.client.get('/list/authors/notempty/')
# same as above + 1 query to test if authors exist + 1 query for pagination
with self.assertNumQueries(3):
self.client.get('/list/authors/notempty/paginated/')
def test_explicitly_ordered_list_view(self):
Book.objects.create(name="Zebras for Dummies", pages=800, pubdate=datetime.date(2006, 9, 1))
res = self.client.get('/list/books/sorted/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object_list'][0].name, '2066')
self.assertEqual(res.context['object_list'][1].name, 'Dreaming in Code')
self.assertEqual(res.context['object_list'][2].name, 'Zebras for Dummies')
res = self.client.get('/list/books/sortedbypagesandnamedec/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object_list'][0].name, 'Dreaming in Code')
self.assertEqual(res.context['object_list'][1].name, 'Zebras for Dummies')
self.assertEqual(res.context['object_list'][2].name, '2066')
@override_settings(DEBUG=True)
def test_paginated_list_view_returns_useful_message_on_invalid_page(self):
# test for #19240
# tests that source exception's message is included in page
self._make_authors(1)
res = self.client.get('/list/authors/paginated/2/')
self.assertEqual(res.status_code, 404)
self.assertEqual(force_str(res.context.get('reason')),
"Invalid page (2): That page contains no results")
def _make_authors(self, n):
Author.objects.all().delete()
for i in range(n):
Author.objects.create(name='Author %02i' % i, slug='a%s' % i)
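# Hedged illustration (not part of the original test module): the "2 orphans on a
# page size of 5" comments above follow directly from Paginator's orphan handling,
# shown here without any views or models.
def _orphan_pagination_example():
    from django.core.paginator import Paginator
    paginator = Paginator(list(range(7)), per_page=5, orphans=2)
    return paginator.num_pages   # 1: the two leftover items are folded into page 1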
| bsd-3-clause |
extremewaysback/django | tests/generic_views/test_list.py | 306 | 12129 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from django.utils.encoding import force_str
from django.views.generic.base import View
from .models import Artist, Author, Book, Page
@override_settings(ROOT_URLCONF='generic_views.urls')
class ListViewTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.artist1 = Artist.objects.create(name='Rene Magritte')
cls.author1 = Author.objects.create(name='Roberto Bolaño', slug='roberto-bolano')
cls.author2 = Author.objects.create(name='Scott Rosenberg', slug='scott-rosenberg')
cls.book1 = Book.objects.create(name='2066', slug='2066', pages=800, pubdate=datetime.date(2008, 10, 1))
cls.book1.authors.add(cls.author1)
cls.book2 = Book.objects.create(
name='Dreaming in Code', slug='dreaming-in-code', pages=300, pubdate=datetime.date(2006, 5, 1)
)
cls.page1 = Page.objects.create(
content='I was once bitten by a moose.', template='generic_views/page_template.html'
)
def test_items(self):
res = self.client.get('/list/dict/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/list.html')
self.assertEqual(res.context['object_list'][0]['first'], 'John')
def test_queryset(self):
res = self.client.get('/list/authors/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIsInstance(res.context['view'], View)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertIsNone(res.context['paginator'])
self.assertIsNone(res.context['page_obj'])
self.assertFalse(res.context['is_paginated'])
def test_paginated_queryset(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTrue(res.context['is_paginated'])
self.assertEqual(res.context['page_obj'].number, 1)
self.assertEqual(res.context['paginator'].num_pages, 4)
self.assertEqual(res.context['author_list'][0].name, 'Author 00')
self.assertEqual(list(res.context['author_list'])[-1].name, 'Author 29')
def test_paginated_queryset_shortdata(self):
# Test that short datasets ALSO result in a paginated view.
res = self.client.get('/list/authors/paginated/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['page_obj'].number, 1)
self.assertEqual(res.context['paginator'].num_pages, 1)
self.assertFalse(res.context['is_paginated'])
def test_paginated_get_page_by_query_string(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/', {'page': '2'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 30')
self.assertEqual(res.context['page_obj'].number, 2)
def test_paginated_get_last_page_by_query_string(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/', {'page': 'last'})
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.context['object_list']), 10)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 90')
self.assertEqual(res.context['page_obj'].number, 4)
def test_paginated_get_page_by_urlvar(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/3/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 60')
self.assertEqual(res.context['page_obj'].number, 3)
def test_paginated_page_out_of_range(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/42/')
self.assertEqual(res.status_code, 404)
def test_paginated_invalid_page(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/?page=frog')
self.assertEqual(res.status_code, 404)
def test_paginated_custom_paginator_class(self):
self._make_authors(7)
res = self.client.get('/list/authors/paginated/custom_class/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['paginator'].num_pages, 1)
# Custom pagination allows for 2 orphans on a page size of 5
self.assertEqual(len(res.context['object_list']), 7)
def test_paginated_custom_page_kwarg(self):
self._make_authors(100)
res = self.client.get('/list/authors/paginated/custom_page_kwarg/', {'pagina': '2'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
self.assertEqual(len(res.context['object_list']), 30)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertEqual(res.context['author_list'][0].name, 'Author 30')
self.assertEqual(res.context['page_obj'].number, 2)
def test_paginated_custom_paginator_constructor(self):
self._make_authors(7)
res = self.client.get('/list/authors/paginated/custom_constructor/')
self.assertEqual(res.status_code, 200)
# Custom pagination allows for 2 orphans on a page size of 5
self.assertEqual(len(res.context['object_list']), 7)
def test_paginated_orphaned_queryset(self):
self._make_authors(92)
res = self.client.get('/list/authors/paginated-orphaned/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 1)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': 'last'})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 3)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': '3'})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 3)
res = self.client.get(
'/list/authors/paginated-orphaned/', {'page': '4'})
self.assertEqual(res.status_code, 404)
def test_paginated_non_queryset(self):
res = self.client.get('/list/dict/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(len(res.context['object_list']), 1)
def test_verbose_name(self):
res = self.client.get('/list/artists/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/list.html')
self.assertEqual(list(res.context['object_list']), list(Artist.objects.all()))
self.assertIs(res.context['artist_list'], res.context['object_list'])
self.assertIsNone(res.context['paginator'])
self.assertIsNone(res.context['page_obj'])
self.assertFalse(res.context['is_paginated'])
def test_allow_empty_false(self):
res = self.client.get('/list/authors/notempty/')
self.assertEqual(res.status_code, 200)
Author.objects.all().delete()
res = self.client.get('/list/authors/notempty/')
self.assertEqual(res.status_code, 404)
def test_template_name(self):
res = self.client.get('/list/authors/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/list.html')
def test_template_name_suffix(self):
res = self.client.get('/list/authors/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/author_objects.html')
def test_context_object_name(self):
res = self.client.get('/list/authors/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertNotIn('authors', res.context)
self.assertIs(res.context['author_list'], res.context['object_list'])
self.assertTemplateUsed(res, 'generic_views/author_list.html')
def test_duplicate_context_object_name(self):
res = self.client.get('/list/authors/dupe_context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
self.assertNotIn('authors', res.context)
self.assertNotIn('author_list', res.context)
self.assertTemplateUsed(res, 'generic_views/author_list.html')
def test_missing_items(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/list/authors/invalid/')
def test_paginated_list_view_does_not_load_entire_table(self):
# Regression test for #17535
self._make_authors(3)
# 1 query for authors
with self.assertNumQueries(1):
self.client.get('/list/authors/notempty/')
# same as above + 1 query to test if authors exist + 1 query for pagination
with self.assertNumQueries(3):
self.client.get('/list/authors/notempty/paginated/')
def test_explicitly_ordered_list_view(self):
Book.objects.create(name="Zebras for Dummies", pages=800, pubdate=datetime.date(2006, 9, 1))
res = self.client.get('/list/books/sorted/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object_list'][0].name, '2066')
self.assertEqual(res.context['object_list'][1].name, 'Dreaming in Code')
self.assertEqual(res.context['object_list'][2].name, 'Zebras for Dummies')
res = self.client.get('/list/books/sortedbypagesandnamedec/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object_list'][0].name, 'Dreaming in Code')
self.assertEqual(res.context['object_list'][1].name, 'Zebras for Dummies')
self.assertEqual(res.context['object_list'][2].name, '2066')
@override_settings(DEBUG=True)
def test_paginated_list_view_returns_useful_message_on_invalid_page(self):
# test for #19240
# tests that source exception's message is included in page
self._make_authors(1)
res = self.client.get('/list/authors/paginated/2/')
self.assertEqual(res.status_code, 404)
self.assertEqual(force_str(res.context.get('reason')),
"Invalid page (2): That page contains no results")
def _make_authors(self, n):
Author.objects.all().delete()
for i in range(n):
Author.objects.create(name='Author %02i' % i, slug='a%s' % i)
| bsd-3-clause |
CodingCat/mxnet | example/autoencoder/data.py | 18 | 1348 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from __future__ import print_function
import os
import numpy as np
from sklearn.datasets import fetch_mldata
def get_mnist():
np.random.seed(1234) # set seed for deterministic ordering
data_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
data_path = os.path.join(data_path, '../../data')
mnist = fetch_mldata('MNIST original', data_home=data_path)
p = np.random.permutation(mnist.data.shape[0])
X = mnist.data[p].astype(np.float32)*0.02
Y = mnist.target[p]
return X, Y
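# Hedged usage sketch (not part of the original script): the loader returns the
# shuffled, rescaled images and their labels; the shapes below assume the standard
# 70000-sample MNIST dump served by fetch_mldata.
if __name__ == '__main__':
    X, Y = get_mnist()
    print(X.shape, Y.shape)   # expected: (70000, 784) (70000,)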
| apache-2.0 |
DonBeo/scikit-learn | examples/decomposition/plot_image_denoising.py | 84 | 5820 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image: a dictionary is first fitted with online
:ref:`DictionaryLearning`, and the reconstruction is then compared
across various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
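# Hedged addition (not part of the original example): the text above compares
# methods by their distance to the ground truth in Frobenius norm; this prints that
# norm explicitly for each reconstruction computed in the loop above.
for title, reconstruction in reconstructions.items():
    frobenius = np.sqrt(np.sum((reconstruction - lena) ** 2))
    print('%s: difference Frobenius norm %.2f' % (title.replace('\n', ' '), frobenius))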
| bsd-3-clause |
d1hotpep/openai_gym | gym/envs/parameter_tuning/train_deep_cnn.py | 7 | 8578 | from __future__ import print_function
import gym
import random
from gym import spaces
import numpy as np
from keras.datasets import cifar10, mnist, cifar100
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.regularizers import WeightRegularizer
from keras import backend as K
from itertools import cycle
import math
class CNNClassifierTraining(gym.Env):
"""Environment where agent learns to select training parameters and
architecture of a deep convolutional neural network
Training parameters that the agent can adjust are learning
rate, learning rate decay, momentum, batch size, L1 / L2 regularization.
Agent can select up to 5 cnn layers and up to 2 fc layers.
Agent is provided with feedback on validation accuracy, as well as on
the size of a dataset.
"""
metadata = {"render.modes": ["human"]}
def __init__(self, natural=False):
"""
Initialize environment
"""
# I use array of len 1 to store constants (otherwise there were some errors)
self.action_space = spaces.Tuple((
spaces.Box(-5.0, 0.0, 1), # learning rate
spaces.Box(-7.0, -2.0, 1), # decay
spaces.Box(-5.0, 0.0, 1), # momentum
spaces.Box(2, 8, 1), # batch size
spaces.Box(-6.0, 1.0, 1), # l1 reg
spaces.Box(-6.0, 1.0, 1), # l2 reg
spaces.Box(0.0, 1.0, (5, 2)), # convolutional layer parameters
spaces.Box(0.0, 1.0, (2, 2)), # fully connected layer parameters
))
        # observation features, in order: number of training instances,
        # validation accuracy after training with the given parameters
        self.observation_space = spaces.Box(-1e5, 1e5, 2)  # instances, accuracy
# Start the first game
self._reset()
def _step(self, action):
"""
Perform some action in the environment
"""
assert self.action_space.contains(action)
lr, decay, momentum, batch_size, l1, l2, convs, fcs = action
# map ranges of inputs
lr = (10.0 ** lr[0]).astype('float32')
decay = (10.0 ** decay[0]).astype('float32')
momentum = (10.0 ** momentum[0]).astype('float32')
batch_size = int(2 ** batch_size[0])
l1 = (10.0 ** l1[0]).astype('float32')
l2 = (10.0 ** l2[0]).astype('float32')
"""
names = ["lr", "decay", "mom", "batch", "l1", "l2"]
values = [lr, decay, momentum, batch_size, l1, l2]
for n,v in zip(names, values):
print(n,v)
"""
diverged, acc = self.train_blueprint(lr, decay, momentum, batch_size, l1, l2, convs, fcs)
# save best validation. If diverged, acc is zero
if acc > self.best_val:
self.best_val = acc
self.previous_acc = acc
self.epoch_idx += 1
done = self.epoch_idx == 10
reward = self.best_val
        # as the number of labels increases, the learning problem becomes
        # more difficult for a fixed dataset size. To keep the agent from
        # ignoring more complex datasets, on which accuracy is low, and
        # concentrating on simple cases which bring the bulk of the reward,
        # the reward is normalized by the number of labels in the dataset
reward *= self.nb_classes
# formula below encourages higher best validation
reward += reward ** 2
return self._get_obs(), reward, done, {}
def _render(self, mode="human", close=False):
if close:
return
print(">> Step ", self.epoch_idx, "best validation:", self.best_val)
def _get_obs(self):
"""
Observe the environment. Is usually used after the step is taken
"""
# observation as per observation space
return np.array([self.nb_inst,
self.previous_acc])
def data_mix(self):
# randomly choose dataset
        dataset = random.choice(['mnist', 'cifar10', 'cifar100'])
n_labels = 10
if dataset == "mnist":
data = mnist.load_data()
if dataset == "cifar10":
data = cifar10.load_data()
if dataset == "cifar100":
data = cifar100.load_data()
n_labels = 100
# Choose dataset size. This affects regularization needed
r = np.random.rand()
# not using full dataset to make regularization more important and
# speed up testing a little bit
data_size = int(2000 * (1 - r) + 40000 * r)
# I do not use test data for validation, but last 10000 instances in dataset
# so that trained models can be compared to results in literature
(CX, CY), (CXt, CYt) = data
if dataset == "mnist":
CX = np.expand_dims(CX, axis=1)
data = CX[:data_size], CY[:data_size], CX[-10000:], CY[-10000:]
return data, n_labels
def _reset(self):
self.generate_data()
# initial accuracy values
self.best_val = 0.0
self.previous_acc = 0.0
self.epoch_idx = 0
return self._get_obs()
def generate_data(self):
self.data, self.nb_classes = self.data_mix()
# zero index corresponds to training inputs
self.nb_inst = len(self.data[0])
def train_blueprint(self, lr, decay, momentum, batch_size, l1, l2, convs, fcs):
X, Y, Xv, Yv = self.data
nb_classes = self.nb_classes
reg = WeightRegularizer()
# a hack to make regularization variable
reg.l1 = K.variable(0.0)
reg.l2 = K.variable(0.0)
# input square image dimensions
img_rows, img_cols = X.shape[-1], X.shape[-1]
img_channels = X.shape[1]
# convert class vectors to binary class matrices
Y = np_utils.to_categorical(Y, nb_classes)
Yv = np_utils.to_categorical(Yv, nb_classes)
# here definition of the model happens
model = Sequential()
has_convs = False
# create all convolutional layers
for val, use in convs:
# Size of convolutional layer
cnvSz = int(val * 127) + 1
if use < 0.5:
continue
has_convs = True
model.add(Convolution2D(cnvSz, 3, 3, border_mode='same',
input_shape=(img_channels, img_rows, img_cols),
W_regularizer=reg,
b_regularizer=reg))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
if has_convs:
model.add(Flatten())
else:
            model.add(Flatten(input_shape=(img_channels, img_rows, img_cols)))  # avoid exceptions when there are no conv layers
# create all fully connected layers
for val, use in fcs:
if use < 0.5:
continue
# choose fully connected layer size
densesz = int(1023 * val) + 1
model.add(Dense(densesz,
W_regularizer=reg,
b_regularizer=reg))
model.add(Activation('relu'))
# model.add(Dropout(0.5))
model.add(Dense(nb_classes,
W_regularizer=reg,
b_regularizer=reg))
model.add(Activation('softmax'))
# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
X = X.astype('float32')
Xv = Xv.astype('float32')
X /= 255
Xv /= 255
# set parameters of training step
sgd.lr.set_value(lr)
sgd.decay.set_value(decay)
sgd.momentum.set_value(momentum)
reg.l1.set_value(l1)
reg.l2.set_value(l2)
        # train the model for nb_epoch epochs with the chosen parameters
H = model.fit(X, Y,
batch_size=int(batch_size),
nb_epoch=10,
shuffle=True)
diverged = math.isnan(H.history['loss'][-1])
acc = 0.0
if not diverged:
_, acc = model.evaluate(Xv, Yv)
return diverged, acc
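# Hedged usage sketch (not part of the original module): samples one random action
# and shows how the log-scaled action components map to concrete hyper-parameters,
# mirroring the conversions at the top of ``_step``. Guarded by ``__main__`` since
# instantiating the environment also downloads/loads a dataset via ``_reset``.
if __name__ == '__main__':
    env = CNNClassifierTraining()
    lr, decay, momentum, batch, l1, l2, convs, fcs = env.action_space.sample()
    print('learning rate: %.5f' % (10.0 ** lr[0]))
    print('batch size   : %d' % int(2 ** batch[0]))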
| mit |
arabenjamin/scikit-learn | examples/model_selection/plot_validation_curve.py | 228 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
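# Hedged addition (not part of the original example): report the gamma whose mean
# cross-validation score is highest, i.e. where the validation curve peaks between
# the underfitting and overfitting regimes described above.
best_idx = np.argmax(test_scores_mean)
print("best gamma: %g (mean CV accuracy: %.3f)"
      % (param_range[best_idx], test_scores_mean[best_idx]))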
| bsd-3-clause |
heli522/scikit-learn | sklearn/tree/tree.py | 112 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # np.reshape is used here because it preserves data contiguity,
            # whereas indexing with [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
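# --- Editor's note: hedged usage sketch, not part of the original module. ---
# A minimal end-to-end example of DecisionTreeClassifier on the iris data,
# mirroring the cross_val_score example in the docstring above; the helper
# name and hyperparameter values are illustrative only.
def _example_decision_tree_classifier_usage():
    from sklearn.datasets import load_iris
    from sklearn.cross_validation import train_test_split
    iris = load_iris()
    X_train, X_test, y_train, y_test = train_test_split(
        iris.data, iris.target, random_state=0)
    clf = DecisionTreeClassifier(max_depth=3, random_state=0)
    clf.fit(X_train, y_train)
    # Hard class predictions and per-class probabilities (leaf class
    # fractions, as described in predict_proba above).
    predictions = clf.predict(X_test)
    probabilities = clf.predict_proba(X_test)
    return predictions, probabilities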
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
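# --- Editor's note: hedged usage sketch, not part of the original module. ---
# A minimal DecisionTreeRegressor example on the boston housing data; the
# helper name and hyperparameter values are illustrative only.
def _example_decision_tree_regressor_usage():
    from sklearn.datasets import load_boston
    from sklearn.cross_validation import train_test_split
    boston = load_boston()
    X_train, X_test, y_train, y_test = train_test_split(
        boston.data, boston.target, random_state=0)
    regressor = DecisionTreeRegressor(max_depth=4, random_state=0)
    regressor.fit(X_train, y_train)
    # RegressorMixin.score returns the R^2 of the held-out predictions.
    return regressor.score(X_test, y_test)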
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
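# --- Editor's note: hedged usage sketch, not part of the original module. ---
# The Extra-Tree docstrings above warn that these estimators should be used
# inside ensembles; this sketch assumes sklearn.ensemble.BaggingRegressor is
# available and simply wires an ExtraTreeRegressor into it for illustration.
def _example_extra_tree_in_ensemble():
    from sklearn.datasets import load_boston
    from sklearn.ensemble import BaggingRegressor
    boston = load_boston()
    ensemble = BaggingRegressor(
        base_estimator=ExtraTreeRegressor(random_state=0),
        n_estimators=10, random_state=0)
    ensemble.fit(boston.data, boston.target)
    return ensemble.predict(boston.data[:5])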
| bsd-3-clause |
ltrottier/keras-object-recognition | train.py | 1 | 3842 | import os
import numpy as np
from models import load_model
from keras import backend as K
from keras.callbacks import (
LearningRateScheduler, TensorBoard, ModelCheckpoint, CSVLogger)
from keras.datasets import cifar10, cifar100, mnist
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from argparse import ArgumentParser
config = K.tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = K.tf.Session(config=config)
K.set_session(sess)
# Read options
parser = ArgumentParser()
parser.add_argument('--savepath', default='results')
parser.add_argument('--dataset', default="cifar10", help="cifar10, cifar100")
parser.add_argument('--net_type', default='resnet')
parser.add_argument('--depth', type=int, default=16)
parser.add_argument('--widen', type=int, default=1)
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--randomcrop', type=int, default=4)
parser.add_argument('--randomcrop_type', default="reflect", help="zero, reflect")
parser.add_argument('--hflip', action='store_false', default=True)
parser.add_argument('--epoch_max', type=int, default=200)
parser.add_argument('--epoch_init', type=int, default=0)
parser.add_argument('--bs', type=int, default=128)
parser.add_argument('--nthreads', type=int, default=2)
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--lr_decay', type=float, default=0.2)
parser.add_argument('--lr_schedule', nargs='+', default=[60,120,160], type=int)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--nesterov', action='store_false', default=True)
opts = parser.parse_args()
print(opts)
# Load data
(xtr, ytr), (xtst, ytst) = eval(opts.dataset).load_data()
xtr = xtr.astype('float32')
ytr = to_categorical(ytr)
xtst = xtst.astype('float32')
ytst = to_categorical(ytst)
trsize, imh, imw, imc = xtr.shape
tstsize = xtst.shape[0]
n_classes = ytr.shape[1]
# Data generator
trdatagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
width_shift_range=opts.randomcrop/imw,
height_shift_range=opts.randomcrop/imh,
fill_mode=opts.randomcrop_type,
cval=0,
horizontal_flip=opts.hflip)
trdatagen.fit(xtr)
trgenerator = trdatagen.flow(xtr, ytr, batch_size=opts.bs)
tstdatagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True)
tstdatagen.fit(xtr)
tstgenerator = tstdatagen.flow(xtst, ytst, batch_size=opts.bs)
# Instanciate model
model = load_model(
net_type=opts.net_type,
input_shape=(imh, imw, imc),
n_classes=n_classes,
depth=opts.depth,
weight_decay=opts.weight_decay,
widen=opts.widen)
print(model.summary())
optimizer = SGD(lr=opts.lr, momentum=opts.momentum, nesterov=opts.nesterov)
model.compile(
loss="categorical_crossentropy",
optimizer=optimizer,
metrics=['accuracy'])
# Define callbacks
def lrs_callback(epoch):
return opts.lr * opts.lr_decay**(np.array(opts.lr_schedule) <= epoch).sum()
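# Editor's note (hedged): with the default settings above (lr=0.1,
# lr_decay=0.2, lr_schedule=[60, 120, 160]) this step schedule evaluates to
#   epochs 0-59 -> 0.1, 60-119 -> 0.02, 120-159 -> 0.004, 160+ -> 0.0008
# e.g. lrs_callback(130) == 0.1 * 0.2 ** 2 == 0.004.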
learning_rate_scheduler = LearningRateScheduler(lrs_callback)
tensorboard = TensorBoard(opts.savepath)
checkpoint = ModelCheckpoint(
os.path.join(opts.savepath, "model.hdf5"),
monitor="val_acc",
save_best_only=True,
mode="max")
logger = CSVLogger(os.path.join(opts.savepath, "results.log"), append=True)
callbacks = [learning_rate_scheduler, tensorboard, checkpoint, logger]
# Train model
model.fit_generator(
generator=trgenerator,
samples_per_epoch=trsize,
nb_epoch=opts.epoch_max,
initial_epoch=opts.epoch_init,
nb_worker=opts.nthreads,
pickle_safe=True,
callbacks=callbacks,
validation_data=tstgenerator,
nb_val_samples=tstsize)
# Save last updated model
model.save(os.path.join(opts.savepath, "model_last.hdf5"))
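# Editor's note (hedged): an illustrative invocation of this script; the save
# path and values are examples, not prescribed by the original repository.
#   python train.py --dataset cifar10 --net_type resnet --depth 16 --widen 1 \
#       --savepath results/cifar10_resnet16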
| mit |
arabenjamin/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 243 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e. by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
DonBeo/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 243 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e. by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/grid_search.py | 11 | 34418 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import _check_cv as check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
samples = []
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# get complete grid and yield from it
param_grid = list(ParameterGrid(self.param_distributions))
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
while len(samples) < self.n_iter:
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
samples.append(params)
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
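# --- Editor's note: hedged usage sketch, not part of the original module. ---
# fit_grid_point evaluates a single (parameters, train/test split) pair; this
# sketch reuses the module-level check_scoring import and assumes the iris
# data and a 3-fold KFold split purely for illustration.
def _example_fit_grid_point():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    from sklearn.cross_validation import KFold
    iris = load_iris()
    estimator = SVC()
    scorer = check_scoring(estimator, scoring='accuracy')
    train, test = next(iter(KFold(len(iris.target), n_folds=3)))
    score, parameters, n_test = fit_grid_point(
        iris.data, iris.target, estimator, {'C': 1.0}, train, test,
        scorer, verbose=0)
    return score, parameters, n_test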
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular it
    # does not copy the string for the keys on each instance.
    # By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance. We avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes. Furthermore we don't need any additional
    # slot in the subclass, so we set __slots__ to the empty tuple.
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class ChangedBehaviorWarning(UserWarning):
pass
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # Out is a list of 4-tuples: (score, n_test_samples, time, parameters)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, default=3
If an integer is passed, it is the number of folds.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
degree=..., gamma=..., kernel='rbf', max_iter=-1,
probability=False, random_state=None, shrinking=True,
tol=..., verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, loss_func=None,
score_func=None, fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
        Contains scores for all parameter combinations in param_distributions.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
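# --- Editor's note: hedged usage sketch, not part of the original module. ---
# RandomizedSearchCV has no doctest example above, so this sketch shows a
# typical call; the parameter distributions and n_iter are illustrative only.
def _example_randomized_search():
    from scipy.stats import expon
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    param_distributions = {'C': expon(scale=10), 'gamma': expon(scale=0.1)}
    search = RandomizedSearchCV(SVC(), param_distributions, n_iter=8,
                                random_state=0)
    search.fit(iris.data, iris.target)
    return search.best_params_, search.best_score_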
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/cross_decomposition/pls_.py | 4 | 28099 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights))
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
## 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights))
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / np.dot(y_weights.T, y_weights)
## y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
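# --- Editor's note: hedged sketch, not part of the original module. ---
# The NIPALS inner loop above approximates the first left/right singular
# vectors of X'Y; this sketch compares it with the direct SVD on small random
# data (the helper name and array sizes are illustrative only).
def _example_inner_loop_vs_svd():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    Y = rng.randn(20, 3)
    Xc = X - X.mean(axis=0)
    Yc = Y - Y.mean(axis=0)
    x_weights, y_weights, n_iter = _nipals_twoblocks_inner_loop(Xc, Yc)
    u, v = _svd_cross_product(Xc, Yc)
    # Up to sign, x_weights should match u closely; y_weights is only
    # proportional to v since norm_y_weights defaults to False.
    return x_weights, u, y_weights, v, n_iter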
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
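# --- Editor's note: hedged sketch, not part of the original module. ---
# A small check of _center_scale_xy: after the call both blocks have zero
# column means and, with scale=True, unit ddof=1 standard deviations. Note
# that the function modifies its inputs in place, hence the copies.
def _example_center_scale():
    rng = np.random.RandomState(0)
    X = 3.0 * rng.randn(10, 4) + 1.0
    Y = 0.5 * rng.randn(10, 2) - 2.0
    Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(
        X.copy(), Y.copy(), scale=True)
    # The returned means and stds allow mapping scores/loadings back to the
    # original units of X and Y.
    return Xc.mean(axis=0), Yc.std(axis=0, ddof=1)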
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm; the constructor
parameters allow obtaining a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterate over components.
(ii) The inner loop estimates the weight vectors. This can be done
with two algorithms: (a) the inner loop of the original NIPALS algorithm,
or (b) an SVD on the residual cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contain the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float, copy=self.copy)
Y = check_array(Y, dtype=np.float, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y[:, None]
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('invalid number of components')
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
#1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
#2) Deflation (in place)
# ----------------------
# Possible memory footprint reduction may be done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted from Xk, we suggest
# performing a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
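# Illustrative note (sketch, not part of the original code): since coef_
# computed above already folds in the 1 / x_std_ and y_std_ scalings, the
# regression prediction reduces to a single affine map on the raw data.
# Assuming ``pls`` is a fitted regression-mode instance:
#
#     # Ypred = np.dot(X - pls.x_mean_, pls.coef_) + pls.y_mean_
#
# which is exactly what ``predict`` below computes.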
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
# Normalize
if copy:
Xc = (np.asarray(X) - self.x_mean_) / self.x_std_
if Y is not None:
Yc = (np.asarray(Y) - self.y_mean_) / self.y_std_
else:
Xc = np.asarray(X)
Xc -= self.x_mean_
Xc /= self.x_std_
if Y is not None:
Yc = np.asarray(Y)
Yc -= self.y_mean_
Yc /= self.y_std_
# Apply rotation
x_scores = np.dot(Xc, self.x_rotations_)
if Y is not None:
y_scores = np.dot(Yc, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
# Normalize
if copy:
Xc = (np.asarray(X) - self.x_mean_)
else:
Xc = np.asarray(X)
Xc -= self.x_mean_
# no division by x_std_ here: coef_ already includes the 1 / x_std_
# scaling, which keeps copy=False consistent with the copy=True branch
Ypred = np.dot(Xc, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in the case of a one-dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find weights u, v that optimize:
``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results as 3 PLS packages
available in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
@property
def coefs(self):
check_is_fitted(self, 'coef_')
warnings.warn("``coefs`` attribute has been deprecated and will be "
"removed in version 0.17. Use ``coef_`` instead", DeprecationWarning)
return self.coef_
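# Illustrative usage sketch (not part of the original module), continuing
# the PLSRegression docstring example above (X is 4 x 3, Y is 4 x 2,
# n_components=2):
#
#     # pls2.x_scores_.shape    -> (4, 2)
#     # pls2.x_loadings_.shape  -> (3, 2)
#     # pls2.coef_.shape        -> (3, 2)
#     # pls2.transform(X).shape -> (4, 2)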
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
set to True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression, which is slightly different from CCA. This is mostly used
for modeling.
This implementation provides the same results as the "plspm" package
available in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm, since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply performs an SVD on the cross-covariance matrix X'Y.
There is no iterative deflation here.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float, copy=self.copy)
Y = check_array(Y, dtype=np.float, copy=self.copy)
p = X.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('invalid number of components')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another solver.
# Otherwise, let arpack compute only the components of interest.
if self.n_components == C.shape[1]:
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
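# Illustrative usage sketch (not part of the original module; the PLSSVD
# docstring has no Examples section).  A minimal, hedged example reusing the
# data from the PLSRegression docstring:
#
#     >>> import numpy as np
#     >>> X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
#     >>> Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
#     >>> plssvd = PLSSVD(n_components=2).fit(X, Y)
#     >>> x_scores, y_scores = plssvd.transform(X, Y)
#     >>> x_scores.shape, y_scores.shape
#     ((4, 2), (4, 2))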
| bsd-3-clause |
ChadFulton/statsmodels | statsmodels/datasets/tests/test_utils.py | 1 | 2417 | import os
import numpy as np
from numpy.testing import assert_, assert_array_equal
import pytest
from statsmodels.datasets import get_rdataset, webuse, check_internet, utils
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_get_rdataset():
# smoke test
test_url = "https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/datasets/cars.csv"
internet_available = check_internet(test_url)
if not internet_available:
pytest.skip('Unable to retrieve file - skipping test')
duncan = get_rdataset("Duncan", "carData", cache=cur_dir)
assert_(isinstance(duncan, utils.Dataset))
duncan = get_rdataset("Duncan", "carData", cache=cur_dir)
assert_(duncan.from_cache)
# test writing and reading cache
guerry = get_rdataset("Guerry", "HistData", cache=cur_dir)
assert_(guerry.from_cache is False)
guerry2 = get_rdataset("Guerry", "HistData", cache=cur_dir)
assert_(guerry2.from_cache is True)
fn = "raw.githubusercontent.com,vincentarelbundock,Rdatasets,master,csv,HistData,Guerry.csv.zip"
os.remove(os.path.join(cur_dir, fn))
fn = "raw.githubusercontent.com,vincentarelbundock,Rdatasets,master,doc,HistData,rst,Guerry.rst.zip"
os.remove(os.path.join(cur_dir, fn))
def test_webuse():
# test copied and adjusted from iolib/tests/test_foreign
from statsmodels.iolib.tests.results.macrodata import macrodata_result as res2
res2 = np.array([list(row) for row in res2])
base_gh = "https://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
internet_available = check_internet(base_gh)
if not internet_available:
pytest.skip('Unable to retrieve file - skipping test')
res1 = webuse('macrodata', baseurl=base_gh, as_df=False)
assert_array_equal(res1, res2)
def test_webuse_pandas():
# test copied and adjusted from iolib/tests/test_foreign
from pandas.util.testing import assert_frame_equal
from statsmodels.datasets import macrodata
dta = macrodata.load_pandas().data
base_gh = "https://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
internet_available = check_internet(base_gh)
if not internet_available:
pytest.skip('Unable to retrieve file - skipping test')
res1 = webuse('macrodata', baseurl=base_gh)
res1 = res1.astype(float)
assert_frame_equal(res1, dta.astype(float))
| bsd-3-clause |
ChadFulton/statsmodels | statsmodels/regression/tests/test_glsar_gretl.py | 1 | 25959 | # -*- coding: utf-8 -*-
"""Tests of GLSAR and diagnostics against Gretl
Created on Thu Feb 02 21:15:47 2012
Author: Josef Perktold
License: BSD-3
"""
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less)
from statsmodels.regression.linear_model import OLS, GLSAR
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
import statsmodels.stats.sandwich_covariance as sw
import statsmodels.stats.diagnostic as smsdia
import statsmodels.stats.outliers_influence as oi
def compare_ftest(contrast_res, other, decimal=(5,4)):
assert_almost_equal(contrast_res.fvalue, other[0], decimal=decimal[0])
assert_almost_equal(contrast_res.pvalue, other[1], decimal=decimal[1])
assert_equal(contrast_res.df_num, other[2])
assert_equal(contrast_res.df_denom, other[3])
assert_equal("f", other[4])
class TestGLSARGretl(object):
def test_all(self):
d = macrodata.load_pandas().data
#import datasetswsm.greene as g
#d = g.load('5-1')
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv'].values))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp'].values))
# simple diff, not growth rate; I want heteroscedasticity later for testing
endogd = np.diff(d['realinv'])
exogd = add_constant(np.c_[np.diff(d['realgdp'].values), d['realint'][:-1].values])
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, d['realint'][:-1].values])
res_ols = OLS(endogg, exogg).fit()
#print res_ols.params
mod_g1 = GLSAR(endogg, exogg, rho=-0.108136)
res_g1 = mod_g1.fit()
#print res_g1.params
mod_g2 = GLSAR(endogg, exogg, rho=-0.108136) #-0.1335859) from R
res_g2 = mod_g2.iterative_fit(maxiter=5)
#print res_g2.params
rho = -0.108136
# coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
partable = np.array([
[-9.50990, 0.990456, -9.602, 3.65e-018, -11.4631, -7.55670], # ***
[ 4.37040, 0.208146, 21.00, 2.93e-052, 3.95993, 4.78086], # ***
[-0.579253, 0.268009, -2.161, 0.0319, -1.10777, -0.0507346]]) # **
#Statistics based on the rho-differenced data:
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.113973),
endog_std = ("S.D. dependent var", 18.67447),
ssr = ("Sum squared resid", 22530.90),
mse_resid_sqrt = ("S.E. of regression", 10.66735),
rsquared = ("R-squared", 0.676973),
rsquared_adj = ("Adjusted R-squared", 0.673710),
fvalue = ("F(2, 198)", 221.0475),
f_pvalue = ("P-value(F)", 3.56e-51),
resid_acf1 = ("rho", -0.003481),
dw = ("Durbin-Watson", 1.993858))
#fstatistic, p-value, df1, df2
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
reset_3 = [5.248951, 0.023, 1, 198, "f"]
#LM-statistic, p-value, df
arch_4 = [7.30776, 0.120491, 4, "chi2"]
#multicollinearity
vif = [1.002, 1.002]
cond_1norm = 6862.0664
determinant = 1.0296049e+009
reciprocal_condition_number = 0.013819244
#Chi-square(2): test-statistic, pvalue, df
normality = [20.2792, 3.94837e-005, 2]
#tests
res = res_g1 #with rho from Gretl
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 6)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=4)
assert_allclose(res.f_pvalue,
result_gretl_g1['f_pvalue'][1],
rtol=1e-2)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, maxlag=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=4)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
#tests
res = res_g2 #with estimated rho
#estimated lag coefficient
assert_almost_equal(res.model.rho, rho, decimal=3)
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 3)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=0)
assert_almost_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], decimal=6)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(2,4))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(2,4))
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, maxlag=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=1)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=2)
'''
Performing iterative calculation of rho...
ITER RHO ESS
1 -0.10734 22530.9
2 -0.10814 22530.9
Model 4: Cochrane-Orcutt, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
rho = -0.108136
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.50990 0.990456 -9.602 3.65e-018 ***
ds_l_realgdp 4.37040 0.208146 21.00 2.93e-052 ***
realint_1 -0.579253 0.268009 -2.161 0.0319 **
Statistics based on the rho-differenced data:
Mean dependent var 3.113973 S.D. dependent var 18.67447
Sum squared resid 22530.90 S.E. of regression 10.66735
R-squared 0.676973 Adjusted R-squared 0.673710
F(2, 198) 221.0475 P-value(F) 3.56e-51
rho -0.003481 Durbin-Watson 1.993858
'''
'''
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023:
'''
'''
Test for ARCH of order 4
coefficient std. error t-ratio p-value
--------------------------------------------------------
alpha(0) 97.0386 20.3234 4.775 3.56e-06 ***
alpha(1) 0.176114 0.0714698 2.464 0.0146 **
alpha(2) -0.0488339 0.0724981 -0.6736 0.5014
alpha(3) -0.0705413 0.0737058 -0.9571 0.3397
alpha(4) 0.0384531 0.0725763 0.5298 0.5968
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491:
'''
'''
Variance Inflation Factors
Minimum possible value = 1.0
Values > 10.0 may indicate a collinearity problem
ds_l_realgdp 1.002
realint_1 1.002
VIF(j) = 1/(1 - R(j)^2), where R(j) is the multiple correlation coefficient
between variable j and the other independent variables
Properties of matrix X'X:
1-norm = 6862.0664
Determinant = 1.0296049e+009
Reciprocal condition number = 0.013819244
'''
'''
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491
Test of common factor restriction -
Null hypothesis: restriction is acceptable
Test statistic: F(2, 195) = 0.426391
with p-value = P(F(2, 195) > 0.426391) = 0.653468
Test for normality of residual -
Null hypothesis: error is normally distributed
Test statistic: Chi-square(2) = 20.2792
with p-value = 3.94837e-005:
'''
#no idea what this is
'''
Augmented regression for common factor test
OLS, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
coefficient std. error t-ratio p-value
---------------------------------------------------------------
const -10.9481 1.35807 -8.062 7.44e-014 ***
ds_l_realgdp 4.28893 0.229459 18.69 2.40e-045 ***
realint_1 -0.662644 0.334872 -1.979 0.0492 **
ds_l_realinv_1 -0.108892 0.0715042 -1.523 0.1294
ds_l_realgdp_1 0.660443 0.390372 1.692 0.0923 *
realint_2 0.0769695 0.341527 0.2254 0.8219
Sum of squared residuals = 22432.8
Test of common factor restriction
Test statistic: F(2, 195) = 0.426391, with p-value = 0.653468
'''
################ with OLS, HAC errors
#Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
#Dependent variable: ds_l_realinv
#HAC standard errors, bandwidth 4 (Bartlett kernel)
#coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
#for confidence interval t(199, 0.025) = 1.972
partable = np.array([
[-9.48167, 1.17709, -8.055, 7.17e-014, -11.8029, -7.16049], # ***
[4.37422, 0.328787, 13.30, 2.62e-029, 3.72587, 5.02258], #***
[-0.613997, 0.293619, -2.091, 0.0378, -1.19300, -0.0349939]]) # **
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.257395),
endog_std = ("S.D. dependent var", 18.73915),
ssr = ("Sum squared resid", 22799.68),
mse_resid_sqrt = ("S.E. of regression", 10.70380),
rsquared = ("R-squared", 0.676978),
rsquared_adj = ("Adjusted R-squared", 0.673731),
fvalue = ("F(2, 199)", 90.79971),
f_pvalue = ("P-value(F)", 9.53e-29),
llf = ("Log-likelihood", -763.9752),
aic = ("Akaike criterion", 1533.950),
bic = ("Schwarz criterion", 1543.875),
hqic = ("Hannan-Quinn", 1537.966),
resid_acf1 = ("rho", -0.107341),
dw = ("Durbin-Watson", 2.213805))
linear_logs = [1.68351, 0.430953, 2, "chi2"]
#for logs: dropping 70 nan or incomplete observations, T=133
#(res_ols.model.exog <=0).any(1).sum() = 69 ?not 70
linear_squares = [7.52477, 0.0232283, 2, "chi2"]
#Autocorrelation, Breusch-Godfrey test for autocorrelation up to order 4
lm_acorr4 = [1.17928, 0.321197, 4, 195, "F"]
lm2_acorr4 = [4.771043, 0.312, 4, "chi2"]
acorr_ljungbox4 = [5.23587, 0.264, 4, "chi2"]
#break
cusum_Harvey_Collier = [0.494432, 0.621549, 198, "t"] #stats.t.sf(0.494432, 198)*2
#see cusum results in files
break_qlr = [3.01985, 0.1, 3, 196, "maxF"] #TODO check this, max at 2001:4
break_chow = [13.1897, 0.00424384, 3, "chi2"] # break at 1984:1
arch_4 = [3.43473, 0.487871, 4, "chi2"]
normality = [23.962, 0.00001, 2, "chi2"]
het_white = [33.503723, 0.000003, 5, "chi2"]
het_breusch_pagan = [1.302014, 0.521520, 2, "chi2"] #TODO: not available
het_breusch_pagan_konker = [0.709924, 0.701200, 2, "chi2"]
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
reset_3 = [5.248951, 0.023, 1, 198, "f"] #not available
cond_1norm = 5984.0525
determinant = 7.1087467e+008
reciprocal_condition_number = 0.013826504
vif = [1.001, 1.001]
names = 'date residual leverage influence DFFITS'.split()
cur_dir = os.path.abspath(os.path.dirname(__file__))
fpath = os.path.join(cur_dir, 'results/leverage_influence_ols_nostars.txt')
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=1,
converters={0:lambda s: s})
#either numpy 1.6 or python 3.2 changed behavior
if np.isnan(lev[-1]['f1']):
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=2,
converters={0:lambda s: s})
lev.dtype.names = names
res = res_ols #for easier copying
cov_hac = sw.cov_hac_simple(res, nlags=4, use_correction=False)
bse_hac = sw.se_cov(cov_hac)
assert_almost_equal(res.params, partable[:,0], 5)
assert_almost_equal(bse_hac, partable[:,1], 5)
#TODO
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=4) #not in gretl
assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=6) #FAIL
assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=6) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
#f-value is based on cov_hac I guess
#res2 = res.get_robustcov_results(cov_type='HC1')
# TODO: fvalue differs from Gretl, trying any of the HCx
#assert_almost_equal(res2.fvalue, result_gretl_g1['fvalue'][1], decimal=0) #FAIL
#assert_approx_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], significant=1) #FAIL
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(6,5))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(6,5))
linear_sq = smsdia.linear_lm(res.resid, res.model.exog)
assert_almost_equal(linear_sq[0], linear_squares[0], decimal=6)
assert_almost_equal(linear_sq[1], linear_squares[1], decimal=7)
hbpk = smsdia.het_breuschpagan(res.resid, res.model.exog)
assert_almost_equal(hbpk[0], het_breusch_pagan_konker[0], decimal=6)
assert_almost_equal(hbpk[1], het_breusch_pagan_konker[1], decimal=6)
hw = smsdia.het_white(res.resid, res.model.exog)
assert_almost_equal(hw[:2], het_white[:2], 6)
#arch
#sm_arch = smsdia.acorr_lm(res.resid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.resid, maxlag=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=5)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
vif2 = [oi.variance_inflation_factor(res.model.exog, k) for k in [1,2]]
infl = oi.OLSInfluence(res_ols)
#print np.max(np.abs(lev['DFFITS'] - infl.dffits[0]))
#print np.max(np.abs(lev['leverage'] - infl.hat_matrix_diag))
#print np.max(np.abs(lev['influence'] - infl.influence)) #just added this based on Gretl
#just rough test, low decimal in Gretl output,
assert_almost_equal(lev['residual'], res.resid, decimal=3)
assert_almost_equal(lev['DFFITS'], infl.dffits[0], decimal=3)
assert_almost_equal(lev['leverage'], infl.hat_matrix_diag, decimal=3)
assert_almost_equal(lev['influence'], infl.influence, decimal=4)
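# Illustrative sketch (not part of the original test module): GLSAR with a
# scalar rho amounts to OLS on quasi-differenced data with the first
# observation dropped, which is what the Gretl Cochrane-Orcutt output quoted
# in this module reports.  Reusing endogg/exogg from test_all (an assumption,
# they are local to that method):
#
#     # rho = -0.108136
#     # y_star = endogg[1:] - rho * endogg[:-1]
#     # x_star = exogg[1:] - rho * exogg[:-1]
#     # OLS(y_star, x_star).fit().params should agree closely with
#     # GLSAR(endogg, exogg, rho=rho).fit().params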
def test_GLSARlag():
#test that results for lag>1 is close to lag=1, and smaller ssr
from statsmodels.datasets import macrodata
d2 = macrodata.load_pandas().data
g_gdp = 400*np.diff(np.log(d2['realgdp'].values))
g_inv = 400*np.diff(np.log(d2['realinv'].values))
exogg = add_constant(np.c_[g_gdp, d2['realint'][:-1].values], prepend=False)
mod1 = GLSAR(g_inv, exogg, 1)
res1 = mod1.iterative_fit(5)
mod4 = GLSAR(g_inv, exogg, 4)
res4 = mod4.iterative_fit(10)
assert_array_less(np.abs(res1.params / res4.params - 1), 0.03)
assert_array_less(res4.ssr, res1.ssr)
assert_array_less(np.abs(res4.bse / res1.bse) - 1, 0.015)
assert_array_less(np.abs((res4.fittedvalues / res1.fittedvalues - 1).mean()),
0.015)
assert_equal(len(mod4.rho), 4)
if __name__ == '__main__':
t = TestGLSARGretl()
t.test_all()
'''
Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: ds_l_realinv
HAC standard errors, bandwidth 4 (Bartlett kernel)
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.48167 1.17709 -8.055 7.17e-014 ***
ds_l_realgdp 4.37422 0.328787 13.30 2.62e-029 ***
realint_1 -0.613997 0.293619 -2.091 0.0378 **
Mean dependent var 3.257395 S.D. dependent var 18.73915
Sum squared resid 22799.68 S.E. of regression 10.70380
R-squared 0.676978 Adjusted R-squared 0.673731
F(2, 199) 90.79971 P-value(F) 9.53e-29
Log-likelihood -763.9752 Akaike criterion 1533.950
Schwarz criterion 1543.875 Hannan-Quinn 1537.966
rho -0.107341 Durbin-Watson 2.213805
QLR test for structural break -
Null hypothesis: no structural break
Test statistic: max F(3, 196) = 3.01985 at observation 2001:4
(10 percent critical value = 4.09)
Non-linearity test (logs) -
Null hypothesis: relationship is linear
Test statistic: LM = 1.68351
with p-value = P(Chi-square(2) > 1.68351) = 0.430953
Non-linearity test (squares) -
Null hypothesis: relationship is linear
Test statistic: LM = 7.52477
with p-value = P(Chi-square(2) > 7.52477) = 0.0232283
LM test for autocorrelation up to order 4 -
Null hypothesis: no autocorrelation
Test statistic: LMF = 1.17928
with p-value = P(F(4,195) > 1.17928) = 0.321197
CUSUM test for parameter stability -
Null hypothesis: no change in parameters
Test statistic: Harvey-Collier t(198) = 0.494432
with p-value = P(t(198) > 0.494432) = 0.621549
Chow test for structural break at observation 1984:1 -
Null hypothesis: no structural break
Asymptotic test statistic: Chi-square(3) = 13.1897
with p-value = 0.00424384
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 3.43473
with p-value = P(Chi-square(4) > 3.43473) = 0.487871:
#ANOVA
Analysis of Variance:
Sum of squares df Mean square
Regression 47782.7 2 23891.3
Residual 22799.7 199 114.571
Total 70582.3 201 351.156
R^2 = 47782.7 / 70582.3 = 0.676978
F(2, 199) = 23891.3 / 114.571 = 208.528 [p-value 1.47e-049]
#LM-test autocorrelation
Breusch-Godfrey test for autocorrelation up to order 4
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 0.0640964 1.06719 0.06006 0.9522
ds_l_realgdp -0.0456010 0.217377 -0.2098 0.8341
realint_1 0.0511769 0.293136 0.1746 0.8616
uhat_1 -0.104707 0.0719948 -1.454 0.1475
uhat_2 -0.00898483 0.0742817 -0.1210 0.9039
uhat_3 0.0837332 0.0735015 1.139 0.2560
uhat_4 -0.0636242 0.0737363 -0.8629 0.3893
Unadjusted R-squared = 0.023619
Test statistic: LMF = 1.179281,
with p-value = P(F(4,195) > 1.17928) = 0.321
Alternative statistic: TR^2 = 4.771043,
with p-value = P(Chi-square(4) > 4.77104) = 0.312
Ljung-Box Q' = 5.23587,
with p-value = P(Chi-square(4) > 5.23587) = 0.264:
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023
#heteroscedasticity White
White's test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 104.920 21.5848 4.861 2.39e-06 ***
ds_l_realgdp -29.7040 6.24983 -4.753 3.88e-06 ***
realint_1 -6.93102 6.95607 -0.9964 0.3203
sq_ds_l_realg 4.12054 0.684920 6.016 8.62e-09 ***
X2_X3 2.89685 1.38571 2.091 0.0379 **
sq_realint_1 0.662135 1.10919 0.5970 0.5512
Unadjusted R-squared = 0.165860
Test statistic: TR^2 = 33.503723,
with p-value = P(Chi-square(5) > 33.503723) = 0.000003:
#heteroscedasticity Breusch-Pagan (original)
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 1.09468 0.192281 5.693 4.43e-08 ***
ds_l_realgdp -0.0323119 0.0386353 -0.8363 0.4040
realint_1 0.00410778 0.0512274 0.08019 0.9362
Explained sum of squares = 2.60403
Test statistic: LM = 1.302014,
with p-value = P(Chi-square(2) > 1.302014) = 0.521520
#heteroscedasticity Breusch-Pagan Koenker
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2 (Koenker robust variant)
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 10.6870 21.7027 0.4924 0.6230
ds_l_realgdp -3.64704 4.36075 -0.8363 0.4040
realint_1 0.463643 5.78202 0.08019 0.9362
Explained sum of squares = 33174.2
Test statistic: LM = 0.709924,
with p-value = P(Chi-square(2) > 0.709924) = 0.701200
########## forecast
#forecast mean y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 2.946312 -22.987904 - -11.367905
2008:4 -27.665860 -36.294434 3.036851 -42.282972 - -30.305896
2009:1 -70.239280 -44.018178 4.007017 -51.919841 - -36.116516
2009:2 -27.024588 -12.284842 1.427414 -15.099640 - -9.470044
2009:3 8.078897 4.483669 1.315876 1.888819 - 7.078520
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
#forecast actual y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 11.101892 -39.070353 - 4.714544
2008:4 -27.665860 -36.294434 11.126262 -58.234939 - -14.353928
2009:1 -70.239280 -44.018178 11.429236 -66.556135 - -21.480222
2009:2 -27.024588 -12.284842 10.798554 -33.579120 - 9.009436
2009:3 8.078897 4.483669 10.784377 -16.782652 - 25.749991
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
'''
| bsd-3-clause |
heli522/scikit-learn | examples/cluster/plot_digits_linkage.py | 366 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich getting richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods is strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
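# Illustrative follow-up (sketch, not part of the original example): the
# "rich getting richer" effect described in the module docstring can be
# quantified by inspecting cluster sizes inside the loop above, e.g.:
#
#     # print(np.bincount(clustering.labels_))
#
# average linkage typically yields a few large clusters next to singletons,
# while ward gives more balanced counts.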
| bsd-3-clause |
hjellinek/server | tests/unit/test_simulator.py | 2 | 5581 | """
Tests for simulated data
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import ga4gh.datamodel as datamodel
import ga4gh.datamodel.datasets as datasets
import ga4gh.datamodel.reads as reads
import ga4gh.datamodel.variants as variants
class TestSimulatedVariantSet(unittest.TestCase):
"""
Test properties of the SimulatedVariantSet
"""
def setUp(self):
self.randomSeed = 0
self.numCalls = 2
# ensure variantDensity is >= 1 so we get deterministic behavior
self.variantDensity = 1
self.simulatedVariantSet = self._getSimulatedVariantSet()
self.referenceName = 'ref'
self.startPosition = 100
self.endPosition = 103
self.variantName = 'unused'
self.callSetIds = ['unused']
self.bases = ["A", "C", "G", "T"]
def _getSimulatedVariantSet(self):
dataset = datasets.AbstractDataset('dataset1')
simulatedVariantSet = variants.SimulatedVariantSet(
dataset, 'variantSet1', self.randomSeed, self.numCalls,
self.variantDensity)
return simulatedVariantSet
def _getSimulatedVariantsList(self, simulatedVariantSet=None):
if simulatedVariantSet is None:
simulatedVariantSet = self.simulatedVariantSet
simulatedVariants = simulatedVariantSet.getVariants(
self.referenceName, self.startPosition, self.endPosition,
self.variantName, self.callSetIds)
variantList = list(simulatedVariants)
return variantList
def testConstruction(self):
# initializing SimulatedVariantSet should store values correctly
self.assertEqual(
self.randomSeed, self.simulatedVariantSet._randomSeed)
self.assertEqual(
self.numCalls, self.simulatedVariantSet._numCalls)
self.assertEqual(
self.variantDensity, self.simulatedVariantSet._variantDensity)
self.assertEqual(
self.simulatedVariantSet.getCreationTime(),
self.simulatedVariantSet.getUpdatedTime())
def testGetVariants(self):
# calling getVariants should produce the expected results
variantList = self._getSimulatedVariantsList()
self.assertEqual(
len(variantList), self.endPosition - self.startPosition)
for offset, simulatedVariant in enumerate(variantList):
start = self.startPosition + offset
variantSetCompoundId = self.simulatedVariantSet.getCompoundId()
variantCompoundId = datamodel.VariantCompoundId.parse(
simulatedVariant.id)
self.assertEqual(
variantSetCompoundId.variantSetId,
self.simulatedVariantSet.getId())
self.assertEqual(
variantSetCompoundId.variantSetId,
variantCompoundId.variantSetId)
self.assertEqual(
variantCompoundId.referenceName, self.referenceName)
self.assertEqual(
variantCompoundId.start, str(simulatedVariant.start))
self.assertEqual(
simulatedVariant.variantSetId,
self.simulatedVariantSet.getId())
self.assertEqual(
simulatedVariant.referenceName, self.referenceName)
self.assertEqual(
simulatedVariant.created, simulatedVariant.updated)
self.assertEqual(simulatedVariant.start, start)
self.assertEqual(simulatedVariant.end, start + 1)
self.assertIn(simulatedVariant.referenceBases, self.bases)
self.assertIn(
simulatedVariant.alternateBases[0], self.bases)
self.assertEqual(len(simulatedVariant.calls), self.numCalls)
def testConsistency(self):
# two SimulatedVariantSet objects given the same parameters
# should produce identical variant lists
variantListOne = self._getSimulatedVariantsList()
simulatedVariantSetTwo = self._getSimulatedVariantSet()
variantListTwo = self._getSimulatedVariantsList(
simulatedVariantSetTwo)
self._assertEqualVariantLists(variantListOne, variantListTwo)
def testSelfConsistent(self):
# the same SimulatedVariantSet should produce identical
# variant lists given the same parameters
variantListOne = self._getSimulatedVariantsList()
variantListTwo = self._getSimulatedVariantsList()
self.assertEqual(variantListOne, variantListTwo)
def _assertEqualVariantLists(self, variantListOne, variantListTwo):
# need to make time-dependent fields equal before the comparison,
# otherwise we're introducing a race condition
timeDependentFields = ['created', 'updated']
for variantList in (variantListOne, variantListTwo):
for variant in variantList:
for field in timeDependentFields:
setattr(variant, field, 0)
self.assertEqual(variantListOne, variantListTwo)
class TestSimulatedReadGroupSet(unittest.TestCase):
"""
Test properties of the simulated ReadGroupSet
"""
def testCreation(self):
dataset = datasets.AbstractDataset('dataset1')
localId = "readGroupSetId"
simulatedReadGroupSet = reads.SimulatedReadGroupSet(
dataset, localId)
for readGroup in simulatedReadGroupSet.getReadGroups():
alignments = list(readGroup.getReadAlignments())
self.assertGreater(len(alignments), 0)
| apache-2.0 |
heli522/scikit-learn | sklearn/tests/test_pipeline.py | 161 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
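# Sketch (illustrative, not part of the original tests): the
# ``clf__should_succeed`` syntax exercised in test_pipeline_fit_params below
# works because Pipeline splits each fit keyword on the first double
# underscore and forwards the remainder to the named step:
#
#     # pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
#     # pipe.fit(None, None, clf__should_succeed=True)
#     # -> should_succeed=True reaches FitParamT.fit, so pipe.predict(None) is True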
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs works for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
mrgloom/rpforest | tests/test_rpforest.py | 4 | 6465 | import cPickle as pickle
import numpy as np
from sklearn.datasets import load_digits
from rpforest import RPForest
def _get_mnist_data():
digits = load_digits()['images']
no_img, rows, cols = digits.shape
X = digits.reshape((no_img, rows * cols))
X = np.ascontiguousarray(X)
np.random.shuffle(X)
X_test = X[:100]
X_train = X[100:]
return X_train, X_test
def test_find_self():
X_train, X_test = _get_mnist_data()
for no_trees, expected_precision in ((1, 0.05),
(5, 0.3),
(10, 0.5),
(50, 0.9)):
tree = RPForest(leaf_size=10, no_trees=no_trees)
tree.fit(X_train)
nodes = {k: set(v) for k, v in tree.get_leaf_nodes()}
for i, x_train in enumerate(X_train):
nns = tree.query(x_train, 10)[:10]
assert nns[0] == i
point_codes = tree.encode(x_train)
for code in point_codes:
assert i in nodes[code]
tree = pickle.loads(pickle.dumps(tree))
nodes = {k: set(v) for k, v in tree.get_leaf_nodes()}
for i, x_train in enumerate(X_train):
nns = tree.query(x_train, 10)[:10]
assert nns[0] == i
point_codes = tree.encode(x_train)
for code in point_codes:
assert i in nodes[code]
def test_clear():
X_train, X_test = _get_mnist_data()
tree = RPForest(leaf_size=10, no_trees=10)
tree.fit(X_train)
for leaf_code, leaf_indices in tree.get_leaf_nodes():
assert leaf_indices
tree.clear()
for leaf_code, leaf_indices in tree.get_leaf_nodes():
assert not leaf_indices
def test_max_size():
X_train, X_test = _get_mnist_data()
tree = RPForest(leaf_size=10, no_trees=10)
tree.fit(X_train)
for leaf_code, leaf_indices in tree.get_leaf_nodes():
assert len(leaf_indices) < 10
def test_encoding_mnist():
X_train, X_test = _get_mnist_data()
for no_trees, expected_precision in ((1, 0.05),
(5, 0.3),
(10, 0.5),
(50, 0.9)):
tree = RPForest(leaf_size=10, no_trees=no_trees)
tree.fit(X_train)
for x_train in X_train:
encodings_0 = tree.encode(x_train)
encodings_1 = tree.encode(x_train)
assert encodings_0 == encodings_1
tree = pickle.loads(pickle.dumps(tree))
for x_train in X_train:
encodings_0 = tree.encode(x_train)
encodings_1 = tree.encode(x_train)
assert encodings_0 == encodings_1
def test_serialization_mnist():
X_train, X_test = _get_mnist_data()
for no_trees, expected_precision in ((1, 0.05),
(5, 0.3),
(10, 0.5),
(50, 0.9)):
tree = RPForest(leaf_size=10, no_trees=no_trees)
tree.fit(X_train)
# Serialize and deserialize
tree = pickle.loads(pickle.dumps(tree))
precision = 0.0
X_train /= np.linalg.norm(X_train, axis=1)[:, np.newaxis]
for x_test in X_test:
true_nns = np.argsort(-np.dot(X_train, x_test))[:10]
nns = tree.query(x_test, 10)[:10]
assert (nns < X_train.shape[0]).all()
precision += len(set(nns) & set(true_nns)) / 10.0
precision /= X_test.shape[0]
assert precision >= expected_precision
def test_mnist():
X_train, X_test = _get_mnist_data()
for no_trees, expected_precision in ((1, 0.05),
(5, 0.3),
(10, 0.5),
(50, 0.9)):
tree = RPForest(leaf_size=10, no_trees=no_trees)
tree.fit(X_train)
precision = 0.0
X_train /= np.linalg.norm(X_train, axis=1)[:, np.newaxis]
for x_test in X_test:
true_nns = np.argsort(-np.dot(X_train, x_test))[:10]
nns = tree.query(x_test, 10)[:10]
assert (nns < X_train.shape[0]).all()
precision += len(set(nns) & set(true_nns)) / 10.0
precision /= X_test.shape[0]
assert precision >= expected_precision
def test_candidates_mnist():
X_train, X_test = _get_mnist_data()
for no_trees, expected_precision in ((1, 0.05),
(5, 0.12),
(10, 0.2),
(50, 0.5),
(80, 0.6)):
tree = RPForest(leaf_size=10, no_trees=no_trees)
tree.fit(X_train)
precision = 0.0
X_train /= np.linalg.norm(X_train, axis=1)[:, np.newaxis]
for x_test in X_test:
true_nns = np.argsort(-np.dot(X_train, x_test))[:10]
check_nns = tree.get_candidates(x_test, 100000)
assert len(check_nns) == len(set(check_nns))
assert -1 not in check_nns
assert (check_nns < X_train.shape[0]).all()
nns = tree.get_candidates(x_test, 10)[:10]
assert (nns < X_train.shape[0]).all()
precision += len(set(nns) & set(true_nns)) / 10.0
precision /= X_test.shape[0]
assert precision >= expected_precision
def test_sample_training():
X_train, X_test = _get_mnist_data()
for no_trees, expected_precision in ((1, 0.05),
(5, 0.3),
(10, 0.5),
(50, 0.9)):
tree = RPForest(leaf_size=10, no_trees=no_trees)
# Fit on quarter of data
X_sample = X_train[:X_train.shape[0] // 4]
tree.fit(X_sample)
# Clear and index everything
tree.clear()
for i, x in enumerate(X_train):
tree.index(i, x)
tree._X = X_train
precision = 0.0
X_train /= np.linalg.norm(X_train, axis=1)[:, np.newaxis]
for x_test in X_test:
true_nns = np.argsort(-np.dot(X_train, x_test))[:10]
nns = tree.query(x_test, 10)[:10]
precision += len(set(nns) & set(true_nns)) / 10.0
precision /= X_test.shape[0]
assert precision >= expected_precision
| apache-2.0 |
arabenjamin/scikit-learn | examples/linear_model/plot_ridge_path.py | 253 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
jhaux/tensorflow | tensorflow/contrib/keras/api/keras/datasets/reuters/__init__.py | 56 | 1065 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reuters newswire topic classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras.datasets.reuters import get_word_index
from tensorflow.contrib.keras.python.keras.datasets.reuters import load_data
del absolute_import
del division
del print_function
| apache-2.0 |
ifuding/Kaggle | TalkingDataFraudDetect/Code/danieleewww.py | 3 | 16607 |
"""
If you find this kernel helpful, please upvote. Any suggestion for improvement is warmly welcomed.
I made cosmetic changes to the [code](https://www.kaggle.com/aharless/kaggle-runnable-version-of-baris-kanber-s-lightgbm/code),
added some new features, and ran it on a 25-million-row chunk.
Ideas were also taken from various public kernels.
"""
FILENO = 6  # To distinguish the output file name.
debug = 0  # Whether or not we are in debugging mode
import pandas as pd
import time
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import metrics
import lightgbm as lgb
import gc
#import matplotlib.pyplot as plt
import os
###### Feature extraction ######
#### Extracting next click feature
### Adapted from https://www.kaggle.com/nanomathias/feature-engineering-importance-testing
### with some cosmetic changes
predictors=[]
def do_next_prev_Click( df,agg_suffix, agg_type='float32'):
print('Extracting new features...')
df['hour'] = pd.to_datetime(df.click_time).dt.hour.astype('int8')
df['day'] = pd.to_datetime(df.click_time).dt.day.astype('int8')
#### New added
df['minute'] = pd.to_datetime(df.click_time).dt.minute.astype('int8')
predictors.append('minute')
df['second'] = pd.to_datetime(df.click_time).dt.second.astype('int8')
predictors.append('second')
# print(f">> \nExtracting {agg_suffix} time calculation features...\n")
print(">> \nExtracting {0} time calculation features...\n".format(agg_suffix))
GROUP_BY_NEXT_CLICKS = [
# V1
# {'groupby': ['ip']},
# {'groupby': ['ip', 'app']},
# {'groupby': ['ip', 'channel']},
# {'groupby': ['ip', 'os']},
# V3
{'groupby': ['ip', 'app', 'device', 'os', 'channel']},
{'groupby': ['ip', 'os', 'device']},
{'groupby': ['ip', 'os', 'device', 'app']}
]
# Calculate the time to next click for each group
for spec in GROUP_BY_NEXT_CLICKS:
# Name of new feature
new_feature = '{}_{}'.format('_'.join(spec['groupby']),agg_suffix)
# Unique list of features to select
all_features = spec['groupby'] + ['click_time']
# Run calculation
#print(f">> Grouping by {spec['groupby']}, and saving time to {agg_suffix} in: {new_feature}")
print(">> Grouping by {0}, and saving time to {1} in: {2}".format(spec['groupby'], agg_suffix, new_feature))
if agg_suffix=="nextClick":
df[new_feature] = (df[all_features].groupby(spec[
'groupby']).click_time.shift(-1) - df.click_time).dt.seconds.astype(agg_type)
elif agg_suffix== "prevClick":
df[new_feature] = (df.click_time - df[all_features].groupby(spec[
'groupby']).click_time.shift(+1) ).dt.seconds.astype(agg_type)
predictors.append(new_feature)
gc.collect()
# print('predictors',predictors)
return (df)
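# Illustrative sketch (not called anywhere in this script): the next-click
# feature built above is simply the gap, in seconds, between consecutive
# clicks within a group. The toy frame and column name below are made up
# purely for illustration.
def _demo_next_click():
    demo = pd.DataFrame({
        'ip': [1, 1, 2],
        'click_time': pd.to_datetime(['2017-11-06 10:00:00',
                                      '2017-11-06 10:00:05',
                                      '2017-11-06 10:00:07'])})
    # per-ip time to the following click; the last click in each group is NaN
    demo['ip_nextClick'] = (demo.groupby('ip').click_time.shift(-1)
                            - demo.click_time).dt.seconds
    return demo  # ip_nextClick == [5.0, NaN, NaN]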
## Below a function is written to extract count feature by aggregating different cols
def do_count( df, group_cols, agg_type='uint32', show_max=False, show_agg=True ):
agg_name='{}count'.format('_'.join(group_cols))
if show_agg:
print( "\nAggregating by ", group_cols , '... and saved in', agg_name )
gp = df[group_cols].groupby(group_cols).size().rename(agg_name).to_frame().reset_index()
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
predictors.append(agg_name)
# print('predictors',predictors)
gc.collect()
return( df )
## Below a function is written to extract unique count feature from different cols
def do_countuniq( df, group_cols, counted, agg_type='uint32', show_max=False, show_agg=True ):
agg_name= '{}_by_{}_countuniq'.format(('_'.join(group_cols)),(counted))
if show_agg:
print( "\nCounting unqiue ", counted, " by ", group_cols , '... and saved in', agg_name )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].nunique().reset_index().rename(columns={counted:agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
predictors.append(agg_name)
# print('predictors',predictors)
gc.collect()
return( df )
### Below a function is written to extract cumulative count feature from different cols
def do_cumcount( df, group_cols, counted,agg_type='uint32', show_max=False, show_agg=True ):
agg_name= '{}_by_{}_cumcount'.format(('_'.join(group_cols)),(counted))
if show_agg:
print( "\nCumulative count by ", group_cols , '... and saved in', agg_name )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].cumcount()
df[agg_name]=gp.values
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
predictors.append(agg_name)
# print('predictors',predictors)
gc.collect()
return( df )
### Below a function is written to extract mean feature from different cols
def do_mean( df, group_cols, counted, agg_type='float32', show_max=False, show_agg=True ):
agg_name= '{}_by_{}_mean'.format(('_'.join(group_cols)),(counted))
if show_agg:
print( "\nCalculating mean of ", counted, " by ", group_cols , '... and saved in', agg_name )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].mean().reset_index().rename(columns={counted:agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
predictors.append(agg_name)
# print('predictors',predictors)
gc.collect()
return( df )
def do_var( df, group_cols, counted, agg_type='float32', show_max=False, show_agg=True ):
agg_name= '{}_by_{}_var'.format(('_'.join(group_cols)),(counted))
if show_agg:
print( "\nCalculating variance of ", counted, " by ", group_cols , '... and saved in', agg_name )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].var().reset_index().rename(columns={counted:agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
predictors.append(agg_name)
# print('predictors',predictors)
gc.collect()
return( df )
### A function is written to train the lightGBM model with different given parameters
if debug:
print('*** debug parameter set: this is a test run for debugging purposes ***')
def lgb_modelfit_nocv(params, dtrain, dvalid, predictors, target='target', objective='binary', metrics='auc',
feval=None, early_stopping_rounds=50, num_boost_round=3000, verbose_eval=10, categorical_features=None):
lgb_params = {
'boosting_type': 'gbdt',
'objective': objective,
'metric':metrics,
'learning_rate': 0.01,
#'is_unbalance': 'true', # because training data is unbalanced (replaced with scale_pos_weight)
'num_leaves': 31, # we should let it be smaller than 2^(max_depth)
'max_depth': -1, # -1 means no limit
'min_child_samples': 20, # Minimum number of data need in a child(min_data_in_leaf)
'max_bin': 255, # Number of bucketed bin for feature values
'subsample': 0.7, # Subsample ratio of the training instance.
'subsample_freq': 0, # frequence of subsample, <=0 means no enable
'colsample_bytree': 0.4, # Subsample ratio of columns when constructing each tree.
'min_child_weight': 10, # Minimum sum of instance weight(hessian) needed in a child(leaf)
'subsample_for_bin': 200000, # Number of samples for constructing bin
'min_split_gain': 0,  # lambda_l1, lambda_l2 and min_gain_to_split are used for regularization
'reg_alpha': 0.9, # L1 regularization term on weights
'reg_lambda': 0.9, # L2 regularization term on weights
'nthread': 8,
'verbose': 0,
}
lgb_params.update(params)
print("preparing validation datasets")
xgtrain = lgb.Dataset(dtrain[predictors].values.astype('float32'), label=dtrain[target].values,
feature_name=predictors,
categorical_feature=categorical_features
)
xgvalid = lgb.Dataset(dvalid[predictors].values.astype('float32'), label=dvalid[target].values,
feature_name=predictors,
categorical_feature=categorical_features
)
evals_results = {}
bst1 = lgb.train(lgb_params,
xgtrain,
valid_sets=[xgtrain, xgvalid],
valid_names=['train','valid'],
evals_result=evals_results,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=100,
feval=feval)
# save model to file
model_name = "model" + str(FILENO) + ".txt"
bst1.save_model(model_name)
print("\nModel Report")
print("bst1.best_iteration: ", bst1.best_iteration)
print(metrics+":", evals_results['valid'][metrics][bst1.best_iteration-1])
return (bst1,bst1.best_iteration)
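# A minimal sketch (illustrative, not part of the training pipeline) of how the
# booster saved above can be reloaded and inspected. The default file name mirrors
# the "model" + FILENO + ".txt" name written in lgb_modelfit_nocv; the calls used
# are standard LightGBM Booster API methods.
def inspect_saved_model(model_path="model6.txt", feature_names=None):
    """Reload a saved LightGBM model and report per-feature gain importance."""
    booster = lgb.Booster(model_file=model_path)
    gains = booster.feature_importance(importance_type='gain')
    names = feature_names if feature_names is not None else booster.feature_name()
    # print features sorted by decreasing gain
    for name, gain in sorted(zip(names, gains), key=lambda t: -t[1]):
        print("{:<40s} {:.1f}".format(name, gain))
    return booster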
## Running the full calculation.
#### A function is written here to run the full calculation with defined parameters.
def DO(frm,to,fileno):
dtypes = {
'ip' : 'uint32',
'app' : 'uint16',
'device' : 'uint8',
'os' : 'uint16',
'channel' : 'uint16',
'is_attributed' : 'uint8',
'click_id' : 'uint32',
}
# print('loading train data...',frm,to)
# train_df = pd.read_csv("../Data/train.csv", parse_dates=['click_time'], skiprows=range(1,frm), nrows=to-frm, dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'is_attributed'])
# print('loading test data...')
# if debug:
# test_df = pd.read_csv("../Data/test.csv", nrows=100000, parse_dates=['click_time'], dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id'])
# else:
# test_df = pd.read_csv("../Data/test.csv", parse_dates=['click_time'], dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id'])
# len_train = len(train_df)
# train_df=train_df.append(test_df)
# del test_df
# gc.collect()
# train_df = do_next_prev_Click( train_df,agg_suffix='nextClick', agg_type='float32' ); gc.collect()
# # train_df = do_next_prev_Click( train_df,agg_suffix='prevClick', agg_type='float32' ); gc.collect() ## Removed temporarily due RAM sortage.
# train_df = do_countuniq( train_df, ['ip'], 'channel' ); gc.collect()
# train_df = do_countuniq( train_df, ['ip', 'device', 'os'], 'app'); gc.collect()
# train_df = do_countuniq( train_df, ['ip', 'day'], 'hour' ); gc.collect()
# train_df = do_countuniq( train_df, ['ip'], 'app'); gc.collect()
# train_df = do_countuniq( train_df, ['ip', 'app'], 'os'); gc.collect()
# train_df = do_countuniq( train_df, ['ip'], 'device'); gc.collect()
# train_df = do_countuniq( train_df, ['app'], 'channel'); gc.collect()
# train_df = do_cumcount( train_df, ['ip'], 'os'); gc.collect()
# train_df = do_cumcount( train_df, ['ip', 'device', 'os'], 'app'); gc.collect()
# train_df = do_count( train_df, ['ip', 'day', 'hour'] ); gc.collect()
# train_df = do_count( train_df, ['ip', 'app']); gc.collect()
# train_df = do_count( train_df, ['ip', 'app', 'os']); gc.collect()
# train_df = do_var( train_df, ['ip', 'day', 'channel'], 'hour'); gc.collect()
# train_df = do_var( train_df, ['ip', 'app', 'os'], 'hour'); gc.collect()
# train_df = do_var( train_df, ['ip', 'app', 'channel'], 'day'); gc.collect()
# train_df = do_mean( train_df, ['ip', 'app', 'channel'], 'hour' ); gc.collect()
# print(train_df.head(5))
# gc.collect()
# print('\n\nBefore appending predictors...\n\n',sorted(predictors))
target = 'is_attributed'
word= ['app','device','os', 'channel', 'hour', 'day','minute', 'second']
# for feature in word:
# if feature not in predictors:
# predictors.append(feature)
predictors = ['app', 'app_by_channel_countuniq', 'channel', 'day', 'device', 'hour', 'ip_app_by_os_countuniq',
'ip_app_channel_by_day_var', 'ip_app_channel_by_hour_mean',
'ip_app_os_by_hour_var', 'ip_app_oscount', 'ip_appcount', 'ip_by_app_countuniq', 'ip_by_channel_countuniq',
'ip_by_device_countuniq', 'ip_by_os_cumcount', 'ip_day_by_hour_countuniq', 'ip_day_channel_by_hour_var',
'ip_day_hourcount', 'ip_device_os_by_app_countuniq', 'ip_device_os_by_app_cumcount',
# 'ip_os_device_app_nextClick', 'ip_os_device_nextClick', 'ip_app_device_os_channel_nextClick',
'minute', 'os', 'second']
categorical = ['app', 'device', 'os', 'channel', 'hour', 'day','minute', 'second']
print('\n\nAfter appending predictors...\n\n',sorted(predictors))
# train_df.to_pickle("daniel.pkl")
# df = train_df
df = pd.read_pickle("daniel.pkl")
len_train = to - frm
test_df = df[len_train:]
val_df = df[(len_train-val_size):len_train]
train_df = df[:(len_train-val_size)]
del df
gc.collect()
print("\ntrain size: ", len(train_df))
print("\nvalid size: ", len(val_df))
print("\ntest size : ", len(test_df))
# load model to predict
bst = lgb.Booster(model_file = 'model6.txt')
# search_iterations = [50, 1100, 50]
# for i in range(search_iterations[0], search_iterations[1], search_iterations[2]):
# y_pred = bst.predict(val_df[predictors].values.astype('float32'), num_iteration=i)
# score = metrics.roc_auc_score(val_df[target].values, y_pred)
# loss = metrics.log_loss(val_df[target].values, y_pred)
# print ("Iteration: {0} AUC: {1} Logloss: {2}".format(i, score, loss))
sub = pd.DataFrame()
sub['click_id'] = test_df['click_id'].astype('int')
gc.collect()
# print("Training...")
# start_time = time.time()
# params = {
# 'learning_rate': 0.01,
# #'is_unbalance': 'true', # replaced with scale_pos_weight argument
# 'num_leaves': 31, # 2^max_depth - 1
# 'max_depth': -1, # -1 means no limit
# 'min_child_samples': 200, # Minimum number of data need in a child(min_data_in_leaf)
# 'max_bin': 200, # Number of bucketed bin for feature values
# 'subsample': 0.8, # Subsample ratio of the training instance.
# 'subsample_freq': 1, # frequence of subsample, <=0 means no enable
# 'colsample_bytree': 0.9, # Subsample ratio of columns when constructing each tree.
# 'min_child_weight': 10, # Minimum sum of instance weight(hessian) needed in a child(leaf)
# 'scale_pos_weight':300 # because training data is extremely unbalanced
# }
# (bst,best_iteration) = lgb_modelfit_nocv(params,
# train_df,
# val_df,
# predictors,
# target,
# objective='binary',
# metrics= 'binary_logloss', #'auc',
# early_stopping_rounds=3000,
# verbose_eval=True,
# num_boost_round=1000,
# categorical_features=categorical)
# print('[{}]: model training time'.format(time.time() - start_time))
# del train_df
# del val_df
# gc.collect()
# ax = lgb.plot_importance(bst, max_num_features=100)
# plt.show()
# plt.savefig('foo.png')
print("Predicting...")
sub['is_attributed'] = bst.predict(test_df[predictors],num_iteration=500) #best_iteration)
# if not debug:
# print("writing...")
sub.to_csv('sub_it%d.csv'%(fileno),index=False,float_format='%.9f')
print("done...")
return sub
####### Chunk size defining and final run ############
nrows=184903891-1
nchunk=12000000
val_size=1200000
frm=nrows-65000000
if debug:
frm=0
nchunk=100000
val_size=10000
to=frm+nchunk
sub=DO(frm,to,FILENO) | apache-2.0 |
arabenjamin/scikit-learn | sklearn/linear_model/__init__.py | 268 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
ChadFulton/statsmodels | statsmodels/datasets/committee/data.py | 1 | 2572 | """First 100 days of the US House of Representatives 1995"""
from statsmodels.datasets import utils as du
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = __doc__
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Number of bill assignments in the 104th House in 1995"""
DESCRLONG = """The example in Gill, seeks to explain the number of bill
assignments in the first 100 days of the US' 104th House of Representatives.
The response variable is the number of bill assignments in the first 100 days
over 20 Committees. The explanatory variables in the example are the number of
assignments in the first 100 days of the 103rd House, the number of members on
the committee, the number of subcommittees, the log of the number of staff
assigned to the committee, a dummy variable indicating whether
the committee is a high prestige committee, and an interaction term between
the number of subcommittees and the log of the staff size.
The data returned by load are not cleaned to represent the above example.
"""
NOTE = """::
Number of Observations - 20
Number of Variables - 6
Variable name definitions::
BILLS104 - Number of bill assignments in the first 100 days of the
104th House of Representatives.
SIZE - Number of members on the committee.
SUBS - Number of subcommittees.
STAFF - Number of staff members assigned to the committee.
PRESTIGE - PRESTIGE == 1 is a high prestige committee.
BILLS103 - Number of bill assignments in the first 100 days of the
103rd House of Representatives.
Committee names are included as a variable in the data file though not
returned by load.
"""
def load_pandas():
data = _get_data()
return du.process_pandas(data, endog_idx=0)
def load(as_pandas=None):
"""Load the committee data and returns a data class.
Parameters
----------
as_pandas : bool
Flag indicating whether to return pandas DataFrames and Series
or numpy recarrays and arrays. If True, returns pandas.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
return du.as_numpy_dataset(load_pandas(), as_pandas=as_pandas)
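# A minimal usage sketch (illustrative; assumes statsmodels is installed and
# exposes this module as statsmodels.datasets.committee, as defined here):
#
#     from statsmodels.datasets import committee
#     data = committee.load_pandas()
#     print(data.endog.head())   # BILLS104, the response variable
#     print(data.exog.head())    # SIZE, SUBS, STAFF, PRESTIGE, BILLS103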
def _get_data():
data = du.load_csv(__file__, 'committee.csv')
data = data.iloc[:, 1:7].astype(float)
return data
| bsd-3-clause |
heli522/scikit-learn | examples/linear_model/plot_logistic.py | 309 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how logistic regression would, on this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
arabenjamin/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
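For example (illustrative), ``strip_accents_unicode(u'\u00e9l\u00e8ve')``
returns ``u'eleve'``.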
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
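# Illustrative example: with ngram_range=(2, 2) the padded word ' hi '
# yields the character bigrams ' h', 'hi' and 'i ', so every n-gram
# stays inside a single (space-delimited) word.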
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundred nanoseconds, which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of string or bytes
items, which are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
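Examples
--------
A minimal, illustrative usage sketch (the output width follows from the
``n_features`` argument; it is not a recommendation of settings):
>>> from sklearn.feature_extraction.text import HashingVectorizer
>>> corpus = ['the quick brown fox', 'jumped over the lazy dog']
>>> vectorizer = HashingVectorizer(n_features=2 ** 8)
>>> X = vectorizer.transform(corpus)
>>> X.shape
(2, 256)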
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a priori dictionary and you do not use an analyzer
that does some kind of feature selection, then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of string or bytes
items, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
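Examples
--------
A minimal, illustrative usage sketch (the learned vocabulary and matrix
shape below follow from the two toy documents):
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> corpus = ['the cat sat', 'the cat sat on the mat']
>>> vectorizer = CountVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> sorted(vectorizer.vocabulary_.keys())
['cat', 'mat', 'on', 'sat', 'the']
>>> X.shape
(2, 5)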
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
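# Illustrative usage sketch (not part of the original module): fit a
# CountVectorizer on a tiny made-up corpus and inspect the learned vocabulary.
# The corpus and the helper name _example_count_vectorizer are invented purely
# for demonstration.
def _example_count_vectorizer():
    corpus = ["the cat sat on the mat",
              "the dog sat on the log",
              "cats and dogs say hello"]
    vectorizer = CountVectorizer()
    counts = vectorizer.fit_transform(corpus)   # sparse matrix, shape (3, n_terms)
    terms = vectorizer.get_feature_names()      # alphabetically sorted vocabulary
    return counts.toarray(), terms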
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
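# Illustrative sketch (not part of the original module): apply TfidfTransformer
# to a small invented count matrix to see the reweighting described in its
# docstring; the first column occurs in every row and so receives the lowest
# idf weight. The helper name _example_tfidf_transformer is invented.
def _example_tfidf_transformer():
    counts = np.array([[3, 0, 1],
                       [2, 0, 0],
                       [3, 0, 0],
                       [4, 0, 0],
                       [3, 2, 0],
                       [3, 0, 2]])
    transformer = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = transformer.fit_transform(counts)   # sparse (6, 3) weighted matrix
    return tfidf.toarray(), transformer.idf_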
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or byte.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
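# Illustrative usage sketch (not part of the original module): TfidfVectorizer
# chains the two steps above (token counting followed by tf-idf weighting).
# The corpus and the helper name _example_tfidf_vectorizer are invented for
# demonstration only.
def _example_tfidf_vectorizer():
    corpus = ["the quick brown fox",
              "the lazy dog",
              "the quick dog jumps over the lazy fox"]
    vectorizer = TfidfVectorizer(norm='l2', use_idf=True)
    weights = vectorizer.fit_transform(corpus)   # sparse (3, n_terms) tf-idf matrix
    return weights.toarray(), vectorizer.get_feature_names()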
| bsd-3-clause |
tapomayukh/projects_in_python | sandbox_tapo/src/skin_related/BMED_8813_HAP/Scaling/best_kNN_PC/cross_validate_categories_kNN_PC_BMED_8813_HAP_scaled_method_II.py | 1 | 4435 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/BMED_8813_HAP/Data')
from data import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 123:
j=0
while j < 90:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_1 = ['Edge-1']*30 + ['Surface']*30 + ['Edge-2']*30
PCA_chunk_1 = ['Can-Edge-1']*5 + ['Book-Edge-1']*5 + ['Brown-Cardboard-Box-Edge-1']*5 + ['Cinder-Block-Edge-1']*5 + ['Tin-Box-Edge-1']*5 + ['White-Cardboard-Box-Edge-1']*5 + ['Can-Surface']*5 + ['Book-Surface']*5 + ['Brown-Cardboard-Box-Surface']*5 + ['Cinder-Block-Surface']*5 + ['Tin-Box-Surface']*5 + ['White-Cardboard-Box-Surface']*5 + ['Can-Edge-2']*5 + ['Book-Edge-2']*5 + ['Brown-Cardboard-Box-Edge-2']*5 + ['Cinder-Block-Edge-2']*5 + ['Tin-Box-Edge-2']*5 + ['White-Cardboard-Box-Edge-2']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
#Projected Data:
Y = (W.T)*B
m_Y, n_Y = np.shape(Y.T)
return Y.T
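# Illustrative sketch (not part of the original script): the same
# project-onto-principal-components step on random data of the shape the
# hard-coded sanity check in pca() expects (123 x 90). The helper name and the
# random matrix are invented for demonstration.
def example_projection(num_PC=2):
    data = np.matrix(np.random.rand(123, 90))
    vec, val, mean_x, B, C = pca(data)
    proj = result(vec, val, mean_x, B, C, num_PC)
    return np.shape(proj)   # expected (90, num_PC): one row per trial column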
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((90,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
| mit |
DonBeo/scikit-learn | benchmarks/bench_plot_lasso_path.py | 299 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
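# Minimal illustrative sketch (not part of the benchmark): one untimed run of
# the same four code paths compared above, on a small synthetic problem. The
# function name and problem sizes are invented for demonstration.
def example_single_run(n_samples=50, n_features=20):
    X, y = make_regression(n_samples=n_samples, n_features=n_features,
                           n_informative=5)
    G = np.dot(X.T, X)                               # precomputed Gram matrix
    Xy = np.dot(X.T, y)
    lars_path(X, y, Xy=Xy, Gram=G, method='lasso')   # Lars, with Gram
    lars_path(X, y, method='lasso')                  # Lars, without Gram
    lasso_path(X, y, precompute=True)                # coord. descent, with Gram
    lasso_path(X, y, precompute=False)               # coord. descent, without Gram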
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
heli522/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 133 | 3517 | """
========================================================================================
Topics extraction with Non-Negative Matrix Factorization And Latent Dirichlet Allocation
========================================================================================
This is an example of applying Non Negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents and
extracting additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
# use tf-idf feature for NMF model
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(data_samples)
# use tf feature for LDA model
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("\nFitting LDA models with tf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
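# Illustrative follow-up (not part of the original example): once fitted, both
# models can project unseen documents into the learned topic space. The sample
# sentence below is invented for demonstration.
new_docs = ["The spacecraft entered orbit around the red planet."]
print("\nTopic weights for a new document (NMF, then LDA):")
print(nmf.transform(tfidf_vectorizer.transform(new_docs)))
print(lda.transform(tf_vectorizer.transform(new_docs)))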
| bsd-3-clause |
arabenjamin/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 133 | 3517 | """
========================================================================================
Topics extraction with Non-Negative Matrix Factorization And Latent Dirichlet Allocation
========================================================================================
This is an example of applying Non Negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents and
extracting additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
# use tf-idf feature for NMF model
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(data_samples)
# use tf feature for LDA model
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("\nFitting LDA models with tf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
ChadFulton/statsmodels | statsmodels/datasets/macrodata/data.py | 2 | 3193 | """United States Macroeconomic data"""
from statsmodels.datasets import utils as du
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
Compiled by Skipper Seabold. All data are from the Federal Reserve Bank of St.
Louis [1] except the unemployment rate which was taken from the Bureau of
Labor Statistics [2]. ::
[1] Data Source: FRED, Federal Reserve Economic Data, Federal Reserve Bank of
St. Louis; http://research.stlouisfed.org/fred2/; accessed December 15,
2009.
[2] Data Source: Bureau of Labor Statistics, U.S. Department of Labor;
http://www.bls.gov/data/; accessed December 15, 2009.
"""
DESCRSHORT = """US Macroeconomic Data for 1959Q1 - 2009Q3"""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of Observations - 203
Number of Variables - 14
Variable name definitions::
year - 1959q1 - 2009q3
quarter - 1-4
realgdp - Real gross domestic product (Bil. of chained 2005 US$,
seasonally adjusted annual rate)
realcons - Real personal consumption expenditures (Bil. of chained
2005 US$, seasonally adjusted annual rate)
realinv - Real gross private domestic investment (Bil. of chained
2005 US$, seasonally adjusted annual rate)
realgovt - Real federal consumption expenditures & gross investment
(Bil. of chained 2005 US$, seasonally adjusted annual rate)
realdpi - Real private disposable income (Bil. of chained 2005
US$, seasonally adjusted annual rate)
cpi - End of the quarter consumer price index for all urban
consumers: all items (1982-84 = 100, seasonally adjusted).
m1 - End of the quarter M1 nominal money stock (Seasonally
adjusted)
tbilrate - Quarterly monthly average of the monthly 3-month
treasury bill: secondary market rate
unemp - Seasonally adjusted unemployment rate (%)
pop - End of the quarter total population: all ages incl. armed
forces over seas
infl - Inflation rate (ln(cpi_{t}/cpi_{t-1}) * 400)
realint - Real interest rate (tbilrate - infl)
"""
def load_pandas():
data = _get_data()
return du.Dataset(data=data, names=list(data.columns))
def load(as_pandas=None):
"""
Load the US macro data and return a Dataset class.
Parameters
----------
as_pandas : bool
Flag indicating whether to return pandas DataFrames and Series
or numpy recarrays and arrays. If True, returns pandas.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
The macrodata Dataset instance does not contain endog and exog attributes.
"""
return du.as_numpy_dataset(load_pandas(), as_pandas=as_pandas)
def _get_data():
return du.load_csv(__file__, 'macrodata.csv').astype(float)
variable_names = ["realcons", "realgdp", "realinv"]
def __str__():
return "macrodata"
| bsd-3-clause |
lehmannro/pootle | pootle/legacy/jToolkit/prefs.py | 5 | 30843 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""manages preferences files that are hierarchical and nice for both humans and python"""
# Copyright 2002, 2003 St James Software
#
# This file is part of jToolkit.
#
# jToolkit is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# jToolkit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jToolkit; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from pootle.legacy.jToolkit import sparse
import os
import sys
class indent(str):
def __init__(self, value):
self.level = 0
def setlevel(self, level):
self.level = level
def __repr__(self):
return "indent(%d)" % self.level
def evaluateboolean(value, default=False):
"""evaluates the pref value as a boolean"""
if value is None:
return default
if isinstance(value, (str, unicode)):
if value.isdigit():
value = int(value)
else:
value = value.lower() == 'true'
return bool(value)
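# A few illustrative checks (not part of the original module) that simply
# restate the branches of evaluateboolean above:
assert evaluateboolean("true")
assert not evaluateboolean("0")
assert evaluateboolean(None, default=True)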
class PrefNode(object):
def __init__(self, parent, keypart):
if keypart is None:
self.__dict__["_parser"] = parent
self.__dict__["__root__"] = self
self.__dict__["__key__"] = ""
self.__dict__["_setvalue"] = getattr(parent, "setvalue")
self.__dict__["_removekey"] = getattr(parent, "removekey")
self.__dict__["_resolve"] = getattr(parent, "resolveconfigobject")
self.__dict__["_assignments"] = {}
else:
self.__dict__["__root__"] = parent.__root__
if parent.__key__ == "":
self.__dict__["__key__"] = keypart
else:
self.__dict__["__key__"] = parent.__key__ + "." + keypart
def __setattr__(self, keypart, value):
parentpart = self.__key__
if len(parentpart) > 0:
parentpart += "."
self.__root__._setvalue(parentpart + keypart, value)
def __delattr__(self, keypart):
if not keypart in self.__dict__:
object.__delattr__(self, keypart)
elif self.__key__ == "":
self.__root__._removekey(keypart)
else:
self.__root__._removekey(self.__key__ + "." + keypart)
def __getattr__(self, attr):
"""tries to find the attribute, handling unicode if given"""
# TODO: add default support
if "." in attr:
if self.__root__ == self:
return self._parser.getvalue(attr)
return self.__root__.__getattr__(self.__key__ + "." + attr)
if isinstance(attr, basestring) and attr in self.__dict__:
return self.__dict__[attr]
if not (isinstance(attr, unicode) or "." in attr):
return getattr(super(PrefNode, self), attr)
else:
raise AttributeError("%r PrefNode object has no attribute %r" % (self.__class__.__name__, attr))
def __hasattr__(self, attr):
"""tries to find if attribute is present, handling unicode if given"""
if "." in attr:
if self.__root__ == self:
return self._parser.hasvalue(attr)
return self.__root__.__hasattr__(self.__key__ + "." + attr)
if isinstance(attr, basestring):
if attr in self.__dict__:
return True
if not (isinstance(attr, unicode) or "." in attr):
return hasattr(super(PrefNode, self), attr)
return False
def __repr__(self):
return self.__key__
def __str__(self):
return self.__key__
def __getstate__(self):
return (self.__root__._resolve.im_self.__getstate__(), self.__key__)
def __setstate__(self, state):
parserText, key = state
parser = PrefsParser()
parser.parse(parserText)
# Now we need some way of making self the object denoted by state[1]
keys = key.split(".")
oldobject = parser.__root__
parent = None
if not len(keys) == 1 and keys[0] == "":
for keypart in keys:
parent = oldobject
oldobject = oldobject.__getattr__(keypart)
self.__dict__["__key__"] = key
if oldobject.__dict__.has_key("__root__"):
if key == "":
self.__dict__["__root__"] = self
else:
self.__dict__["__root__"] = oldobject.__root__
if oldobject.__dict__.has_key("__base__"):
self.__dict__["__base__"] = oldobject.__base__
PrefNode.copyattrs(oldobject, self)
if parent == None:
parser.__root__ = self
else:
parent.__setattr__(keys[-1], self)
def getlabel(self):
return getattr(self, "__base__", self.__key__)
def copy(oldobject, parent, keypart):
"""
Returns a PrefNode identical to this one, but with a different name and parent
Assignments are duplicated in __root__._assignments so they will be noticed by getsource
"""
newobject = PrefNode(parent, keypart)
PrefNode.copyattrs(oldobject, newobject)
return newobject
def copyattrs(oldobject, newobject):
for childkeypart in oldobject.__dict__:
if childkeypart in ("__root__", "__key__", "__base__"): continue
oldchild = oldobject.__dict__[childkeypart]
if isinstance(oldchild, PrefNode):
newobject.__dict__[childkeypart] = oldchild.copy(newobject, childkeypart)
else:
newobject.__dict__[childkeypart] = oldchild
label = newobject.getlabel() + '.' + childkeypart
newobject.__root__._assignments[label] = oldchild
def relocate(self, newkey):
self.__dict__["__base__"] = self.__key__
self.__dict__["__key__"] = newkey
for childkeypart in self.__dict__:
if childkeypart in ("__root__", "__key__", "__base__"): continue
child = self.__dict__[childkeypart]
if isinstance(child, PrefNode):
child.relocate(newkey + "." + childkeypart)
def renamepref(self,name,newname):
"""
Rename a pref by removing it and adding a new one
name is the pref to rename
newname is what that pref will be renamed to
"""
value = getattr(self,name)
setattr(self,newname,value)
delattr(self,name)
def iteritems(self, sorted=False):
"""iterate through the items, sort them by key if sorted"""
if sorted:
childitems = self.__dict__.items()
childitems.sort()
else:
childitems = self.__dict__.iteritems()
for childkeypart, child in childitems:
if childkeypart in ("__root__", "__key__", "__base__"): continue
if childkeypart in ("_parser", "_setvalue", "_removekey", "_resolve", "_assignments"): continue
yield childkeypart, child
def getparser(self):
"""finds the parser object this belongs to"""
return self.__root__._parser
# TODO: allow UnresolvedPref to resolve prefs using Python imports
class UnresolvedPref:
def __init__(self, root, label):
self.__dict__["__root__"] = root
self.__dict__["__key__"] = label
def __repr__(self):
return self.__key__
def __str__(self):
return self.__key__
def resolve(self):
return self.__root__._resolve(self.__key__)
# TODO: improve handling of object values - strings, integers, classes, modules etc
class PrefsParser(sparse.SimpleParser, object):
def __init__(self, filename=None):
"""sets up the PrefsParser"""
sparse.SimpleParser.__init__(self, includewhitespacetokens = 1)
self.unicodeprefix = "u"
self.standardtokenizers = [self.commenttokenize, self.stringtokenize, \
self.removewhitespace, self.splitatnewline, self.separatetokens]
self.__root__ = PrefNode(self, None)
# Initialise all the token holders
if filename is None:
self.parse("")
else:
self.parsefile(filename)
self.__initialized__ = True
def __setattr__(self, keypart, value):
"""we need to be able to act as the root node in case setattr is called on us directly"""
if getattr(self, "__initialized__", False) and not self.__hasattr__(keypart) and keypart not in ("changes", "filename"):
self.setvalue(keypart, value)
else:
super(PrefsParser, self).__setattr__(keypart, value)
def __getstate__(self):
"""Prepares the object for pickling
This preserves the only thing we actually need to preserve - the text returned from self.getsource()"""
return self.getsource()
def __setstate__(self, state):
"""Unpickles the object
This takes a prefsfile source, created by another parser, and creates a new parser"""
self.__init__() # In case it hasn't been called
self.parse(state)
def keeptogether(self, input):
"""checks whether a token should be kept together"""
# don't retokenize strings
return sparse.SimpleParser.keeptogether(self, input) or self.iscommenttoken(input)
def stringtokenize(self, input):
"""makes strings in input into tokens... but keeps comment tokens together"""
if self.iscommenttoken(input):
return [input]
return sparse.SimpleParser.stringtokenize(self, input)
def iscommenttoken(self, input):
"""checks whether a token is a comment token"""
return input[:1] == "#"
def commenttokenize(self, input):
"""makes comments in input into tokens..."""
if sparse.SimpleParser.keeptogether(self, input): return [input]
tokens = []
incomment = False
laststart = 0
startcommentchar, endcommentchar = '#', '\n'
for pos in range(len(input)):
char = input[pos]
if incomment:
if char == endcommentchar:
if pos > laststart: tokens.append(input[laststart:pos])
incomment, laststart = False, pos
else:
if char == startcommentchar:
if pos > laststart: tokens.append(input[laststart:pos])
incomment, laststart = True, pos
if laststart < len(input): tokens.append(input[laststart:])
return tokens
def splitatnewline(self, input):
"""splits whitespace tokens at newline, putting the newline at the beginning of the split strings
whitespace tokens not containing newlines are discarded"""
if self.keeptogether(input): return [input]
if input.isspace():
lastnewline = input.rfind("\n")
if lastnewline != -1:
return [input[lastnewline:]]
return []
return [input]
def handleindents(self):
"""finds indents in tokens and replaces them with indent objects"""
indentedtokens = []
indentstack = [0]
for tokennum, token in enumerate(self.tokens):
if token[:1] == "\n":
if tokennum+1 < len(self.tokens) and self.iscommenttoken(self.tokens[tokennum+1]):
# treat indents before comments as whitespace, by removing them
continue
tokenlength = len(token[1:])
if tokenlength <= indentstack[-1]:
if tokenlength not in indentstack:
self.raiseerror("invalid indentation", tokennum)
indentstack = indentstack[:indentstack.index(tokenlength)+1]
else:
indentstack.append(tokenlength)
token = indent(token)
token.setlevel(indentstack.index(tokenlength))
indentedtokens.append(token)
if len(indentedtokens) > 0 and not isinstance(indentedtokens[0], indent):
indentedtokens = [indent("")] + indentedtokens
self.tokens = indentedtokens
def parseassignments(self):
"""parses all the assignments from the tokenized preferences"""
self.__dict__.setdefault('removals',{})
self.__dict__.setdefault('valuepos',{})
self.__dict__.setdefault('commentpos',{})
self.__dict__.setdefault('sectionstart',{})
self.__dict__.setdefault('sectionend',{})
assignvar = None
operation = None
lastcomment = None
lastvalue = None
lastsection = None
lastindent = indent("")
context = {}
indentlevels = {}
self.refreshposcache()
for tokennum, token in enumerate(self.tokens):
if isinstance(token, indent):
if token.level < lastindent.level:
parentcontext = ".".join([context[level] for level in range(token.level)])
for level in range(token.level, lastindent.level):
if level in context:
if not parentcontext:
childcontext = context[level]
else:
childcontext = parentcontext + "." + context[level]
self.sectionend[childcontext] = (tokennum, indentlevels[level+1])
parentcontext = childcontext
del context[level]
elif token.level > lastindent.level:
if operation == ':':
operation = None
else:
self.raiseerror("indent without preceding :", tokennum)
lastindent = token
indentlevels[lastindent.level] = token
elif self.iscommenttoken(token):
# if the last value or section found is on the same line
# as this comment then this comment refers to that pref
lastcomment = (tokennum,token)
if (lastvalue is not None) and (lastsection is not None):
commentline,commentcharpos = self.getlinepos(self.findtokenpos(tokennum))
valuenum,value = lastvalue
vline,vpos = self.getlinepos(self.findtokenpos(valuenum))
sectionnum,section = lastsection
sectionline,sectionpos = self.getlinepos(self.findtokenpos(sectionnum))
if commentline == vline:
self.commentpos[value] = tokennum
lastcomment = None
elif commentline == sectionline:
self.commentpos[section] = tokennum
lastcomment = None
elif token == '=':
operation = token
elif token == ':':
context[lastindent.level] = assignvar
operation = token
prefixes = [context[level] for level in range(0, lastindent.level)]
key = ".".join(prefixes+[assignvar])
self.sectionstart[key] = tokennum
lastsection = (tokennum,key)
elif operation == '=':
prefixes = [context[level] for level in range(0, lastindent.level)]
key = ".".join(prefixes+[assignvar])
realvalue = self.parsevalue(token)
self.setvalue(key, realvalue)
self.valuepos[key] = tokennum
operation = None
lastvalue = (tokennum,key)
elif operation is None:
if self.isstringtoken(token):
if token.startswith(self.unicodeprefix):
assignvar = sparse.stringeval(token.replace(self.unicodeprefix, "", 1)).decode("utf-8")
else:
assignvar = sparse.stringeval(token)
else:
assignvar = token
else:
self.raiseerror("I don't know how to parse that here", tokennum)
# handle comments
if operation == '=' or token == ':':
# if the last comment found is on the line before this one then it refers to this pref/section
if lastcomment is not None:
commentnum,comment = lastcomment
commentline,commentcharpos = self.getlinepos(self.findtokenpos(commentnum))
myline,mycharpos = self.getlinepos(self.findtokenpos(tokennum))
if myline == (commentline+1):
if token == ':' and lastsection is not None:
self.commentpos[lastsection[1]] = commentnum
elif operation == '=' and lastvalue is not None:
self.commentpos[lastvalue[1]] = commentnum
def parse(self, prefsstring):
"""this parses a string and returns a base preferences object"""
self.tokenize(prefsstring)
self.handleindents()
self.parseassignments()
self.resolveinternalassignments()
def parsefile(self, filename):
"""this opens a file and parses the contents"""
self.filename = filename
prefsfile = open(filename, 'r')
contents = prefsfile.read()
prefsfile.close()
self.parse(contents)
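  def _example_parse(self):
    """Illustrative sketch (not part of the original class), assuming the
    indentation-based ``section:`` / ``key = value`` syntax handled by
    parseassignments above: parse a small prefs string and read values back,
    mirroring the PrefsParser()/parse() pattern used in PrefNode.__setstate__."""
    parser = PrefsParser()
    parser.parse('server:\n  host = "localhost"\n  port = 8080\n')
    # values are reachable via getvalue and via attribute access on the root node
    return parser.getvalue("server.host"), parser.__root__.server.port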
def savefile(self, filename=None, safesave=False):
"""this saves the source to the given filename"""
if filename is None:
filename = getattr(self, "filename")
else:
self.filename = filename
contents = self.getsource()
if safesave:
dirpart = os.path.abspath(os.path.dirname(filename))
filepart = os.path.basename(filename)
tempfilename = os.tempnam(dirpart, filepart)
prefsfile = open(tempfilename, 'w')
prefsfile.write(contents)
prefsfile.close()
try:
if os.name == 'nt' and os.path.exists(filename):
os.remove(filename)
os.rename(tempfilename, filename)
except OSError, e:
os.remove(tempfilename)
raise e
else:
prefsfile = open(filename, 'w')
prefsfile.write(contents)
prefsfile.close()
def hasvalue(self, key):
"""returns whether the given key is present"""
keyparts = key.split(".")
parent = self.__root__
for keypart in keyparts:
if not parent.__dict__.has_key(keypart):
return False
parent = parent.__dict__[keypart]
return True
def getvalue(self, key):
"""gets the value of the given key"""
keyparts = key.split(".")
parent = self.__root__
for keypart in keyparts:
if not parent.__dict__.has_key(keypart):
raise IndexError("parent does not have child %r when trying to find %r" % (keypart, key))
parent = parent.__dict__[keypart]
return parent
def setvalue(self, key, value):
"""set the given key to the given value"""
if isinstance(value, PrefNode):
value.relocate(key)
# we don't store PrefNodes in assignments
if not isinstance(value, PrefNode):
self.__root__._assignments[key] = value
keyparts = key.split(".")
parent = self.__root__
for keypart in keyparts[:-1]:
if not parent.__dict__.has_key(keypart):
parent.__dict__[keypart] = PrefNode(parent, keypart)
child = parent.__dict__[keypart]
# it is possible that this is overriding a value with a prefnode
if not isinstance(child, PrefNode):
child = PrefNode(parent, keypart)
parent.__dict__[keypart] = child
parent = child
parent.__dict__[keyparts[-1]] = value
def removekey(self, key):
"""remove the given key from the prefs tree. no node removal yet"""
if key in self.__root__._assignments:
del self.__root__._assignments[key]
parent = self.__root__
keyparts = key.split(".")
for keypart in keyparts[:-1]:
if not parent.__dict__.has_key(keypart):
raise ValueError("key %s not found: %s has no child %s" % (key, parent.__key__, keypart))
parent = parent.__dict__[keypart]
attribname = keyparts[-1]
if attribname in parent.__dict__:
deadnode = parent.__dict__[attribname]
if isinstance(deadnode, PrefNode):
for childkey in deadnode.__dict__.keys():
if childkey not in ("__root__", "__key__", "__base__"):
self.removekey(key + "." + childkey)
del parent.__dict__[attribname]
else:
raise ValueError("key %s not found: %s has no child %s" % (key, parent.__key__, attribname))
self.removals[key] = True
def parsevalue(self, value):
"""Parses a value set in a config file, and returns the correct object type"""
if not isinstance(value, (str, unicode)):
return value
# If it's a string, try to parse it
if (value.isdigit()):
return int(value)
elif (value[0] in ['"',"'"] and value[-1] == value[0]):
return sparse.stringeval(value)
elif (value[0] == 'u' and value[1] in ['"',"'"] and value[-1] == value[1]):
return sparse.stringeval(value[1:]).decode("utf-8")
else:
return self.resolveconfigobject(value)
def quotevalue(self, value):
"""converts a realvalue from parsevalue back to a string that can be stored in the prefs file"""
if isinstance(value, int) or isinstance(value, long):
return str(value)
elif isinstance(value, PrefNode):
return value.getlabel()
elif isinstance(value, UnresolvedPref):
return repr(value)
elif isinstance(value, str):
return sparse.stringquote(value)
elif isinstance(value, unicode):
return "u" + sparse.stringquote(value.encode("utf-8"))
elif value is None:
return ""
else:
raise ValueError("don't know how to quote %r value: %r" % (type(value), value))
def resolveconfigobject(self, value):
"""Tries to find the object specified by "value" as a member of self
Should be overridden if more types of objects are available"""
valueparts = value.split(".")
parent = self.__root__
if hasattr(self.__root__, 'importmodules') and hasattr(self.__root__.importmodules,valueparts[0])\
and not hasattr(self.__root__,valueparts[0]):
parent = self.__root__.importmodules
found = True
for valuepart in valueparts:
if isinstance(parent, PrefNode):
# handle resolving for standard nodes...
if hasattr(parent,valuepart):
parent = parent.__dict__[valuepart]
else:
found = False
break
else:
# handle resolving for other objects...
if hasattr(parent, valuepart):
parent = getattr(parent, valuepart)
else:
found = False
break
if found:
if isinstance(parent, PrefNode):
return parent.copy(self.__root__, value)
else:
# other objects can't be copied, so just return them...
return parent
elif value in self.__root__._assignments:
return self.__root__._assignments[value]
else:
return UnresolvedPref(self.__root__, value)
def resolveinternalassignments(self):
"""resolves any unresolved assignments that are internal (i.e. don't rely on imports)"""
unresolved = len(self.__root__._assignments)
lastunresolved = unresolved+1
while unresolved < lastunresolved:
lastunresolved = unresolved
unresolved = 0
for key in self.__root__._assignments:
currentvalue = self.__root__._assignments[key]
if isinstance(currentvalue, UnresolvedPref):
unresolved += 1
newvalue = currentvalue.resolve()
if newvalue != currentvalue:
self.setvalue(key, newvalue)
def resolveimportmodules(self, mapmodulename=None):
"""actually imports modules specified in importmodules. not used unless explicitly called"""
# import any required modules
for refname, modulename in self.importmodules.iteritems():
if mapmodulename:
modulename = mapmodulename(modulename)
try:
module = __import__(modulename, globals(), locals())
except ImportError, importmessage:
errormessage = "Error importing module %r: %s\nPython path is %r" \
% (modulename, importmessage, sys.path)
raise ImportError(errormessage)
components = modulename.split('.')
for component in components[1:]:
module = getattr(module, component)
importmodulekey = self.importmodules.__key__ + "." + refname
self.setvalue(importmodulekey, module)
# TODO: this is a hack. add this resolving into prefs so we don't have to delete the assignment
# we currently delete the assignment so that the prefsfile can be safely saved
del self.__root__._assignments[importmodulekey]
modulevalue = getattr(self.importmodules, refname)
def addKeyToDict(self, keyparts, value, dictionary):
if len(keyparts) == 1:
dictionary[keyparts[0]] = value
else:
dictionary.setdefault(keyparts[0],{})
self.addKeyToDict(keyparts[1:],value,dictionary[keyparts[0]])
def writeDict(self, dictionary, indent):
result = ""
sortedKeys = dictionary.keys()
sortedKeys.sort()
for key in sortedKeys:
quotedkey = key
if isinstance(quotedkey, unicode):
try:
quotedkey = str(quotedkey)
except UnicodeError:
pass
if isinstance(quotedkey, unicode):
quotedkey = "u" + sparse.stringquote(quotedkey.encode("utf-8"))
else:
testalphakey = quotedkey.replace("_", "a").replace(".", "0")
if not (testalphakey[:1].isalpha() and testalphakey.isalnum()):
quotedkey = sparse.stringquote(quotedkey)
if isinstance(dictionary[key], dict):
result += indent + quotedkey + ":\n"
result += self.writeDict(dictionary[key], indent+" ")
else:
result += indent + "%s = %s\n" % (quotedkey, dictionary[key])
return result
def findimmediateparent(self, key):
"""finds the most immediate indented parent of the given key"""
keyparts = key.split(".")
for keynum in range(len(keyparts),-1,-1):
parentkey = ".".join(keyparts[:keynum])
if parentkey in self.sectionend:
return parentkey
return None
def addchange(self, tokennum, tokenschanged, newtokens, parentstart=None):
"""adds a change to self.changes"""
if tokennum in self.changes:
tokenchanges = self.changes[tokennum]
else:
tokenchanges = {}
self.changes[tokennum] = tokenchanges
if parentstart in tokenchanges:
tokenchanges[parentstart].append((tokenschanged, newtokens))
else:
tokenchanges[parentstart] = [(tokenschanged, newtokens)]
def getsource(self):
"""reconstructs the original prefs string with any changes that have been made..."""
# changes is a dictionary, key is the position of the change
# each value is a list of changes at that position
# each change in the list is a tuple of the number of tokens to remove, and the new string
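    # illustrative shape (hypothetical token positions) of what addchange builds:
    #   self.changes = {42: {None: [(1, '"new value"')]},
    #                   57: {12: [(0, '  child = 3')]}}
    # i.e. replace one token at position 42, and insert new text at position 57
    # whose parent section starts at token 12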
self.changes = {}
extradict = {}
for key in self.__root__._assignments:
currentvalue = self.__root__._assignments[key]
keyfound = key in self.valuepos
# try and handle key being encoded or not sensibly...
if not keyfound:
try:
if isinstance(key, str):
otherkey = key.decode("utf-8")
elif isinstance(key, unicode):
otherkey = key.encode("utf-8")
keyfound = otherkey in self.valuepos
except:
pass
if keyfound:
# the key exists. change the value
tokennum = self.valuepos[key]
if currentvalue != self.parsevalue(self.tokens[tokennum]):
self.addchange(tokennum, 1, self.quotevalue(currentvalue))
else:
# the key doesn't yet exist...
keyparts = key.split(".")
parentkey = self.findimmediateparent(key)
if parentkey is not None:
nodetokennum, nodeindent = self.sectionend[parentkey]
parentstart = self.sectionstart[parentkey]
keynum = parentkey.count(".") + 1
childkey = ".".join(keyparts[keynum:])
testalphakey = childkey.replace("_", "a").replace(".", "0")
needsquoting = False
if isinstance(testalphakey, unicode):
try:
testalphakey = testalphakey.encode("ascii")
except UnicodeEncodeError:
needsquoting = True
if not (testalphakey[:1].isalpha() and testalphakey.isalnum()):
needsquoting = True
if needsquoting:
childkey = self.quotevalue(childkey)
quotedvalue = nodeindent + "%s = %s" % (childkey, self.quotevalue(currentvalue))
self.addchange(nodetokennum, 0, quotedvalue, parentstart=parentstart)
else:
self.addKeyToDict(keyparts, self.quotevalue(currentvalue), extradict)
for key in self.removals:
if key in self.valuepos:
tokennum = self.valuepos[key]
elif key in self.sectionstart:
tokennum = self.sectionstart[key]
else:
# then we don't know how to remove it
# extras.append("# tried to remove key but could not find it: %s" % key)
continue
# to remove something, we search backwards from the value for an indent
# and slice out the section from the indent to the value
starttokennum = tokennum
while starttokennum >= 0:
if isinstance(self.tokens[starttokennum], indent):
if starttokennum == 0:
starttokennum += 1
break
starttokennum -= 1
if starttokennum > 0:
self.addchange(starttokennum, tokennum+1-starttokennum, "")
else:
# if we didn't find it, leave a note...
self.addchange(tokennum, 1, "none")
# now regenerate the source including the changes...
tokennums = self.changes.keys()
tokennums.sort()
lastpos = 0
newsource = ""
for tokennum in tokennums:
tokenpos = self.findtokenpos(tokennum)
newsource += self.source[lastpos:tokenpos]
totalremovetokens = 0
parentstarts = self.changes[tokennum].keys()
parentstarts.sort()
parentstarts.reverse()
for parentstart in parentstarts:
for removetokens, newtext in self.changes[tokennum][parentstart]:
if isinstance(newtext, unicode):
newtext = newtext.encode("utf-8")
newsource += newtext
totalremovetokens += removetokens
# work out the position of the last token, and put lastpos at the end of this token
lasttokenpos = tokennum + totalremovetokens - 1
if len(self.tokens) > lasttokenpos:
lastpos = self.findtokenpos(lasttokenpos) + len(self.tokens[lasttokenpos])
newsource += self.source[lastpos:]
if extradict:
if newsource and newsource[-1] != "\n": newsource += "\n"
newsource += self.writeDict(extradict, "")
return newsource
def getcomment(self,key):
"""
returns the comment associated with this the key given,
or none if there is no such key
"""
tokennum = self.commentpos.get(key,None)
if tokennum:
return self.tokens[tokennum]
else:
return None
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
elif '__root__' in self.__dict__:
if isinstance(attr, unicode):
return self.__root__.__getattr__(attr)
return getattr(self.__root__, attr)
raise AttributeError("'PrefsParser' object has no attribute %s" % attr)
def __hasattr__(self, attr):
if attr in self.__dict__:
return True
elif '__root__' in self.__dict__:
if isinstance(attr, unicode):
return self.__root__.__hasattr__(attr)
return hasattr(self.__root__, attr)
if __name__ == "__main__":
import sys
parser = PrefsParser()
originalsource = sys.stdin.read()
parser.parse(originalsource)
import pickle
pickled = pickle.dumps(parser)
recreatedsource = parser.getsource()
if recreatedsource != originalsource:
print >>sys.stderr, "recreatedsource != originalsource"
sys.stdout.write(recreatedsource)
unpickled = pickle.loads(pickled)
sys.stdout.write("===========================================\n")
sys.stdout.write(unpickled.getsource())
if unpickled.getsource() != recreatedsource:
print >>sys.stderr, "unpickledsource != recreatedsource"
| gpl-2.0 |
tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Time_Window/test10_cross_validate_categories_400ms.py | 1 | 4371 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Window')
from data_400ms import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
    # Recall that the cumulative sum of the eigenvalues shows the level of variance accounted for by each of the corresponding eigenvectors; on the x-axis is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
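    # For example, if perc_total[9] were already around 0.95, the first 10
    # principal components would capture ~95% of the total variance, which is
    # the kind of check used to justify keeping 10 components below.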
    # Reduced eigenvector matrix according to the highest eigenvalues (keeping the first 10 principal components here)
W = eigvec_total[:,0:10]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but sometimes useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
print 'Z-Score Shape:', m_Z, n_Z
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=9)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True')
#show()
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,30.3,0,1.2])
grid('True')
show()
| mit |
DonBeo/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 375 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
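        # Classic perceptron update rule: whenever a sample is misclassified,
        # move the decision boundary toward the correct side by adding
        # y_i * x_i to the weights and y_i to the bias.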
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
DonBeo/scikit-learn | examples/cluster/plot_color_quantization.py | 295 | 3443 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
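# Rough storage arithmetic for the idea described in the docstring (illustrative
# only): the raw RGB image takes w*h*3 bytes, while the quantized version needs
# w*h bytes of palette indices plus n_colors*3 bytes for the palette itself,
# i.e. roughly a 3x reduction for any reasonably large image.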
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (which needs to
# be in the range [0, 1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
YihaoLu/statsmodels | statsmodels/sandbox/examples/try_multiols.py | 32 | 1243 | # -*- coding: utf-8 -*-
"""
Created on Sun May 26 13:23:40 2013
Author: Josef Perktold, based on Enrico Giampieri's multiOLS
"""
#import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.sandbox.multilinear import multiOLS, multigroup
data = sm.datasets.longley.load_pandas()
df = data.exog
df['TOTEMP'] = data.endog
#This will perform the specified linear model on all the
#other columns of the dataframe
res0 = multiOLS('GNP + 1', df)
#This select only a certain subset of the columns
res = multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])
print(res.to_string())
url = "http://vincentarelbundock.github.com/"
url = url + "Rdatasets/csv/HistData/Guerry.csv"
df = pd.read_csv(url, index_col=1) #'dept')
# evaluate the relationship between the various parameters and Wealth
pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']
#define the groups
groups = {}
groups['crime'] = ['Crime_prop', 'Infanticide',
'Crime_parents', 'Desertion', 'Crime_pers']
groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']
groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']
#do the analysis of the significance
res3 = multigroup(pvals < 0.05, groups)
print(res3)
| bsd-3-clause |
tensorflow/tensorflow-experimental_link_static_libraries_once | tensorflow/python/distribute/input_lib_test.py | 10 | 81506 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the input_lib library."""
import collections
from absl.testing import parameterized
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.data.experimental.ops import data_service_ops
from tensorflow.python.data.experimental.service import server_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.data.ops.options import AutoShardPolicy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import input_util
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute.v1 import input_lib as input_lib_v1
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import extension_type
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util as framework_test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor as ragged_tensor_lib
from tensorflow.python.util import nest
class DistributedIteratorTestBase(test.TestCase):
  # The passed input_context is used to create a sharded dataset in the
  # between-graph case.
  # TODO(yuefengz): rewrite the following method to make it DRYer (i.e. less
  # repetitive).
def _wrap_iterator(self,
input_type,
dataset_or_input_fn,
input_workers,
devices,
num_replicas_in_sync,
strategy,
input_context=None):
    # The `input_context` passed in is used to shard the dataset for
    # MultiWorkerMirroredStrategy. It doesn't apply to the in-graph case where
    # multiple InputContexts are needed.
if input_type == "input_fn":
self.assertIsNone(
input_context,
msg=("`The input_context` arg is only used to shard dataset in "
"`MultiWorkerMirroredStrategy` when the input type is dataset."))
input_contexts = []
for i in range(input_workers.num_workers):
input_contexts.append(
distribute_lib.InputContext(
# Note: `input_workers.num_workers` is always 1 in between-graph
# case.
num_input_pipelines=input_workers.num_workers,
input_pipeline_id=i,
num_replicas_in_sync=len(devices)))
iterator = input_lib_v1.InputFunctionIterator(dataset_or_input_fn,
input_workers,
input_contexts, strategy)
else:
iterator = input_lib_v1.DatasetIterator(
dataset_or_input_fn,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context)
return iterator
def _wrap_dataset(self,
input_type,
dataset,
input_workers,
num_replicas_in_sync,
strategy,
input_context=None):
if input_type == "dataset":
if tf2.enabled():
return input_lib.DistributedDataset(
input_workers,
strategy,
dataset,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context)
else:
return input_lib_v1.DistributedDatasetV1(
dataset,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context)
else:
return strategy.distribute_datasets_from_function(dataset)
def _assert_iterator_values(self,
iterator,
expected_values,
evaluate_fn,
devices,
enable_get_next_as_optional=False):
actual_values = []
for _ in range(len(expected_values)):
if enable_get_next_as_optional:
next_element = iterator.get_next_as_optional().get_value()
else:
next_element = iterator.get_next()
computed_value = evaluate_fn([
distribute_utils.select_replica(r, next_element)
for r in range(len(devices))
])
actual_values.append(computed_value)
for expected_value, actual_value in zip(expected_values, actual_values):
for expected, actual in zip(expected_value, actual_value):
self.assertAllEqual(expected, actual)
def _assert_dataset_values_for_loop(self, dataset, expected_values,
evaluate_fn, devices):
actual_values = []
for x in dataset:
computed_value = self.evaluate(
[distribute_utils.select_replica(r, x) for r in range(len(devices))])
actual_values.append(computed_value)
for expected_value, actual_value in zip(expected_values, actual_values):
for expected, actual in zip(expected_value, actual_value):
self.assertAllEqual(expected, actual)
def _test_input_iteration(self,
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
strategy,
sess=None,
num_replicas_in_sync=None,
input_context=None):
if iteration_type == "for_loop" and not context.executing_eagerly():
self.skipTest("unsupported test combination.")
if api_type == "wrap_into_iterator" and iteration_type == "for_loop":
self.skipTest("unsupported test combination.")
if api_type == "wrap_into_iterator" and input_type == "input_fn":
self.skipTest("unsupported test combination.")
devices = nest.flatten([ds for _, ds in worker_device_pairs])
input_workers = input_lib.InputWorkers(worker_device_pairs)
if api_type == "wrap_into_iterator":
iterator = self._wrap_iterator(
input_type,
dataset_or_input_fn,
input_workers,
devices,
num_replicas_in_sync,
strategy,
input_context=input_context)
else:
# wrapping into a dataset:
dataset = self._wrap_dataset(
input_type,
dataset_or_input_fn,
input_workers,
num_replicas_in_sync,
strategy,
input_context=input_context)
if ops.executing_eagerly_outside_functions():
iterator = iter(dataset)
else:
if isinstance(dataset, input_lib_v1.DistributedDatasetV1):
iterator = dataset.make_initializable_iterator()
else:
self.skipTest("unsupported test combination")
if isinstance(iterator, composite_tensor.CompositeTensor):
nest.assert_same_structure(
iterator, iterator._type_spec, expand_composites=True)
if iteration_type == "get_next":
evaluate = lambda x: sess.run(x) if sess else self.evaluate(x)
if not ops.executing_eagerly_outside_functions():
evaluate(control_flow_ops.group(iterator.initializer))
def test_get_next(iterator):
self._assert_iterator_values(iterator, expected_values, evaluate,
devices)
with self.assertRaises(errors.OutOfRangeError):
self._assert_iterator_values(iterator, expected_values, evaluate,
devices)
# After re-initializing the iterator, should be able to iterate again.
if not ops.executing_eagerly_outside_functions():
evaluate(control_flow_ops.group(iterator.initializer))
else:
if api_type == "wrap_into_iterator":
self.skipTest("unsupported test combination")
else:
iterator = iter(dataset)
self._assert_iterator_values(iterator, expected_values, evaluate,
devices)
def test_get_next_as_optional(iterator):
self._assert_iterator_values(
iterator,
expected_values,
evaluate,
devices,
enable_get_next_as_optional=True)
next_element = iterator.get_next_as_optional()
self.assertFalse(self.evaluate(next_element.has_value()))
with self.assertRaises(errors.InvalidArgumentError):
self._assert_iterator_values(
iterator, [0],
evaluate,
devices,
enable_get_next_as_optional=True)
test_get_next(iterator)
# re-initializing the iterator
if not tf2.enabled():
# TODO(yuefengz): we should split this function.
return
else:
if api_type == "wrap_into_iterator":
return
else:
iterator = iter(dataset)
test_get_next_as_optional(iterator)
if iteration_type == "for_loop" and context.executing_eagerly():
self._assert_dataset_values_for_loop(dataset, expected_values,
self.evaluate, devices)
def _create_dataset_or_input_fn(self, input_type, input_fn):
if input_type == "input_fn":
return input_fn
else:
return input_fn(distribute_lib.InputContext())
class DistributedIteratorTest(DistributedIteratorTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
]))
def testMultiDeviceIterInitialize(self, distribution):
if tf2.enabled():
self.skipTest("Only V1 is supported.")
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
dataset_fn = lambda _: dataset_ops.DatasetV1.range(10)
input_workers = input_lib.InputWorkers(worker_device_pairs)
dist_dataset = input_util.get_distributed_dataset(
dataset_fn(distribute_lib.InputContext()), input_workers, distribution)
iterator = dataset_ops.make_one_shot_iterator(dist_dataset)
@def_function.function
def init_func_for_iter():
self.evaluate(iterator.initializer)
init_func_for_iter()
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
],
enable_get_next_as_optional=[True, False]))
def testOneDeviceCPU(self, input_type, api_type, iteration_type, distribution,
enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
dataset_fn = lambda _: dataset_ops.Dataset.range(10)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [[i] for i in range(10)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[strategy_combinations.multi_worker_mirrored_2x1_cpu],
enable_get_next_as_optional=[True, False]))
def testOneDeviceCPUMultiWorker(self, input_type, api_type, iteration_type,
distribution, enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
dataset_fn = lambda _: dataset_ops.DatasetV1.range(10)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [[i] for i in range(10)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu
],
enable_get_next_as_optional=[True, False]))
def testTwoDevicesOneGPUOneCPU(self, input_type, api_type, iteration_type,
distribution, enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
dataset_fn = lambda _: dataset_ops.Dataset.range(10)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [[i, i + 1] for i in range(0, 10, 2)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[strategy_combinations.tpu_strategy],
enable_get_next_as_optional=[True, False]))
def testTPU(self, input_type, api_type, iteration_type, distribution,
enable_get_next_as_optional):
worker_device_pairs = collections.OrderedDict()
for tpu_device in distribution.extended.worker_devices:
host_device = device_util.get_host_for_device(tpu_device)
worker_device_pairs.setdefault(host_device, [])
worker_device_pairs[host_device].append(tpu_device)
worker_device_pairs = worker_device_pairs.items()
dataset_fn = lambda _: dataset_ops.Dataset.range(10)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [[i, i + 1] for i in range(0, 10, 2)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
],
enable_get_next_as_optional=[True, False]))
def testTupleDataset(self, input_type, api_type, iteration_type, distribution,
enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
def dataset_fn(ctx):
del ctx
dataset1 = dataset_ops.Dataset.range(10)
dataset2 = dataset_ops.Dataset.range(10).map(lambda x: x**2)
return dataset_ops.Dataset.zip((dataset1, dataset2))
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [
[(i, i**2), (i + 1, (i + 1)**2)] for i in range(0, 10, 2)
]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call
],
enable_get_next_as_optional=[True, False]))
def testTupleDatasetMultiworker(self, input_type, api_type, iteration_type,
distribution, enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:GPU:1"])]
def dataset_fn(ctx):
del ctx
dataset1 = dataset_ops.Dataset.range(10)
dataset2 = dataset_ops.Dataset.range(10).map(lambda x: x**2)
return dataset_ops.Dataset.zip((dataset1, dataset2))
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [
[(i, i**2), (i + 1, (i + 1)**2)] for i in range(0, 10, 2)
]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
# Input_context is not passed in and thus no sharding.
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
]))
def testIterableIterator(self, distribution):
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
input_workers = input_lib.InputWorkers(worker_device_pairs)
dataset = dataset_ops.Dataset.range(10)
dist_dataset = input_util.get_distributed_dataset(dataset, input_workers,
distribution)
iterator = iter(dist_dataset)
for i, element in enumerate(iterator):
self.assertAllEqual(distribution.experimental_local_results(element), [i])
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_one_cpu,
]))
def testIterableIteratorError(self, distribution):
dataset = dataset_ops.Dataset.range(10).batch(2)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
iterator = iter(dist_dataset)
# Raises error when next(iterator) is called without strategy scope
with self.assertRaises(ValueError):
def replica_fn1(iterator):
return next(iterator)
distribution.run(replica_fn1, args=(iterator,))
if distribution.num_replicas_in_sync == 1:
expected_result = [[[0, 1]], [[2, 3]], [[4, 5]], [[6, 7]], [[8, 9]]]
elif distribution.num_replicas_in_sync == 2:
expected_result = [[[0], [1]], [[2], [3]], [[4], [5]], [[6], [7]],
[[8], [9]]]
with distribution.scope():
def replica_fn2(iterator):
return iterator
result = distribution.run(replica_fn2, args=(next(iterator),))
self.assertAllEqual(
distribution.experimental_local_results(result), expected_result[0])
# Confirm default ReplicaContext also works
iterator = iter(dist_dataset)
for i, element in enumerate(iterator):
self.assertAllEqual(
distribution.experimental_local_results(element), expected_result[i])
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
drop_remainder=[True, False],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu
]))
def testUnevenDatasetBatches(self, input_type, api_type, iteration_type,
drop_remainder, distribution):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
dataset_fn = lambda _: dataset_ops.Dataset.range(9).batch( # pylint: disable=g-long-lambda
2, drop_remainder=drop_remainder)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
# The last global batch only contains data for one replica.
if drop_remainder:
expected_values = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
else:
expected_values = [[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8], []]]
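    # range(9).batch(2) yields [0, 1], [2, 3], [4, 5], [6, 7], [8]; in each step
    # consecutive batches go to replica 0 and replica 1, so without
    # drop_remainder the last step gives [8] to one replica and an empty batch
    # to the other.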
distribution.extended.experimental_enable_get_next_as_optional = True
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
drop_remainder=[True, False],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
]))
def testUnevenDatasetBatchesMultiWorker(self, input_type, api_type,
iteration_type, drop_remainder,
distribution):
# Actual devices don't matter in this test as long as the number of global
    # replicas is 2.
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
worker_count = multi_worker_util.worker_count(cr.cluster_spec(),
cr.task_type)
id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
cr.task_type, cr.task_id)
def dataset_fn(_):
dataset = dataset_ops.Dataset.range(9)
if input_type == "input_fn":
# When input_fn is used, there is no automatic rebatching and sharding,
# so we add them here.
return dataset.shard(worker_count, id_in_cluster).batch(1)
else:
return dataset.batch(2, drop_remainder=drop_remainder)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
if drop_remainder and input_type == "dataset":
if id_in_cluster == 0:
expected_values = [[[0]], [[2]], [[4]], [[6]]]
else:
expected_values = [[[1]], [[3]], [[5]], [[7]]]
else:
# The last global batch only contains data for one replica.
if id_in_cluster == 0:
expected_values = [[[0]], [[2]], [[4]], [[6]], [[8]]]
else:
expected_values = [[[1]], [[3]], [[5]], [[7]], [[]]]
distribution.extended.experimental_enable_get_next_as_optional = True
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
drop_remainder=[True, False],
distribution=[
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call
]))
def testUnevenDatasetBatchesMultiWorkerFourReplicas(self, input_type,
api_type, iteration_type,
drop_remainder,
distribution):
# Actual devices don't matter in this test as long as the number of global
    # replicas is 2.
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:GPU:1"])]
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
worker_count = multi_worker_util.worker_count(cr.cluster_spec(),
cr.task_type)
id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
cr.task_type, cr.task_id)
def dataset_fn(_):
dataset = dataset_ops.Dataset.range(15)
if input_type == "input_fn":
# When input_fn is used, there is no automatic rebatching and sharding,
# so we add them here.
return dataset.shard(worker_count, id_in_cluster).batch(1)
else:
return dataset.batch(4, drop_remainder=drop_remainder)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
# The last global batch only contains data for one replica.
if drop_remainder and input_type == "dataset":
if id_in_cluster == 0:
expected_values = [[[0], [2]], [[4], [6]], [[8], [10]]]
else:
expected_values = [[[1], [3]], [[5], [7]], [[9], [11]]]
else:
if id_in_cluster == 0:
expected_values = [[[0], [2]], [[4], [6]], [[8], [10]], [[12], [14]]]
else:
expected_values = [[[1], [3]], [[5], [7]], [[9], [11]], [[13], []]]
distribution.extended.experimental_enable_get_next_as_optional = True
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
num_replicas_in_sync=[None, 2],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu
],
enable_get_next_as_optional=[True, False]))
def testBatchSplitting(self, input_type, api_type, iteration_type,
num_replicas_in_sync, distribution,
enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
batch_size = 10
dataset_fn = lambda _: dataset_ops.Dataset.range(100).batch(batch_size)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
updated_batch_size = (
batch_size //
num_replicas_in_sync if num_replicas_in_sync else batch_size)
expected_values = [[
range(i, i + updated_batch_size),
range(i + updated_batch_size, i + 2 * updated_batch_size)
] for i in range(0, 100, updated_batch_size * 2)]
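    # For example, with num_replicas_in_sync=2 the global batch of 10 is split
    # into per-replica batches of 5, so the first step yields
    # [range(0, 5), range(5, 10)] across the two replicas.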
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
sess=None,
num_replicas_in_sync=num_replicas_in_sync)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["dataset"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
num_replicas_in_sync=[None, 2],
distribution=[
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call
],
enable_get_next_as_optional=[True, False]))
def testBatchSplittingMultiWorker(self, input_type, api_type, iteration_type,
num_replicas_in_sync, distribution,
enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:GPU:1"])]
batch_size = 10
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
def dataset_fn(_):
dataset = dataset_ops.Dataset.range(100).batch(batch_size)
return dataset
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
updated_batch_size = (
batch_size //
num_replicas_in_sync if num_replicas_in_sync else batch_size)
expected_values = [
[ # pylint: disable=g-complex-comprehension
range(i, i + updated_batch_size),
range(i + updated_batch_size, i + 2 * updated_batch_size)
] for i in range(0, 100, updated_batch_size * 2)
]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
sess=None,
num_replicas_in_sync=num_replicas_in_sync)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
))
def testCacheAcrossIteration(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
dataset = dataset_ops.Dataset.range(16).shuffle(16).cache().batch(4)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
first_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
second_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
self.assertAllEqual(first_epoch, second_epoch)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
reshuffle=[True, False]))
def testShuffleAcrossIterations(self, distribution, reshuffle):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
dataset = dataset_ops.Dataset.range(12).shuffle(
12, reshuffle_each_iteration=reshuffle).batch(4)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
first_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
second_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
if reshuffle:
self.assertNotAllEqual(first_epoch, second_epoch)
else:
self.assertAllEqual(first_epoch, second_epoch)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
]))
def testGetNextOptionalShapeFinite(self, distribution):
batch_size = 8
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"feature": array_ops.ones([batch_size, 10]),
"label": array_ops.ones([batch_size]),
})
dataset = dataset.batch(batch_size, drop_remainder=True)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
@def_function.function
def train_fn():
for data in dist_dataset:
data = nest.map_structure(distribution.experimental_local_results, data)
feature = data["feature"]
label = data["label"]
# Assert the shapes are still static from all replicas.
for replica_id in range(len(distribution.extended.worker_devices)):
self.assertEqual([None, 10],
feature[replica_id].shape.as_list())
self.assertEqual([None], label[replica_id].shape.as_list())
train_fn()
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
]))
def testGetNextOptionalShapeInfinite(self, distribution):
batch_size = 8
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"feature": array_ops.ones([batch_size, 10]),
"label": array_ops.ones([batch_size]),
})
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.repeat()
dist_dataset = distribution.experimental_distribute_dataset(dataset)
per_replica_batch_size = batch_size // distribution.num_replicas_in_sync
@def_function.function
def train_fn():
data = iter(dist_dataset).get_next_as_optional().get_value()
data = nest.map_structure(distribution.experimental_local_results, data)
feature = data["feature"]
label = data["label"]
# Assert the shapes are still static from all replicas.
for replica_id in range(len(distribution.extended.worker_devices)):
self.assertEqual([per_replica_batch_size, 10],
feature[replica_id].shape.as_list())
self.assertEqual([per_replica_batch_size],
label[replica_id].shape.as_list())
train_fn()
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
]))
def testGetNextOptionalShapeEmpty(self, distribution):
batch_size = 8
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"feature": array_ops.ones([batch_size, 10]),
"label": array_ops.ones([batch_size]),
})
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.repeat()
dist_dataset = distribution.experimental_distribute_dataset(dataset)
per_replica_batch_size = batch_size // distribution.num_replicas_in_sync
@def_function.function
def train_fn():
data = iter(dist_dataset).get_next_as_optional()
feature_specs = data.element_spec["feature"]._component_specs
value_specs = data.element_spec["label"]._component_specs
if not isinstance(feature_specs, tuple):
feature_specs = (feature_specs,)
value_specs = (value_specs,)
# Assert the shapes are still static from all replicas.
for replica_id in range(len(distribution.extended.worker_devices)):
self.assertEqual([per_replica_batch_size, 10],
feature_specs[replica_id].shape.as_list())
self.assertEqual([per_replica_batch_size],
value_specs[replica_id].shape.as_list())
train_fn()
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
auto_shard_policy=[AutoShardPolicy.AUTO, AutoShardPolicy.OFF]))
def testAutoshardingOption(self, distribution, input_type, api_type,
iteration_type, auto_shard_policy):
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
cr.task_type, cr.task_id)
ds_option = options_lib.Options()
ds_option.experimental_distribute.auto_shard_policy = auto_shard_policy
dataset_fn = (
lambda _: dataset_ops.Dataset.range(4).with_options(ds_option))
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
if auto_shard_policy == AutoShardPolicy.AUTO:
if id_in_cluster == 0:
expected_values = [[0], [2]]
else:
expected_values = [[1], [3]]
else:
expected_values = [[0], [1], [2], [3]]
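    # In other words: with AUTO sharding each of the two workers sees every
    # other element of range(4), while with OFF both workers iterate over the
    # full dataset.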
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
input_type=["input_fn"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"]))
def testDifferentDatasetsMultiWorker(self, distribution, input_type, api_type,
iteration_type):
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
cr.task_type, cr.task_id)
def dataset_fn(ctx):
if ctx.input_pipeline_id == 0:
return dataset_ops.Dataset.range(8).batch(2)
else:
return dataset_ops.Dataset.range(9).batch(2)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
if id_in_cluster == 0:
expected_values = [[[0, 1]], [[2, 3]], [[4, 5]], [[6, 7]], [[]]]
else:
expected_values = [[[0, 1]], [[2, 3]], [[4, 5]], [[6, 7]], [[8]]]
distribution.extended.experimental_enable_get_next_as_optional = True
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
mode=["eager"]))
def testLoopOverDatasetInTFFunction(self, strategy):
dataset = dataset_ops.Dataset.range(10).map(lambda x: { # pylint: disable=g-long-lambda
"y": math_ops.cast(x, dtypes.float32) ** 2,
}).batch(4)
dist_dataset = strategy.experimental_distribute_dataset(dataset)
with strategy.scope():
v = variables.Variable(0.0, aggregation=variables.VariableAggregation.SUM)
@def_function.function
def iterator_fn(dist_dataset):
def assign_add_fn(data):
v.assign_add(math_ops.reduce_sum(data["y"]))
for data in dist_dataset:
strategy.run(assign_add_fn, args=(data,))
iterator_fn(dist_dataset)
self.assertEqual(v.numpy(), 285.0)
class DistributedIteratorTensorTypeTest(DistributedIteratorTestBase,
parameterized.TestCase):
"""Tests for DistributedDataset with non-dense tensors."""
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
],
input_type=["dataset", "input_fn"],
drop_remainder=[False, True],
defun_type=["lambda", "tf_function"],
))
def testRaggedSparse(self, distribution, input_type, drop_remainder,
defun_type):
"""Test with `RaggedTensor`s and `SparseTensor`s."""
self.skipTest("b/213596871, b/214574707")
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
defun = {
"lambda": lambda f: f,
"tf_function": def_function.function
}[defun_type]
distribution.extended.experimental_enable_get_next_as_optional = True
global_batch_size = 8
def dataset_fn(ctx=None):
ctx = ctx or distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(global_batch_size)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"dense": ragged_tensor.to_tensor(),
"ragged": ragged_tensor,
"sparse": ragged_tensor.to_sparse(),
})
dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
return dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
dataset = self._wrap_dataset(input_type, dataset_or_input_fn,
distribution.extended._input_workers,
distribution.num_replicas_in_sync,
distribution)
# Assert that the tensors are rebatched and sparsity is preserved.
per_replica_batch = defun(lambda x: next(iter(x)))(dataset)
self.assertAllEqual(
distribute_utils.select_replica(0, per_replica_batch["dense"]),
[[0., 0., 0.], [1., 0., 0.], [2., 2., 0.], [3., 3., 3.]])
self.assertAllEqual(
distribute_utils.select_replica(1, per_replica_batch["dense"]),
[[0., 0., 0.], [5., 0., 0.], [6., 6., 0.], [7., 7., 7.]])
# Transitively check the ragged and sparse tensors by densification.
for i in range(2):
self.assertLen(
distribute_utils.select_replica(i,
per_replica_batch["ragged"]).values,
6)
self.assertAllEqual(
distribute_utils.select_replica(
i, per_replica_batch["ragged"]).to_tensor(),
distribute_utils.select_replica(i, per_replica_batch["dense"]))
self.assertLen(
distribute_utils.select_replica(i,
per_replica_batch["sparse"]).indices,
6)
self.assertAllEqual(
sparse_ops.sparse_tensor_to_dense(
distribute_utils.select_replica(i, per_replica_batch["sparse"])),
distribute_utils.select_replica(i, per_replica_batch["dense"]))
# Iterate through all the batches and sum them up.
def sum_batch(per_replica_features):
"""Sums the `PerReplica` values in the `per_replica_features` map."""
def map_fn(per_replica_values):
per_replica_sums = distribution.run(
(lambda x: math_ops.reduce_sum(x.values)) if all(
map(sparse_tensor.is_sparse, per_replica_values.values)) else
math_ops.reduce_sum, (per_replica_values,))
return distribution.reduce(
reduce_util.ReduceOp.SUM, per_replica_sums, axis=None)
return nest.map_structure(map_fn, per_replica_features)
def _reduce(state, batch):
sums = sum_batch(batch)
return {name: value + sums[name] for name, value in state.items()}
def sum_for_loop(dataset):
sums = {"dense": 0., "ragged": 0., "sparse": 0.}
for batch in dataset:
sums = _reduce(sums, batch)
return sums
def sum_while_loop(iterator, reduce_fn):
sums = {"dense": 0., "ragged": 0., "sparse": 0.}
while True:
try:
sums = reduce_fn(sums, iterator)
except (StopIteration, errors.OutOfRangeError):
return sums
while_sums = sum_while_loop(
iter(dataset),
defun(lambda state, iterator: _reduce(state, next(iterator))))
self.assertAllEqual(
nest.flatten(while_sums),
# When there's no partial batch, the sum is smaller.
[200. if drop_remainder else 310.] * 3)
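    # (These constants follow from the data above: element i appears (i % 4)
    # times, so sum(i * (i % 4) for i in range(20)) == 310 for the full data,
    # while with drop_remainder only the first 16 elements survive, giving 200.)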
for_sums = defun(sum_for_loop)(dataset)
    # For loops always call get_next_as_optional inside tf.functions, so we
    # expect 310 here when using an input function (as there are 5 batches of
    # size 4 round-robined over 2 replicas).
expected_for_sum = 200.
if (not drop_remainder or
(defun_type == "tf_function" and input_type == "input_fn")):
expected_for_sum = 310.
self.assertAllEqual(nest.flatten(for_sums), [expected_for_sum] * 3)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu
],
input_type=["dataset", "input_fn"],
drop_remainder=[False, True],
tensor_type=["sparse", "ragged"],
enable_get_next_as_optional=[True, False]))
def testRaggedSparseGetNextAsOptional(self, distribution, input_type,
drop_remainder, tensor_type,
enable_get_next_as_optional):
"""Test with `RaggedTensor`s and `SparseTensor`s."""
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
global_batch_size = 8
def dataset_fn(ctx=None):
ctx = ctx or distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(global_batch_size)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
tensor_type: (ragged_tensor if tensor_type == "ragged" else
ragged_tensor.to_sparse()),
})
dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
return dataset.batch(batch_size, drop_remainder=drop_remainder)
if input_type == "dataset":
ds = distribution.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
else:
ds = distribution.distribute_datasets_from_function(dataset_fn)
iterator = iter(ds)
self.assertEqual(iterator._enable_get_next_as_optional,
(not drop_remainder) and enable_get_next_as_optional)
@combinations.generate(
combinations.combine(
tf_api_version=2,
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
],
input_type=["dataset", "input_fn"],
drop_remainder=[False, True],
))
def testRaggedSparseGetNextAsOptionalInLoop(self, distribution, input_type,
drop_remainder):
"""Test with `RaggedTensor`s and `SparseTensor`s."""
global_batch_size = 8
def dataset_fn(ctx=None):
ctx = ctx or distribute_lib.InputContext()
batch_size = ctx.get_per_replica_batch_size(global_batch_size)
# Use 20 which isn't divisible by 8 to test partial batch behavior.
row_lengths = np.mod(np.arange(20), 4).astype(np.int64)
ragged_tensor = ragged_tensor_lib.RaggedTensor.from_row_lengths(
np.repeat(np.arange(20, dtype=np.float32), row_lengths), row_lengths)
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"dense": ragged_tensor.to_tensor(),
"ragged": ragged_tensor,
"sparse": ragged_tensor.to_sparse(),
})
dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
return dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
if input_type == "dataset":
ds = distribution.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
else:
ds = distribution.distribute_datasets_from_function(dataset_fn)
# Iterate through all the batches and sum them up.
def sum_batch(per_replica_features):
"""Sums the `PerReplica` values in the `per_replica_features` map."""
def map_fn(per_replica_values):
def _sum(value):
if sparse_tensor.is_sparse(value):
return math_ops.reduce_sum(value.values)
else:
return math_ops.reduce_sum(value)
per_replica_sums = distribution.run(_sum, args=(per_replica_values,))
return distribution.reduce(
reduce_util.ReduceOp.SUM, per_replica_sums, axis=None)
return nest.map_structure(map_fn, per_replica_features)
def _reduce(state, batch):
sums = sum_batch(batch)
return {name: value + sums[name] for name, value in state.items()}
def sum_while_loop(ds):
iterator = iter(ds)
sums = {"dense": 0., "ragged": 0., "sparse": 0.}
try_next = constant_op.constant(True)
while try_next:
opt_iterate = iterator.get_next_as_optional()
if opt_iterate.has_value():
sums = _reduce(sums, opt_iterate.get_value())
else:
try_next = False
return sums
sums = def_function.function(sum_while_loop)(ds)
    # The while loop above calls get_next_as_optional explicitly, so we
    # expect 310 here when using an input function (as there are 5 batches of
    # size 4 round-robined over 2 replicas).
expected_for_sum = 200.
if not drop_remainder or input_type == "input_fn":
expected_for_sum = 310.
self.assertAllEqual(nest.flatten(sums), [expected_for_sum] * 3)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
]))
def testMWMSPartialBatch(self, input_type, api_type, iteration_type,
distribution):
# Test case: 2 workers, 1 replica each.
# This test simulates the sharded behavior when we have two files each with
# 12 elements and a global batch size of 8. When we consider the dataset in
# aggregate (non-distributed), there are 24 elements divided into 3 batches
# of size 8. Hence, the correct distributed behavior is for each replica to
# see sub-batches of size 4, over three steps.
def dataset_fn(ctx):
del ctx
dataset = dataset_ops.Dataset.range(12).batch(8)
# Set the sharding behavior to OFF for simplicity of test setup; namely,
# `dataset` defines the per-worker dataset and will not be further
# sharded. Each worker will see a dataset that is
# tf.data.Dataset.range(12).batch(8).rebatch(...).
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
dataset = dataset.with_options(options)
return dataset
dataset = self._create_dataset_or_input_fn(input_type, dataset_fn)
# Actual devices don't matter in this test as long as there is 1 local
# replica.
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
# Each test runs individually on each worker, so we compare the
# values on each worker. Each worker should rebatch its dataset into
# smaller batches of size 4.
expected_values = [[[0, 1, 2, 3]], [[4, 5, 6, 7]], [[8, 9, 10, 11]]]
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
]))
def testMWMSPartialBatchWithLegacyRebatch(self, input_type, api_type,
iteration_type, distribution):
# Test case: 2 workers, 1 replica each.
# This test simulates the sharded behavior when we have two files each with
# 12 elements and a global batch size of 8. When we consider the dataset in
# aggregate (non-distributed), there are 24 elements divided into 3 batches
# of size 8. Hence, the correct distributed behavior is for each replica to
# see sub-batches of size 4, over three steps. However, when we create a
# DistributedDataset and cannot statically infer the intended global batch
# size (e.g. if the user does not use a batching dataset), each worker will
# rebatch based on the dynamic batch size of the data encountered, even when
# it encounters partial batches. The last per-worker partial batch (size 4)
# ends up being split into two replicas, resulting in 4 steps in total, of
# (global) batch sizes 8, 8, 4, 4.
def dataset_fn(ctx):
del ctx
# The following dataset is equivalent to
# tf.data.Dataset.range(12).batch(8), but does not use a batching dataset.
# This causes DistributedDataset to use LegacyRebatch instead.
batch_sizes = dataset_ops.Dataset.from_tensor_slices([8, 4])
offsets = dataset_ops.Dataset.from_tensor_slices([0, 8])
dataset = dataset_ops.Dataset.zip((offsets, batch_sizes))
def map_fn(offset, batch_size):
return math_ops.range(offset, offset + batch_size)
dataset = dataset.map(map_fn)
# Set the sharding behavior to OFF for simplicity of test setup; namely,
# `dataset` defines the per-worker dataset and will not be further
# sharded. Each worker will see a dataset that is equivalent to
# tf.data.Dataset.range(12).batch(8).rebatch(...).
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
dataset = dataset.with_options(options)
return dataset
dataset = self._create_dataset_or_input_fn(input_type, dataset_fn)
# Actual devices don't matter in this test as long as the number of global
# replicas is 2.
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
# Each test runs individually on each worker, so we compare the
# values on each worker. Each worker should rebatch its dataset into
# smaller batches of size 4.
expected_values = [[[0, 1, 2, 3]], [[4, 5, 6, 7]], [[8, 9]], [[10, 11]]]
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
auto_shard_policy=[AutoShardPolicy.AUTO, AutoShardPolicy.DATA]))
def testMWMSWithDataSharding(self, input_type, api_type, iteration_type,
distribution, auto_shard_policy):
# Test case: 2 workers, 1 replica each.
# This test simulates the sharded behavior the dataset is sharded by data
# and the batch size is indivisible by the number of replicas. This checks
# that the elements are as expected and the batch size across all workers
# adds up to 3. This test will only pass if the autoshard rewrite rewrites
# RebatchDatasetV2 to legacy RebatchDataset when sharding by data.
def dataset_fn(ctx):
del ctx
dataset = dataset_ops.Dataset.range(8).batch(3)
      # Set the sharding behavior to the parameterized policy (AUTO or DATA).
      # With data sharding, each worker sees a different subset of the
      # elements of tf.data.Dataset.range(8).batch(3), rebatched so that the
      # per-step batch sizes across workers add up to 3.
options = options_lib.Options()
options.experimental_distribute.auto_shard_policy = auto_shard_policy
dataset = dataset.with_options(options)
return dataset
dataset = self._create_dataset_or_input_fn(input_type, dataset_fn)
# Actual devices don't matter in this test as long as there is 1 local
# replica.
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
# Each test runs individually on each worker, so we compare the
# values on each worker. We expect each worker to see different shards of
# data.
cr = distribution.cluster_resolver
worker_id = multi_worker_util.id_in_cluster(cr.cluster_spec(), cr.task_type,
cr.task_id)
if worker_id == 0:
expected_values = [[[0, 1]], [[3, 4]], [[6]]]
elif worker_id == 1:
expected_values = [[[2]], [[5]], [[7]]]
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
@framework_test_util.with_eager_op_as_function
class DistributedIteratorPerDeviceTest(DistributedIteratorTestBase,
parameterized.TestCase):
"""Tests for PER_WORKER and PER_REPLICA's InputOptions variants."""
def setUp(self):
context._reset_context()
strategy_combinations.set_virtual_cpus_to_at_least(3)
super(DistributedIteratorPerDeviceTest, self).setUp()
@combinations.generate(
combinations.combine(
input_options=[
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=True,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_WORKER),
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=True,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_REPLICA),
],
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations
.mirrored_strategy_with_two_gpus_no_merge_call,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
]))
def testDevicePlacementForPerWorkerValuesWithPrefetch(self, distribution,
input_options):
def dataset_fn(input_context): # pylint: disable=[unused-argument]
return dataset_ops.Dataset.from_tensor_slices([1, 2, 3, 4])
ds = distribution.experimental_distribute_datasets_from_function(
dataset_fn, input_options)
for x in ds:
assert x.values[0].device == distribution.extended.worker_devices[0]
assert x.values[0].backing_device == distribution.extended.worker_devices[
0]
assert x.values[1].device == distribution.extended.worker_devices[1]
assert x.values[1].backing_device == distribution.extended.worker_devices[
1]
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations
.mirrored_strategy_with_two_gpus_no_merge_call,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
input_options=[
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=False,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_WORKER)
],
mode=["eager"],
))
def testDevicePlacementForPerWorkerValuesWithoutPrefetch(
self, distribution, input_options):
def dataset_fn(input_context):
return dataset_ops.Dataset.from_tensor_slices(
np.full(4, input_context.input_pipeline_id))
ds = distribution.experimental_distribute_datasets_from_function(
dataset_fn, input_options)
for x in ds:
x = distribution.run(lambda inputs: inputs, args=(x,))
assert x.values[
0].device == "/job:localhost/replica:0/task:0/device:CPU:0"
assert x.values[
0].backing_device == "/job:localhost/replica:0/task:0/device:CPU:0"
assert x.values[
1].device == "/job:localhost/replica:0/task:0/device:CPU:0"
assert x.values[
1].backing_device == "/job:localhost/replica:0/task:0/device:CPU:0"
@combinations.generate(
combinations.combine(
input_options=[
distribute_lib.InputOptions(
experimental_place_dataset_on_device=True,
experimental_fetch_to_device=False,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_WORKER),
distribute_lib.InputOptions(
experimental_place_dataset_on_device=True,
experimental_fetch_to_device=True,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_REPLICA)
],
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations
.mirrored_strategy_with_two_gpus_no_merge_call,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
]))
def testDevicePlacementForInvalidCombinations(self, distribution,
input_options):
def dataset_fn(input_context):
return dataset_ops.Dataset.from_tensor_slices(
np.full(4, input_context.input_pipeline_id))
with self.assertRaises(ValueError):
distribution.experimental_distribute_datasets_from_function(
dataset_fn, input_options)
@combinations.generate(
combinations.combine(
input_options=[
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=False,
experimental_per_replica_buffer_size=2),
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=True,
experimental_per_replica_buffer_size=2),
],
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations
.mirrored_strategy_with_two_gpus_no_merge_call,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
]))
def testPrefetchBufferSizeInputOptions(self, distribution, input_options):
def dataset_fn(input_context):
return dataset_ops.Dataset.from_tensor_slices(
np.arange(1, 11).reshape(
(2, 5)) * (input_context.input_pipeline_id + 1))
ds = distribution.experimental_distribute_datasets_from_function(
dataset_fn, input_options)
# validating the values
x = next(iter(ds))
assert np.array_equal(x.values[0].numpy(), np.array([1, 2, 3, 4, 5]))
assert np.array_equal(x.values[1].numpy(), np.array([6, 7, 8, 9, 10]))
@combinations.generate(
combinations.combine(
input_options=[
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=False,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_WORKER),
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=True,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_WORKER),
],
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations
.mirrored_strategy_with_two_gpus_no_merge_call,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
]))
def testOutputValuesForPerWorkerInputOptions(self, distribution,
input_options):
def dataset_fn(input_context):
return dataset_ops.Dataset.from_tensor_slices(
np.arange(1, 11).reshape(
(2, 5)) * (input_context.input_pipeline_id + 1))
ds = distribution.experimental_distribute_datasets_from_function(
dataset_fn, input_options)
# validating the values
x = next(iter(ds))
assert np.array_equal(x.values[0].numpy(), np.array([1, 2, 3, 4, 5]))
assert np.array_equal(x.values[1].numpy(), np.array([6, 7, 8, 9, 10]))
@combinations.generate(
combinations.combine(
input_options=[
distribute_lib.InputOptions(
experimental_place_dataset_on_device=True,
experimental_fetch_to_device=False,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_REPLICA),
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=False,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_REPLICA),
distribute_lib.InputOptions(
experimental_place_dataset_on_device=False,
experimental_fetch_to_device=True,
experimental_replication_mode=distribute_lib
.InputReplicationMode.PER_REPLICA),
],
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations
.mirrored_strategy_with_two_gpus_no_merge_call,
strategy_combinations.mirrored_strategy_with_cpu_1_and_2,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
]))
def testOutputValuesForPerReplicaInputOptions(self, distribution,
input_options):
def dataset_fn(input_context):
return dataset_ops.Dataset.from_tensor_slices(
np.arange(1, 10) * (input_context.input_pipeline_id + 1))
ds = distribution.experimental_distribute_datasets_from_function(
dataset_fn, input_options)
expected = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
for i, x in enumerate(ds):
# validating the values
assert x.values[0].numpy() == expected[i]
assert x.values[1].numpy() == expected[i] * 2
loop_num = i
assert loop_num == len(expected) - 1
class DistributedIteratorTfDataServiceTest(DistributedIteratorTestBase,
parameterized.TestCase):
"""Tests for distributed iterators which read from tf.data service."""
def setUp(self):
super(DistributedIteratorTfDataServiceTest, self).setUp()
self.num_workers = 3
if combinations.in_main_process():
self.dispatcher = server_lib.DispatchServer()
self.workers = []
for _ in range(self.num_workers):
self.workers.append(
server_lib.WorkerServer(
server_lib.WorkerConfig(
dispatcher_address=self.dispatcher.target.split("://")[1],
heartbeat_interval_ms=100,
dispatcher_timeout_ms=1000)))
combinations.env().tf_data_service_dispatcher = self.dispatcher.target
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
]))
def testTfDataService(self, distribution):
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
input_workers = input_lib.InputWorkers(worker_device_pairs)
dataset = dataset_ops.Dataset.range(1, 50)
dataset = dataset.apply(
data_service_ops._distribute(
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=combinations.env().tf_data_service_dispatcher,
job_name="foo"))
dist_dataset = input_util.get_distributed_dataset(dataset, input_workers,
distribution)
iterator = iter(dist_dataset)
results = []
for element in iterator:
local_results = distribution.experimental_local_results(element)
for result in local_results:
# input_lib.distributed_dataset may add extra '0' elements to pad
# per-replica results.
if result.numpy() != 0:
results.append(result.numpy())
self.assertNotEmpty(results)
gathered = distribution.gather(constant_op.constant(results), axis=0)
self.assertCountEqual(self.num_workers * list(range(1, 50)), gathered)
histogram_proto = (
input_lib._distributed_dataset_initialization_time_milliseconds
.get_cell(distribution.__class__.__name__, "1").value())
self.assertGreater(histogram_proto.num, 0.0)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
]))
def testDistributeDatasetFromFunction(self, distribution):
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
input_workers = input_lib.InputWorkers(worker_device_pairs)
input_contexts = []
num_workers = input_workers.num_workers
for i in range(num_workers):
input_contexts.append(distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=num_workers))
dataset = dataset_ops.Dataset.range(1, 50)
dataset_id = data_service_ops.register_dataset(
service=combinations.env().tf_data_service_dispatcher,
dataset=dataset)
def dataset_fn(input_context):
del input_context
return data_service_ops.from_dataset_id(
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=combinations.env().tf_data_service_dispatcher,
dataset_id=dataset_id,
element_spec=dataset.element_spec,
job_name="shared_job")
dist_dataset = input_util.get_distributed_datasets_from_function(
dataset_fn, input_workers, input_contexts, distribution)
iterator = iter(dist_dataset)
results = []
for element in iterator:
local_results = distribution.experimental_local_results(element)
for result in local_results:
# input_lib.distributed_dataset may add extra '0' elements to pad
# per-replica results.
if result.numpy() != 0:
results.append(result.numpy())
self.assertNotEmpty(results)
gathered = distribution.gather(constant_op.constant(results), axis=0)
self.assertCountEqual(self.num_workers * list(range(1, 50)), gathered)
histogram_proto = (
input_lib
._distributed_dataset_from_function_initialization_time_milliseconds
.get_cell(distribution.__class__.__name__, "1").value())
self.assertGreater(histogram_proto.num, 0.0)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
]))
def testDistributeDatasetFromFunctionNested(self, distribution):
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
input_workers = input_lib.InputWorkers(worker_device_pairs)
input_contexts = []
num_workers = input_workers.num_workers
for i in range(num_workers):
input_contexts.append(
distribute_lib.InputContext(
num_input_pipelines=num_workers,
input_pipeline_id=i,
num_replicas_in_sync=num_workers))
class InnerType(extension_type.ExtensionType):
tensor: ops.Tensor
class OuterType(extension_type.ExtensionType):
inner: InnerType
def dataset_fn(input_context):
del input_context
def data_fn(batch_id) -> OuterType:
del batch_id
return OuterType(
inner=InnerType(tensor=constant_op.constant([[0., 1.], [2., 3.]])))
return dataset_ops.Dataset.range(1, 10).map(data_fn)
dist_dataset = input_util.get_distributed_datasets_from_function(
dataset_fn, input_workers, input_contexts, distribution)
iterator = iter(dist_dataset)
results = []
for element in iterator:
local_results = distribution.experimental_local_results(element)
for result in local_results:
results.append(result)
expect_component = OuterType(
inner=InnerType(tensor=constant_op.constant([[0., 1.], [2., 3.]])))
self.assertCountEqual(
num_workers * [expect_component for _ in range(1, 10)], results)
if __name__ == "__main__":
test_util.main()
| apache-2.0 |
tensorflow/tensorflow-experimental_link_static_libraries_once | tensorflow/python/data/experimental/kernel_tests/rebatch_dataset_test.py | 9 | 18574 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_RebatchDataset` transformation."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import image_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
class BatchSizesForWorkerTest(test_base.DatasetTestBase,
parameterized.TestCase):
def _test(self, global_batch_size, num_workers, num_replicas_per_worker,
is_batch_size_static):
"""Test that all constraints are met for given parameters."""
if not is_batch_size_static:
# Adding a constant value here prevents downstream computation from
# statically deriving the value of global batch size when running
# in graph mode.
global_batch_size += constant_op.constant(0, dtypes.int64)
batch_sizes_list = []
for i in range(num_workers):
batch_sizes_list.append(
self.evaluate(
distribute.batch_sizes_for_worker(global_batch_size, num_workers,
num_replicas_per_worker, i)))
for batch_sizes in batch_sizes_list:
# Constraint (A): for any worker, len(batch_sizes) == W * R
self.assertLen(batch_sizes, num_workers * num_replicas_per_worker)
# Constraint (B): for any worker, sum(batch_sizes) == G
self.assertAllEqual(np.sum(batch_sizes), global_batch_size)
# Each per-worker batch is split into num_workers global steps
for step_index in range(num_workers):
actual_global_batch = 0
offset = step_index * num_replicas_per_worker
for batch_sizes in batch_sizes_list:
actual_global_batch += np.sum(batch_sizes[offset:offset +
num_replicas_per_worker])
      # Constraint (C): for any step, the batch sizes across all workers add
      # up to G.
self.assertAllEqual(
global_batch_size,
actual_global_batch,
)
    # Constraint (D): the batch sizes of any two replicas differ by at most one.
self.assertLessEqual(np.max(batch_sizes_list) - np.min(batch_sizes_list), 1)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(is_batch_size_static=[True, False])))
def testBasic(self, is_batch_size_static):
# Manually verify basic test case.
global_batch_size = 8
num_workers = 2
num_replicas_per_worker = 2
for worker_index in range(4):
batch_sizes = distribute.batch_sizes_for_worker(global_batch_size,
num_workers,
num_replicas_per_worker,
worker_index)
self.assertAllEqual([2, 2, 2, 2],
tensor_util.constant_value(batch_sizes))
self._test(global_batch_size, num_workers, num_replicas_per_worker,
is_batch_size_static)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(is_batch_size_static=[True, False])))
def testBatchSizeIndivisibleByNumWorkers(self, is_batch_size_static):
global_batch_size = 4
num_workers = 3
num_replicas_per_worker = 1
def get_batch_sizes_for_worker(worker_index):
return tensor_util.constant_value(
distribute.batch_sizes_for_worker(global_batch_size, num_workers,
num_replicas_per_worker,
worker_index))
# Manually verify this test case.
self.assertAllEqual([2, 1, 1], get_batch_sizes_for_worker(0))
self.assertAllEqual([1, 1, 2], get_batch_sizes_for_worker(1))
self.assertAllEqual([1, 2, 1], get_batch_sizes_for_worker(2))
self._test(global_batch_size, num_workers, num_replicas_per_worker,
is_batch_size_static)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(is_batch_size_static=[True, False])))
def testBatchSizeIndivisibleByNumReplicas(self, is_batch_size_static):
self._test(
global_batch_size=4,
num_workers=1,
num_replicas_per_worker=5,
is_batch_size_static=is_batch_size_static)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(is_batch_size_static=[True, False])))
def testBatchSizeSmallerThanNumReplicas(self, is_batch_size_static):
self._test(
global_batch_size=4,
num_workers=2,
num_replicas_per_worker=5,
is_batch_size_static=is_batch_size_static)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(is_batch_size_static=[True, False])))
def testBatchSizeSmallerThanNumWorkers(self, is_batch_size_static):
self._test(
global_batch_size=4,
num_workers=5,
num_replicas_per_worker=1,
is_batch_size_static=is_batch_size_static)
def _flat_shapes(dataset):
return [
ts.as_list()
for ts in nest.flatten(dataset_ops.get_legacy_output_shapes(dataset))
]
class LegacyRebatchDatasetTest(test_base.DatasetTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(drop_remainder=[True, False])))
def testBasic(self, drop_remainder):
dataset = dataset_ops.Dataset.range(8).batch(
4, drop_remainder=drop_remainder)
rebatched_dataset = distribute._LegacyRebatchDataset(
dataset, num_replicas=2)
expected_shapes = [[2]] if drop_remainder else [[None]]
self.assertEqual(expected_shapes, _flat_shapes(rebatched_dataset))
expected_output = [[0, 1], [2, 3], [4, 5], [6, 7]]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(test_base.default_test_combinations())
def testCanHandleUnknownRank(self):
dataset = dataset_ops.Dataset.from_tensors("xxx")
# decode_image results in a tensor of completely unknown shape (i.e. unknown
# rank)
dataset = dataset.map(image_ops.decode_image)
self.assertEqual([tensor_shape.TensorShape(None)],
nest.flatten(
dataset_ops.get_legacy_output_shapes(dataset)))
rebatched_dataset = distribute._LegacyRebatchDataset(
dataset, num_replicas=4)
# Note that we are just testing the dataset shapes, not the actual output.
self.assertEqual(
[tensor_shape.TensorShape(None)],
nest.flatten(dataset_ops.get_legacy_output_shapes(rebatched_dataset)))
@combinations.generate(test_base.default_test_combinations())
def testCanHandleUnknownDims(self):
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.batch(10, drop_remainder=False)
dataset = dataset.batch(10, drop_remainder=False)
self.assertEqual([[None, None]], _flat_shapes(dataset))
rebatched_dataset = distribute._LegacyRebatchDataset(
dataset, num_replicas=4)
# Note that we are just testing the dataset shapes, not the actual output.
self.assertEqual([[None, None]], _flat_shapes(rebatched_dataset))
@combinations.generate(test_base.default_test_combinations())
def testScalarInputError(self):
dataset = dataset_ops.Dataset.range(1024)
distribute._LegacyRebatchDataset(dataset.batch(4), num_replicas=4)
with self.assertRaises(ValueError):
distribute._LegacyRebatchDataset(dataset, num_replicas=4)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(drop_remainder=[True, False])))
def testBatchNotDivisibleByNumReplicas(self, drop_remainder):
dataset = dataset_ops.Dataset.range(8).batch(
4, drop_remainder=drop_remainder)
rebatched_dataset = distribute._LegacyRebatchDataset(
dataset, num_replicas=3)
self.assertEqual([[None]], _flat_shapes(rebatched_dataset))
# This rebatches into sub-batches of size 2, since ceil(4 / 3) = 2. However,
# this means that only the first 2 replicas will get data.
expected_output = [[0, 1], [2, 3], [], [4, 5], [6, 7], []]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(test_base.default_test_combinations())
def testTupleOutput(self):
dataset = dataset_ops.Dataset.range(1024).map(lambda x: (x, x)).batch(32)
rebatched_dataset = distribute._LegacyRebatchDataset(
dataset, num_replicas=4)
expected_output = [([k for k in range(i, i + 8)], # pylint: disable=g-complex-comprehension
[k for k in range(i, i + 8)])
for i in range(0, 1024, 8)]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(test_base.default_test_combinations())
def testNestedDictionaryOutput(self):
dataset = dataset_ops.Dataset.range(8).map(
lambda x: {"a": x, "b": {"c": x + 1}}).batch(4)
rebatched_dataset = distribute._LegacyRebatchDataset(
dataset, num_replicas=2)
expected_output = [{"a": [0, 1], "b": {"c": [1, 2]}},
{"a": [2, 3], "b": {"c": [3, 4]}},
{"a": [4, 5], "b": {"c": [5, 6]}},
{"a": [6, 7], "b": {"c": [7, 8]}}]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(drop_remainder=[True, False])))
def testFinalPartialBatch(self, drop_remainder):
dataset = dataset_ops.Dataset.range(10).batch(
4, drop_remainder=drop_remainder)
rebatched_dataset = distribute._LegacyRebatchDataset(
dataset, num_replicas=2)
self.assertEqual([[2] if drop_remainder else [None]],
_flat_shapes(rebatched_dataset))
if drop_remainder:
expected_output = [[0, 1], [2, 3], [4, 5], [6, 7]]
else:
expected_output = [[0, 1], [2, 3], [4, 5], [6, 7], [8], [9]]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(drop_remainder=[True, False])))
def testFinalPartialBatchAfterRebatch(self, drop_remainder):
dataset = dataset_ops.Dataset.range(9).batch(
4, drop_remainder=drop_remainder)
rebatched_dataset = distribute._LegacyRebatchDataset(
dataset, num_replicas=2)
self.assertEqual([[2] if drop_remainder else [None]],
_flat_shapes(rebatched_dataset))
if drop_remainder:
expected_output = [[0, 1], [2, 3], [4, 5], [6, 7]]
else:
expected_output = [[0, 1], [2, 3], [4, 5], [6, 7], [8], []]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(test_base.default_test_combinations())
def testMultipleBatches(self):
dataset = dataset_ops.Dataset.range(16).batch(2).batch(4)
self.assertEqual([[None, None]], _flat_shapes(dataset))
# Each element is a list of 4 elements where each element is a list of 2.
expected_output = [[[0, 1], [2, 3], [4, 5], [6, 7]],
[[8, 9], [10, 11], [12, 13], [14, 15]]]
self.assertDatasetProduces(dataset, expected_output)
rebatched_dataset = distribute._LegacyRebatchDataset(dataset, 2)
self.assertEqual([[None, None]], _flat_shapes(rebatched_dataset))
# Each element is a list of 2 elements where each element is a list of 2.
expected_output = [[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]],
[[12, 13], [14, 15]]]
self.assertDatasetProduces(rebatched_dataset, expected_output)
@combinations.generate(test_base.default_test_combinations())
def testRaggedTensorDataset(self):
# Set up a dataset that produces ragged tensors with a static batch size.
row_lengths = np.random.randint(8, size=128)
values = np.random.normal(size=np.sum(row_lengths)).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(
ragged_tensor.RaggedTensor.from_row_lengths(values, row_lengths))
dataset = dataset.batch(32, drop_remainder=True)
# The map changes the internal representation of the ragged tensor.
# This test will fail if we don't normalize the tensor representation.
dataset = dataset.map(lambda x: x)
dataset = distribute._LegacyRebatchDataset(dataset, num_replicas=8)
# After rebatching, batch size is now 4.
expected_output = []
value_index = 0
for batch_row_lengths in row_lengths.reshape((-1, 4)):
num_values = np.sum(batch_row_lengths)
expected_output.append(
ragged_tensor.RaggedTensor.from_row_lengths(
values[value_index:(value_index + num_values)],
batch_row_lengths))
value_index += num_values
self.assertDatasetProduces(dataset, expected_output)
@combinations.generate(test_base.default_test_combinations())
def testNoneDataset(self):
# Some datasets, e.g. datasets with None tensors, have components without
# output shapes. Test that this doesn't break rebatching shape inference
# logic.
dataset = dataset_ops.Dataset.range(4)
dataset = dataset.map(lambda x: (x, None))
dataset = dataset.batch(4, drop_remainder=True)
_ = distribute._LegacyRebatchDataset(dataset, num_replicas=2)
class ComputeBatchSizeTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testComputeBatchSizeKnown(self):
# When drop_remainder=True, batch size can be inferred from the type spec.
dataset = dataset_ops.Dataset.range(32).batch(4, drop_remainder=True)
dataset = dataset_ops.Dataset.zip((dataset, dataset))
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(4, self.evaluate(batch_size))
@combinations.generate(test_base.default_test_combinations())
def testComputeBatchSizeKnownAndMismatched(self):
# Return -1 when different components have different batch sizes.
dataset = dataset_ops.Dataset.range(32)
dataset = dataset_ops.Dataset.zip((dataset.batch(4, drop_remainder=True),
dataset.batch(8, drop_remainder=True)))
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(-1, self.evaluate(batch_size))
@combinations.generate(test_base.default_test_combinations())
def testComputeBatchSizeUnknown(self):
dataset = dataset_ops.Dataset.range(32).batch(4)
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(4, self.evaluate(batch_size))
@combinations.generate(test_base.default_test_combinations())
def testComputeBatchSizeWithPassthrough(self):
dataset = dataset_ops.Dataset.range(32).batch(4)
dataset = dataset.take(5)
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(4, self.evaluate(batch_size))
@combinations.generate(test_base.default_test_combinations())
def testComputeBatchSizeWithPassthroughInvalid(self):
dataset = dataset_ops.Dataset.range(32).batch(4)
dataset = dataset.map(lambda x: x + 1)
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(-1, self.evaluate(batch_size))
@combinations.generate(test_base.default_test_combinations())
def testComputeBatchSizeWithZip(self):
dataset = dataset_ops.Dataset.range(32).batch(4)
dataset = dataset_ops.Dataset.zip((dataset, dataset))
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(4, self.evaluate(batch_size))
@combinations.generate(test_base.default_test_combinations())
def testComputeBatchSizeWithZipMismatched(self):
dataset = dataset_ops.Dataset.range(32)
dataset = dataset_ops.Dataset.zip((dataset.batch(4), dataset.batch(8)))
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(-1, self.evaluate(batch_size))
@combinations.generate(test_base.default_test_combinations())
def testNoneDataset(self):
# Some datasets, e.g. datasets with None tensors, have components without
    # output shapes. Test that this doesn't break the batch size computation.
dataset = dataset_ops.Dataset.range(4)
dataset = dataset.map(lambda x: (x, None))
dataset = dataset.batch(4, drop_remainder=True)
batch_size = distribute.compute_batch_size(dataset)
self.assertEqual(4, self.evaluate(batch_size))
class LegacyRebatchDatasetCheckpointTest(
checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations()))
def test(self, verify_fn):
def build_dataset(num_elements, batch_size):
return distribute._LegacyRebatchDataset(
dataset_ops.Dataset.range(num_elements).batch(
4 * batch_size, drop_remainder=True),
num_replicas=4)
verify_fn(self, lambda: build_dataset(64, 8), num_outputs=8)
if __name__ == "__main__":
test.main()
| apache-2.0 |
arabenjamin/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 303 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example visually illustrates, in the feature space, a comparison of
the results obtained with two different component analysis techniques:
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process: two Student t variables with a low number of degrees of freedom
(top left figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
heli522/scikit-learn | sklearn/datasets/california_housing.py | 197 | 3877 | """California housing dataset.
The original database is available from StatLib
http://lib.stat.cmu.edu/
The data contains 20,640 observations on 9 variables.
This dataset contains the average house value as target variable
and the following input variables (features): average income,
housing average age, average rooms, average bedrooms, population,
average occupation, latitude, and longitude in that order.
References
----------
Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,
Statistics and Probability Letters, 33 (1997) 291-297.
"""
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from io import BytesIO
from os.path import join, exists
from os import makedirs
from zipfile import ZipFile
try:
# Python 2
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.request import urlopen
import numpy as np
from .base import get_data_home, Bunch
from ..externals import joblib
DATA_URL = "http://lib.stat.cmu.edu/modules.php?op=modload&name=Downloads&"\
"file=index&req=getit&lid=83"
TARGET_FILENAME = "cal_housing.pkz"
# Grab the module-level docstring to use as a description of the
# dataset
MODULE_DOCS = __doc__
def fetch_california_housing(data_home=None, download_if_missing=True):
"""Loader for the California housing dataset from StatLib.
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : ndarray, shape [20640, 8]
Each row corresponding to the 8 feature values in order.
dataset.target : numpy array of shape (20640,)
Each value corresponds to the average house value in units of 100,000.
dataset.feature_names : array of length 8
Array of ordered feature names used in the dataset.
dataset.DESCR : string
Description of the California housing dataset.
Notes
------
    This dataset consists of 20,640 samples and 8 features.
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
if not exists(join(data_home, TARGET_FILENAME)):
print('downloading Cal. housing from %s to %s' % (DATA_URL, data_home))
fhandle = urlopen(DATA_URL)
buf = BytesIO(fhandle.read())
zip_file = ZipFile(buf)
try:
cadata_fd = zip_file.open('cadata.txt', 'r')
cadata = BytesIO(cadata_fd.read())
# skip the first 27 lines (documentation)
cal_housing = np.loadtxt(cadata, skiprows=27)
joblib.dump(cal_housing, join(data_home, TARGET_FILENAME),
compress=6)
finally:
zip_file.close()
else:
cal_housing = joblib.load(join(data_home, TARGET_FILENAME))
feature_names = ["MedInc", "HouseAge", "AveRooms", "AveBedrms",
"Population", "AveOccup", "Latitude", "Longitude"]
target, data = cal_housing[:, 0], cal_housing[:, 1:]
# avg rooms = total rooms / households
data[:, 2] /= data[:, 5]
# avg bed rooms = total bed rooms / households
data[:, 3] /= data[:, 5]
    # avg occupancy = population / households
data[:, 5] = data[:, 4] / data[:, 5]
# target in units of 100,000
target = target / 100000.0
return Bunch(data=data,
target=target,
feature_names=feature_names,
DESCR=MODULE_DOCS)
| bsd-3-clause |
arabenjamin/scikit-learn | examples/model_selection/plot_precision_recall.py | 248 | 6150 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
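As a quick worked example (the counts are arbitrary and chosen only for
illustration): a classifier yielding :math:`T_p = 8`, :math:`F_p = 2` and
:math:`F_n = 4` has :math:`P = 8/10 = 0.8`, :math:`R = 8/12 \\approx 0.67` and
:math:`F1 = 2 \\times (0.8 \\times 0.67)/(0.8 + 0.67) \\approx 0.73`.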
It is important to note that precision does not necessarily decrease as
recall increases. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average ROC curve and ROC area
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i in range(n_classes):
plt.plot(recall[i], precision[i],
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
michaelgugino/turbo-lister | sqlalchemy/orm/query.py | 2 | 128892 | # orm/query.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from ..sql import operators
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`. For a full
walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_lockmode = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_offset = None
_limit = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_current_path = _path_registry
def __init__(self, entities, session=None):
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if not ext_info.is_aliased_class and \
ext_info.mapper.with_polymorphic:
if ext_info.mapper.mapped_table not in \
self._polymorphic_adapters:
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
else:
aliased_adapter = None
d[entity] = (
ext_info,
aliased_adapter
)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
if set_base_alias:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set.")
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance.")
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if set_base_alias and \
len(self._from_obj) == 1 and \
isinstance(select_from_alias, expression.Alias):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_text(o),
True, True)
for o in cols
]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
orm_only = getattr(self, '_orm_only_adapt', orm_only)
if as_filter and self._filter_aliases:
for fa in self._filter_aliases._visitor_iterator:
adapters.append(
(
orm_only, fa.replace
)
)
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check, if this query were generated from a
# subquery of itself, i.e. _from_selectable(), apply adaption
# to all SQL constructs.
adapters.append(
(
getattr(self, '_orm_only_from_obj_alias', orm_only),
self._from_obj_alias.replace
)
)
if self._polymorphic_adapters:
adapters.append(
(
orm_only, self._adapt_polymorphic_element
)
)
if not adapters:
return clause
def replace(elem):
for _orm_only, adapter in adapters:
# if 'orm only', look for ORM annotations
# in the element before adapting.
if not _orm_only or \
'_orm_adapt' in elem._annotations or \
"parententity" in elem._annotations:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(
clause,
{},
replace
)
def _entity_zero(self):
return self._entities[0]
def _mapper_zero(self):
return self._select_from_entity or \
self._entity_zero().entity_zero
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._mapper_zero()
)
def _mapper_zero_or_none(self):
if self._primary_entity:
return self._primary_entity.mapper
else:
return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._mapper_zero()
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
self._order_by = self._distinct = False
return self._no_criterion_condition("get")
def _no_criterion_condition(self, meth):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or self._order_by or self._distinct:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
("Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
def _no_select_modifiers(self, meth):
if not self._enable_assertions:
return
for attr, methname, notset in (
('_limit', 'limit()', None),
('_offset', 'offset()', None),
('_order_by', 'order_by()', False),
('_group_by', 'group_by()', False),
('_distinct', 'distinct()', False),
):
if getattr(self, attr) is not notset:
raise sa_exc.InvalidRequestError(
"Can't call Query.%s() when %s has been called" %
(meth, methname)
)
def _get_options(self, populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).\
statement
if self._params:
stmt = stmt.params(self._params)
# TODO: there's no tests covering effects of
# the annotation not being there
return stmt._annotate({'no_replacement_traverse': True})
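    # Illustrative use of the .statement accessor described above (a hedged
    # sketch; ``User`` is a hypothetical mapped class and ``session`` a
    # hypothetical Session):
    #
    #     stmt = session.query(User).filter(User.name == 'ed').statement
    #     print(stmt)   # renders the SELECT with bound parameter markers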
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
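    # Illustrative use of subquery() (a hedged sketch; ``User`` / ``Address``
    # are hypothetical mapped classes and ``func`` is sqlalchemy.func):
    #
    #     subq = session.query(Address.user_id,
    #                          func.count('*').label('address_count')).\
    #         group_by(Address.user_id).subquery()
    #     q = session.query(User, subq.c.address_count).\
    #         outerjoin(subq, User.id == subq.c.user_id)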
def cte(self, name=None, recursive=False):
"""Return the full SELECT statement represented by this
:class:`.Query` represented as a common table expression (CTE).
.. versionadded:: 0.7.6
Parameters and usage are the same as those of the
:meth:`.SelectBase.cte` method; see that method for
further details.
Here is the `Postgresql WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`.orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\\
filter(Part.part=="our part").\\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.part,
parts_alias.sub_part,
parts_alias.quantity).\\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\\
group_by(included_parts.c.sub_part)
See also:
:meth:`.SelectBase.cte`
"""
return self.enable_eagerloads(False).\
statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.SelectBaseMixin.label`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.SelectBaseMixin.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
@property
def selectable(self):
"""Return the :class:`.Select` object emitted by this :class:`.Query`.
Used for :func:`.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable.
"""
self._enable_eagerloads = value
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
cls_or_mappers,
selectable=None,
polymorphic_on=None):
"""Load columns for inheriting classes.
:meth:`.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`.Query`.
The "main" mapped class here means the :class:`.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
.. versionchanged:: 0.8
A new and more flexible function
:func:`.orm.with_polymorphic` supersedes
:meth:`.Query.with_polymorphic`, as it can apply the equivalent
functionality to any set of columns or classes in the
:class:`.Query`, not just the "zero mapper". See that
function for a description of arguments.
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query.")
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on)
@_generative()
def yield_per(self, count):
"""Yield only ``count`` rows at a time.
WARNING: use this method with caution; if the same instance is present
in more than one batch of rows, end-user changes to attributes will be
overwritten.
In particular, it's usually impossible to use this setting with
eagerly loaded collections (i.e. any lazy='joined' or 'subquery')
since those collections will be cleared for a new load when
encountered in a subsequent result batch. In the case of 'subquery'
loading, the full result for all rows is fetched which generally
defeats the purpose of :meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while :meth:`~sqlalchemy.orm.query.Query.yield_per`
will set the ``stream_results`` execution option to True, currently
        this is only understood by the
        :mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect, which will
        stream results using server-side cursors instead of pre-buffering
        all rows for this query.  Other DBAPIs pre-buffer all rows before
making them available.
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True})
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
:meth:`~.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`~.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`~.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`~.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`~.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading`
for further details on relationship loading.
:param ident: A scalar or tuple value representing
the primary key. For a composite primary key,
the order of identifiers corresponds in most cases
to that of the mapped :class:`.Table` object's
primary key columns. For a :func:`.mapper` that
was given the ``primary key`` argument during
construction, the order of identifiers corresponds
to the elements present in this collection.
:return: The object instance, or ``None``.
"""
# convert composite types to individual args
if hasattr(ident, '__composite_values__'):
ident = ident.__composite_values__()
ident = util.to_list(ident)
mapper = self._only_full_mapper_zero("get")
if len(ident) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s" %
','.join("'%s'" % c for c in mapper.primary_key))
key = mapper.identity_key_from_primary_key(ident)
if not self._populate_existing and \
not mapper.always_refresh and \
self._lockmode is None:
instance = loading.get_from_identity(
self.session, key, attributes.PASSIVE_OFF)
if instance is not None:
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
return loading.load_on_ident(self, key)
@_generative()
def correlate(self, *args):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`.Select.correlate` after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`.Query.from_self` is used, or when
a subquery as returned by :meth:`.Query.subquery` is
embedded in another :func:`~.expression.select` construct.
"""
self._correlate = self._correlate.union(
_interpret_as_from(s)
if s is not None else None
for s in args)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
"""
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is self._mapper_zero():
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
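    # Illustrative use of with_parent() (a hedged sketch; ``User`` /
    # ``Address`` and a ``User.addresses`` relationship are hypothetical):
    #
    #     user = session.query(User).get(5)
    #     # with property=None, the relating relationship is located
    #     # automatically against this query's target mapper (Address):
    #     addresses = session.query(Address).with_parent(user).all()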
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`Query` that will use the given :class:`.Session`.
"""
self.session = session
def from_self(self, *entities):
"""return a Query that selects from this Query's
SELECT statement.
\*entities - optional list of entities which will replace
those being selected.
"""
fromclause = self.with_labels().enable_eagerloads(False).\
_enable_single_crit(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
if entities:
q._set_entities(entities)
return q
@_generative()
def _enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
'_statement', '_criterion',
'_order_by', '_group_by',
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
'_prefixes',
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
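    # Illustrative use of values() / value() (a hedged sketch; ``User`` is a
    # hypothetical mapped class and ``func`` is sqlalchemy.func):
    #
    #     for name, fullname in session.query(User).values(
    #             User.name, User.fullname):
    #         print(name, fullname)
    #
    #     highest_id = session.query(User).value(func.max(User.id))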
@_generative()
def with_entities(self, *entities):
"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\\
join(User.address).\\
filter(User.name.like('%ed%')).\\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\\
order_by(None).\\
filter(User.id==5).\\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded by :meth:`.add_columns`",
False)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading` for reference
documentation.
"""
return self._options(False, *args)
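    # Illustrative use of options() (a hedged sketch; a ``User`` class with an
    # ``addresses`` relationship and a deferrable ``bio`` column is
    # hypothetical):
    #
    #     from sqlalchemy.orm import joinedload, defer
    #
    #     q = session.query(User).options(
    #         joinedload('addresses'),   # eagerly load the collection
    #         defer('bio'))              # defer loading of a column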
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or ORM entity / mapped class
/etc.
"""
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
"""
self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
"""Return a new Query object with the specified locking mode.
:param mode: a string representing the desired locking mode. A
corresponding value is passed to the ``for_update`` parameter of
:meth:`~sqlalchemy.sql.expression.select` when the query is
executed. Valid values are:
``'update'`` - passes ``for_update=True``, which translates to
``FOR UPDATE`` (standard SQL, supported by most dialects)
``'update_nowait'`` - passes ``for_update='nowait'``, which
translates to ``FOR UPDATE NOWAIT`` (supported by Oracle,
PostgreSQL 8.1 upwards)
``'read'`` - passes ``for_update='read'``, which translates to
``LOCK IN SHARE MODE`` (for MySQL), and ``FOR SHARE`` (for
PostgreSQL)
``'read_nowait'`` - passes ``for_update='read_nowait'``, which
translates to ``FOR SHARE NOWAIT`` (supported by PostgreSQL).
.. versionadded:: 0.7.7
``FOR SHARE`` and ``FOR SHARE NOWAIT`` (PostgreSQL).
"""
self._lockmode = mode
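    # Illustrative use of with_lockmode() (a hedged sketch; ``Account`` is a
    # hypothetical mapped class):
    #
    #     acct = session.query(Account).\
    #         with_lockmode('update').\
    #         filter(Account.id == 10).\
    #         one()    # emits SELECT ... FOR UPDATE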
@_generative()
def params(self, *args, **kwargs):
"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
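    # Illustrative use of params() (a hedged sketch; ``User`` is a
    # hypothetical mapped class and ``text`` is sqlalchemy.text):
    #
    #     q = session.query(User).filter(text("user.name = :name"))
    #     q = q.params(name='ed')           # keyword form
    #     q = q.params({'name': 'ed'})      # single-dictionary form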
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. versionchanged:: 0.7.5
Multiple criteria joined by AND.
See also:
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
See also:
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ORDER BY configured
on mappers as well.
Alternatively, an existing ORDER BY setting on the Query
object can be entirely cancelled by passing ``False``
as the value - use this before calling methods where
an ORDER BY is invalid.
"""
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
del self._order_by
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`"""
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`having` is used in conjunction with :meth:`group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, eg.::
q = session.query(User.id).\\
join(User.addresses).\\
group_by(User.id).\\
having(func.count(Address.id) > 2)
"""
if isinstance(criterion, util.string_types):
criterion = sql.text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._from_selectable(
expression.union(*([self] + list(q))))
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.union_all(*([self] + list(q)))
)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect(*([self] + list(q)))
)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect_all(*([self] + list(q)))
)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_(*([self] + list(q)))
)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_all(*([self] + list(q)))
)
def join(self, *props, **kwargs):
"""Create a SQL JOIN against this :class:`.Query` object's criterion
and apply generatively, returning the newly resulting :class:`.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most common
usage of :meth:`~.Query.join` is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`~.Query.join` along ``User.addresses``
will result in SQL equivalent to::
SELECT user.* FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`~.Query.join` as the *on clause*, that is, it indicates
how the "ON" portion of the JOIN should be constructed. For a
single-entity query such as the one above (i.e. we start by selecting
only from ``User`` and nothing else), the relationship can also be
specified by its string name::
q = session.query(User).join("addresses")
:meth:`~.Query.join` can also accommodate multiple
"on clause" arguments to produce a chain of joins, such as below
where a join across four related entities is constructed::
q = session.query(User).join("orders", "items", "keywords")
The above would be shorthand for three separate calls to
:meth:`~.Query.join`, each using an explicit attribute to indicate
the source entity::
q = session.query(User).\\
join(User.orders).\\
join(Order.items).\\
join(Item.keywords)
**Joins to a Target Entity or Selectable**
A second form of :meth:`~.Query.join` allows any mapped entity
or core selectable construct as a target. In this usage,
:meth:`~.Query.join` will attempt
to create a JOIN along the natural foreign key relationship between
two entities::
q = session.query(User).join(Address)
The above calling form of :meth:`.join` will raise an error if
either there are no foreign keys between the two entities, or if
there are multiple foreign key linkages between them. In the
above calling form, :meth:`~.Query.join` is called upon to
create the "on clause" automatically for us. The target can
be any mapped entity or selectable, such as a :class:`.Table`::
q = session.query(User).join(addresses_table)
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. Suppose for
example we wanted to join to ``Address`` twice, using
an alias the second time. We use :func:`~sqlalchemy.orm.aliased`
to create a distinct alias of ``Address``, and join
to it using the ``target, onclause`` form, so that the
alias can be specified explicitly as the target along with
the relationship to instruct how the ON clause should proceed::
a_alias = aliased(Address)
q = session.query(User).\\
join(User.addresses).\\
join(a_alias, User.addresses).\\
filter(Address.email_address=='ed@foo.com').\\
filter(a_alias.email_address=='ed@bar.com')
Where above, the generated SQL would be similar to::
SELECT user.* FROM user
JOIN address ON user.id = address.user_id
JOIN address AS address_1 ON user.id=address_1.user_id
WHERE address.email_address = :email_address_1
AND address_1.email_address = :email_address_2
The two-argument calling form of :meth:`~.Query.join`
also allows us to construct arbitrary joins with SQL-oriented
"on clause" expressions, not relying upon configured relationships
at all. Any SQL expression can be passed as the ON clause
when using the two-argument form, which should refer to the target
entity in some way as well as an applicable source entity::
q = session.query(User).join(Address, User.id==Address.user_id)
.. versionchanged:: 0.7
In SQLAlchemy 0.6 and earlier, the two argument form of
:meth:`~.Query.join` requires the usage of a tuple:
``query(User).join((Address, User.id==Address.user_id))``\ .
This calling form is accepted in 0.7 and further, though
is not necessary unless multiple join conditions are passed to
a single :meth:`~.Query.join` call, which itself is also not
generally necessary as it is now equivalent to multiple
calls (this wasn't always the case).
**Advanced Join Targeting and Adaption**
There is a lot of flexibility in what the "target" can be when using
:meth:`~.Query.join`. As noted previously, it also accepts
:class:`.Table` constructs and other selectables such as
:func:`.alias` and :func:`.select` constructs, with either the one
or two-argument forms::
addresses_q = select([Address.user_id]).\\
where(Address.email_address.endswith("@bar.com")).\\
alias()
q = session.query(User).\\
join(addresses_q, addresses_q.c.user_id==User.id)
:meth:`~.Query.join` also features the ability to *adapt* a
        :func:`~sqlalchemy.orm.relationship` -driven ON clause to the target
selectable. Below we construct a JOIN from ``User`` to a subquery
against ``Address``, allowing the relationship denoted by
``User.addresses`` to *adapt* itself to the altered target::
address_subq = session.query(Address).\\
filter(Address.email_address == 'ed@foo.com').\\
subquery()
q = session.query(User).join(address_subq, User.addresses)
Producing SQL similar to::
SELECT user.* FROM user
JOIN (
SELECT address.id AS id,
address.user_id AS user_id,
address.email_address AS email_address
FROM address
WHERE address.email_address = :email_address_1
) AS anon_1 ON user.id = anon_1.user_id
The above form allows one to fall back onto an explicit ON
clause at any time::
q = session.query(User).\\
join(address_subq, User.id==address_subq.c.user_id)
**Controlling what to Join From**
While :meth:`~.Query.join` exclusively deals with the "right"
side of the JOIN, we can also control the "left" side, in those
cases where it's needed, using :meth:`~.Query.select_from`.
Below we construct a query against ``Address`` but can still
make usage of ``User.addresses`` as our ON clause by instructing
the :class:`.Query` to select first from the ``User``
entity::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Constructing Aliases Anonymously**
:meth:`~.Query.join` can construct anonymous aliases
using the ``aliased=True`` flag. This feature is useful
when a query is being joined algorithmically, such as
when querying self-referentially to an arbitrary depth::
q = session.query(Node).\\
join("children", "children", aliased=True)
When ``aliased=True`` is used, the actual "alias" construct
is not explicitly available. To work with it, methods such as
:meth:`.Query.filter` will adapt the incoming entity to
the last join point::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1')
When using automatic aliasing, the ``from_joinpoint=True``
argument can allow a multi-node join to be broken into
multiple calls to :meth:`~.Query.join`, so that
each path along the way can be further filtered::
q = session.query(Node).\\
join("children", aliased=True).\\
                    filter(Node.name == 'child 1').\\
join("children", aliased=True, from_joinpoint=True).\\
filter(Node.name == 'grandchild 1')
The filtering aliases above can then be reset back to the
original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1').\\
reset_joinpoint().\\
                    filter(Node.name == 'parent 1')
For an example of ``aliased=True``, see the distribution
example :ref:`examples_xmlpersistence` which illustrates
an XPath-like query system using algorithmic joins.
:param *props: A collection of one or more join conditions,
each consisting of a relationship-bound attribute or string
relationship name representing an "on clause", or a single
target entity, or a tuple in the form of ``(target, onclause)``.
         A special two-argument calling form ``target, onclause``
is also accepted.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :class:`~.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`~.Query.reset_joinpoint` is called.
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
See also:
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how :meth:`~.Query.join`
is used for inheritance relationships.
:func:`.orm.join` - a standalone ORM-level join function,
used internally by :meth:`.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
                ','.join(kwargs))
return self._join(props,
outerjoin=False, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
"""
aliased, from_joinpoint = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs))
return self._join(props,
outerjoin=True, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while 'prev' in jp:
f, prev = jp['prev']
prev = prev.copy()
prev[f] = jp
jp['prev'] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if len(keys) == 2 and \
isinstance(keys[0], (expression.FromClause,
type, AliasedClass)) and \
isinstance(keys[1], (str, expression.ClauseElement,
interfaces.PropComparator)):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
for arg1 in util.to_list(keys):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred. May deprecate
# the "tuple" usage.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order. may possibly
# lock this down to (right_entity, onclause) in 0.6.
if isinstance(arg1, (interfaces.PropComparator, util.string_types)):
right_entity, onclause = arg2, arg1
else:
right_entity, onclause = arg1, arg2
left_entity = prop = None
if isinstance(onclause, util.string_types):
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity, onclause)
onclause = descriptor
# check for q.join(Class.propname, from_joinpoint=True)
# and Class is that of the current joinpoint
elif from_joinpoint and \
isinstance(onclause, interfaces.PropComparator):
left_entity = onclause._parententity
info = inspect(self._joinpoint_zero())
left_mapper, left_selectable, left_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', None)
if left_mapper is left_entity:
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity,
onclause.key)
onclause = descriptor
if isinstance(onclause, interfaces.PropComparator):
if right_entity is None:
right_entity = onclause.property.mapper
of_type = getattr(onclause, '_of_type', None)
if of_type:
right_entity = of_type
else:
right_entity = onclause.property.mapper
left_entity = onclause._parententity
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left_entity, right_entity, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp['prev'] = (edge, self._joinpoint)
self._update_joinpoint(jp)
continue
elif onclause is not None and right_entity is None:
# TODO: no coverage here
raise NotImplementedError("query.join(a==b) not supported.")
self._join_left_to_right(
left_entity,
right_entity, onclause,
outerjoin, create_aliases, prop)
def _join_left_to_right(self, left, right,
onclause, outerjoin, create_aliases, prop):
"""append a JOIN to the query's from clause."""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
if self._from_obj:
left = self._from_obj[0]
elif self._entities:
left = self._entities[0].entity_zero_or_selectable
if left is right and \
not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" %
(left, right))
l_info = inspect(left)
r_info = inspect(right)
overlap = not create_aliases and \
sql_util.selectables_overlap(l_info.selectable,
r_info.selectable)
if overlap and l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself" %
l_info.selectable)
right, onclause = self._prepare_right_side(
r_info, right, onclause,
create_aliases,
prop, overlap)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint({
'_joinpoint_entity': right,
'prev': ((left, right, prop.key), self._joinpoint)
})
else:
self._joinpoint = {'_joinpoint_entity': right}
self._join_to_left(l_info, left, right, onclause, outerjoin)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
prop, overlap):
info = r_info
right_mapper, right_selectable, right_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', False)
if right_mapper:
self._join_entities += (info, )
if right_mapper and prop and \
not right_mapper.common_parent(prop.mapper):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
if not right_mapper and prop:
right_mapper = prop.mapper
need_adapter = False
if right_mapper and right is right_selectable:
if not right_selectable.is_derived_from(
right_mapper.mapped_table):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'" %
(right_selectable.description,
right_mapper.mapped_table.description))
if isinstance(right_selectable, expression.SelectBase):
# TODO: this isn't even covered now!
right_selectable = right_selectable.alias()
need_adapter = True
right = aliased(right_mapper, right_selectable)
aliased_entity = right_mapper and \
not right_is_aliased and \
(
right_mapper.with_polymorphic and isinstance(
right_mapper._with_polymorphic_selectable,
expression.Alias)
or
overlap # test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
# if an alias() of the right side was generated here,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
if need_adapter:
self._filter_aliases = ORMAdapter(right,
equivalents=right_mapper and
right_mapper._equivalent_columns or {},
chain_to=self._filter_aliases)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if an alias() on the right side was generated,
        # which is intended to wrap the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if aliased_entity and not create_aliases:
self._mapper_loads_polymorphically_with(
right_mapper,
ORMAdapter(
right,
equivalents=right_mapper._equivalent_columns
)
)
return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin):
info = l_info
left_mapper = getattr(info, 'mapper', None)
left_selectable = info.selectable
if self._from_obj:
replace_clause_index, clause = sql_util.find_join_source(
self._from_obj,
left_selectable)
if clause is not None:
try:
clause = orm_join(clause,
right,
onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = \
self._from_obj[:replace_clause_index] + \
(clause, ) + \
self._from_obj[replace_clause_index + 1:]
return
if left_mapper:
for ent in self._entities:
if ent.corresponds_to(left):
clause = ent.selectable
break
else:
clause = left
else:
clause = left_selectable
assert clause is not None
try:
clause = orm_join(clause, right, onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
"""Set the FROM clause of this :class:`.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
This method is similar to the :meth:`.Query.select_from`
method, in that it sets the FROM clause of the query. However,
where :meth:`.Query.select_from` only affects what is placed
in the FROM, this method also applies the given selectable
to replace the FROM which the selected entities would normally
select from.
The given ``from_obj`` must be an instance of a :class:`.FromClause`,
e.g. a :func:`.select` or :class:`.Alias` construct.
An example would be a :class:`.Query` that selects ``User`` entities,
but uses :meth:`.Query.select_entity_from` to have the entities
selected from a :func:`.select` construct instead of the
base ``user`` table::
select_stmt = select([User]).where(User.id == 7)
q = session.query(User).\\
select_entity_from(select_stmt).\\
filter(User.name == 'ed')
The query generated will select ``User`` entities directly
from the given :func:`.select` construct, and will be::
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
Notice above that even the WHERE criterion was "adapted" such that
the ``anon_1`` subquery effectively replaces all references to the
``user`` table, except for the one that it refers to internally.
Compare this to :meth:`.Query.select_from`, which as of
version 0.9, does not affect existing entities. The
statement below::
q = session.query(User).\\
select_from(select_stmt).\\
filter(User.name == 'ed')
Produces SQL where both the ``user`` table as well as the
``select_stmt`` construct are present as separate elements
in the FROM clause. No "adaptation" of the ``user`` table
is applied::
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
:meth:`.Query.select_entity_from` maintains an older
behavior of :meth:`.Query.select_from`. In modern usage,
similar results can also be achieved using :func:`.aliased`::
select_stmt = select([User]).where(User.id == 7)
user_from_select = aliased(User, select_stmt.alias())
q = session.query(user_from_select)
:param from_obj: a :class:`.FromClause` object that will replace
the FROM clause of this :class:`.Query`.
.. seealso::
:meth:`.Query.select_from`
.. versionadded:: 0.8
:meth:`.Query.select_entity_from` was added to specify
the specific behavior of entity replacement, however
the :meth:`.Query.select_from` maintains this behavior
as well until 0.9.
"""
self._set_select_from([from_obj], True)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if isinstance(stop, int) and \
isinstance(start, int) and \
stop - start <= 0:
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) \
or (isinstance(stop, int) and stop < 0):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None:None:item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item:item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""apply LIMIT/OFFSET to the ``Query`` based on a "
"range and return the newly resulting ``Query``."""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
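
        For illustration, a sketch of pagination-style usage, assuming the
        ``User`` mapping used in the other examples here::

            # rows 11-20 of the ordered result
            q = session.query(User).order_by(User.id).limit(10).offset(10)
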
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
"""
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
        :param \*criterion: optional column expressions. When present,
        the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
construct.
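
        For illustration, a sketch assuming the ``User`` mapping used in the
        other examples here (the second form requires the Postgresql
        dialect)::

            # plain SELECT DISTINCT
            q = session.query(User.name).distinct()

            # DISTINCT ON (users.name), Postgresql only
            q = session.query(User).distinct(User.name)
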
"""
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
@_generative()
def prefix_with(self, *prefixes):
"""Apply the prefixes to the query and return the newly resulting
``Query``.
:param \*prefixes: optional prefixes, typically strings,
not using any commas. In particular is useful for MySQL keywords.
e.g.::
query = sess.query(User.name).\\
prefix_with('HIGH_PRIORITY').\\
prefix_with('SQL_SMALL_RESULT', 'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
FROM users
.. versionadded:: 0.7.7
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
def all(self):
"""Return the results represented by this ``Query`` as a list.
This results in an execution of the underlying query.
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement argument is either a string, a ``select()`` construct,
or a ``text()`` construct, and should return the set of columns
appropriate to the entity class represented by this ``Query``.
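
        For illustration, a sketch assuming the ``User`` mapping used in the
        other examples here; a plain string is interpreted as a
        :func:`.text` construct::

            q = session.query(User).\\
                    from_statement("SELECT * FROM users "
                                   "WHERE users.name = :name").\\
                    params(name='ed')
            users = q.all()
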
"""
if isinstance(statement, util.string_types):
statement = sql.text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling ``first()`` results in an execution of the underlying query.
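
        For illustration, assuming the ``User`` mapping used in the other
        examples here::

            user = session.query(User).\\
                    filter(User.name == 'ed').\\
                    first()
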
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that does not return object
identities.
Note that an entity query, that is, one which selects one or
more mapped classes as opposed to individual column attributes,
may ultimately represent many rows but only one row of
unique entity or entities - this is a successful result for one().
Calling ``one()`` results in an execution of the underlying query.
.. versionchanged:: 0.6
``one()`` fully fetches all results instead of applying
any kind of limit, so that the "unique"-ing of entities does not
conceal multiple object identities.
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def _connection_from_session(self, **kw):
conn = self.session.connection(
**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
mapper=self._mapper_zero_or_none(),
clause=querycontext.statement,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(self, result, querycontext)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias
}
]
"""
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(ent, 'is_aliased_class', False),
'expr': ent.expr
}
for ent in self._entities
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
print u
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session.
Given an iterator returned by a :class:`.Query` of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`.Query` - if these do not correspond, unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`~.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`~.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
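
        For illustration only, a sketch where ``cached_rows`` stands in for
        a list of rows previously produced by an equivalent query (e.g.
        pulled from an external cache)::

            q = session.query(User).filter(User.name.like('e%'))
            merged_users = list(q.merge_result(cached_rows))
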
"""
return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
.. versionadded:: 0.8.1
"""
return sql.exists(self.statement.with_only_columns(['1']))
def count(self):
"""Return a count of rows this Query would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
.. versionchanged:: 0.7
The above scheme is newly refined as of 0.7b3.
For fine grained control over specific columns
to count, to skip the usage of a subquery or
otherwise control of the FROM clause,
or to use other aggregate functions,
use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction
with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised. In that case you probably
want to use the 'fetch' strategy as a fallback.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships - it
is assumed that ON DELETE CASCADE/SET NULL/etc. is configured for any foreign key
references which require it, otherwise the database may emit an
integrity violation if foreign key references are being enforced.
After the DELETE, dependent objects in the :class:`.Session` which
were impacted by an ON DELETE may not contain the current
state, or may have been deleted. This issue is resolved once the
:class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`. Accessing an expired object
whose row has been deleted will invoke a SELECT to locate the
row; when the row is not found, an :class:`.ObjectDeletedError`
is raised.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to act
upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
#TODO: cascades need handling.
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
def update(self, values, synchronize_session='evaluate'):
"""Perform a bulk update query.
Updates rows matched by this query in the database.
:param values: a dictionary with attributes names as keys and literal
values or sql expressions as values.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships - it
is assumed that ON UPDATE CASCADE is configured for any foreign key
references which require it, otherwise the database may emit an
integrity violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the :class:`.Session` which
were impacted by an ON UPDATE CASCADE may not contain the current
state; this issue is resolved once the :class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`.
* As of 0.8, this method will support multiple table updates, as detailed
in :ref:`multi_table_updates`, and this behavior does extend to support
updates of joined-inheritance and other multiple table mappings. However,
the **join condition of an inheritance mapper is currently not
automatically rendered**.
Care must be taken in any multiple-table update to explicitly include
the joining condition between those tables, even in mappings where
this is normally automatic.
E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of the
``Engineer`` local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\\
filter(Engineer.id == Employee.id).\\
filter(Employee.name == 'dilbert').\\
update({"engineer_type": "programmer"})
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to act
upon a mass UPDATE of entity rows.
.. seealso::
:meth:`.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
#TODO: value keys need to be mapped to corresponding sql cols and
# instr.attr.s to string keys
#TODO: updates of manytoone relationships need to be converted to
# fk assignments
#TODO: cascades need handling.
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values)
update_op.exec_()
return update_op.rowcount
_lockmode_lookup = {
'read': 'read',
'read_nowait': 'read_nowait',
'update': True,
'update_nowait': 'nowait',
None: False
}
def _compile_context(self, labels=True):
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
if self._lockmode:
try:
context.for_update = self._lockmode_lookup[self._lockmode]
except KeyError:
raise sa_exc.ArgumentError(
"Unknown lockmode %r" % self._lockmode)
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(*rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
# this would fix...
#import pdb
#pdb.set_trace()
#context.froms += tuple(context.from_clause)
else:
# "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
context.froms = context.froms
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items.")
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to "
"SELECT from.")
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns,
for_update=context.for_update,
use_labels=context.labels)
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause,
eager_join, eager_join.stop_on)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(
context.order_by
)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct and context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
context.primary_columns += order_by_col_expr
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns +
context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
for_update=context.for_update,
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
for (ext_info, adapter) in self._mapper_adapter_map.values():
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause),
single_crit)
def __str__(self):
return str(self._compile_context().statement)
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and \
_is_mapped_class(entity):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
self.entities = [entity]
self.expr = entity
supports_single_entity = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = \
ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
self.custom_rows = bool(self.mapper.dispatch.append_result)
def set_with_polymorphic(self, query, cls_or_mappers,
selectable, polymorphic_on):
"""Receive an update from a call to query.with_polymorphic().
Note the newer style of using a free standing with_polymporphic()
construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against "
"an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(self.mapper,
sql_util.ColumnAdapter(from_obj,
self.mapper._equivalent_columns))
filter_fn = id
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
if entity.is_aliased_class:
if self.is_aliased_class:
if entity._base_alias is self.entity_zero._base_alias:
return True
return False
elif self.is_aliased_class:
if self.entity_zero._use_mapper_path:
return entity in self._with_polymorphic
else:
return entity is self.entity_zero
return entity.common_parent(self.entity_zero)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, custom_rows):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable,
self.mapper._equivalent_columns)
if query._primary_entity is self:
_instance = loading.instance_processor(
self.mapper,
context,
self.path,
adapter,
only_load_props=query._only_load_props,
refresh_state=context.refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator
)
else:
_instance = loading.instance_processor(
self.mapper,
context,
self.path,
adapter,
polymorphic_discriminator=self._polymorphic_discriminator
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
#if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(
context.order_by
)
)
if self._with_polymorphic:
poly_properties = self.mapper._iterate_polymorphic_properties(
self._with_polymorphic)
else:
poly_properties = self.mapper._polymorphic_properties
for value in poly_properties:
if query._only_load_props and \
value.key not in query._only_load_props:
continue
value.setup(
context,
self,
self.path,
adapter,
only_load_props=query._only_load_props,
column_collection=context.primary_columns
)
if self._polymorphic_discriminator is not None and \
self._polymorphic_discriminator \
is not self.mapper.polymorphic_on:
if adapter:
pd = adapter.columns[self._polymorphic_discriminator]
else:
pd = self._polymorphic_discriminator
context.primary_columns.append(pd)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(object):
"""A grouping of SQL expressions that are returned by a :class:`.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`.Query` object. It also
is extensible via simple subclassing, where the primary capability
to override is that of how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
def __init__(self, name, *exprs, **kw):
"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update((getattr(col, "key", col._label), col)
for col in exprs)
self.single_entity = kw.pop('single_entity', self.single_entity)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.c)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
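
        For illustration, a sketch of a subclass that returns each result
        row as a dictionary rather than a keyed tuple, following the
        pattern described at :ref:`bundles`::

            class DictBundle(Bundle):
                def create_row_processor(self, query, procs, labels):
                    def proc(row, result):
                        return dict(
                            zip(labels, (p(row, result) for p in procs)))
                    return proc
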
"""
def proc(row, result):
return util.KeyedTuple([proc(row, None) for proc in procs], labels)
return proc
class _BundleEntity(_QueryEntity):
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr, namespace=self)
self.entities = ()
self.filter_fn = lambda item: item
self.supports_single_entity = self.bundle.single_entity
custom_rows = False
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: this seems to have no effect for
# _ColumnEntity either
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
#c._label_name = self._label_name
#c.entity_zero = self.entity_zero
#c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, custom_rows):
procs, labels = zip(
*[ent.row_processor(query, context, custom_rows)
for ent in self._entities]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
elif isinstance(column, (
attributes.QueryableAttribute,
interfaces.PropComparator
)):
self._label_name = column.key
column = column._query_clause_element()
else:
self._label_name = getattr(column, 'key', None)
if not isinstance(column, expression.ColumnElement) and \
hasattr(column, '_select_iterable'):
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
if c is not column:
return
elif isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
self.type = type_ = column.type
if type_.hashable:
self.filter_fn = lambda item: item
else:
counter = util.counter()
self.filter_fn = lambda item: counter()
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, 'is_literal', False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
self.entities = util.OrderedSet(
elem._annotations['parententity']
for elem in visitors.iterate(column, {})
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
)
if self.entities:
self.entity_zero = list(self.entities)[0]
elif self.namespace is not None:
self.entity_zero = self.namespace
else:
self.entity_zero = None
supports_single_entity = False
custom_rows = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
# no tests fail
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(self.entity_zero) and \
entity.common_parent(self.entity_zero)
def _resolve_expr_against_query_aliases(self, query, expr, context):
return query._adapt_clause(expr, False, True)
def row_processor(self, query, context, custom_rows):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
if context.adapter:
column = context.adapter.columns[column]
def proc(row, result):
return row[column]
return proc, self._label_name
def setup_context(self, query, context):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
context.froms += tuple(self.froms)
context.primary_columns.append(column)
def __str__(self):
return str(self.column)
class QueryContext(object):
multi_row_eager_loaders = False
adapter = None
froms = ()
for_update = False
def __init__(self, query):
if query._statement is not None:
if isinstance(query._statement, expression.SelectBase) and \
not query._statement.use_labels:
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.query = query
self.session = query.session
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(o for o in query._with_options if
o.propagate_to_loaders)
self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\\
union(users.select(users.c.user_id>7)).\\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`~.sql.expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().mapped_table.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
| gpl-3.0 |
DonBeo/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 270 | 6972 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of the calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
rdevon/cortex | cortex/built_ins/models/ali.py | 1 | 7744 | '''Adversarially learned inference and Bi-GAN
Currently a noise encoder is not implemented.
'''
__author__ = 'R Devon Hjelm and Samuel Lavoie'
__author_email__ = 'erroneus@gmail.com'
from cortex.plugins import register_plugin, ModelPlugin
from cortex.built_ins.models.gan import (
get_positive_expectation, get_negative_expectation, GradientPenalty)
from cortex.built_ins.models.vae import ImageDecoder, ImageEncoder
from cortex.built_ins.networks.fully_connected import FullyConnectedNet
import torch
import torch.nn as nn
class ALIDiscriminatorModule(nn.Module):
'''ALI discriminator model.
'''
def __init__(self, x_encoder, z_encoder, topnet):
super(ALIDiscriminatorModule, self).__init__()
self.x_encoder = x_encoder
self.z_encoder = z_encoder
self.topnet = topnet
def forward(self, x, z, nonlinearity=None):
w = self.x_encoder(x)
if len(w.size()) == 4:
w = w.view(-1, w.size(1) * w.size(2) * w.size(3))
if self.z_encoder is None:
v = z
else:
v = self.z_encoder(z)
v = torch.cat([v, w], dim=1)
y = self.topnet(v)
return y
class BidirectionalModel(ModelPlugin):
def __init__(self, discriminator, **kwargs):
super().__init__(**kwargs)
self.discriminator = discriminator
encoder_contract = dict(kwargs=dict(dim_out='dim_z'))
decoder_contract = dict(kwargs=dict(dim_in='dim_z'))
self.decoder = ImageDecoder(contract=decoder_contract)
self.encoder = ImageEncoder(contract=encoder_contract)
def build(self):
self.decoder.build()
self.encoder.build()
def routine(self, inputs, Z, measure=None):
decoder = self.nets.decoder
encoder = self.nets.encoder
X_P, Z_Q = inputs, Z
X_Q = decoder(Z_Q)
Z_P = encoder(X_P)
E_pos, E_neg, _, _ = self.discriminator.score(
X_P, X_Q, Z_P, Z_Q, measure)
self.losses.decoder = -E_neg
self.losses.encoder = E_pos
def visualize(self):
self.decoder.visualize(auto_input=True)
self.encoder.visualize(auto_input=True)
class ALIDiscriminator(ModelPlugin):
def build(self, topnet_args=dict(dim_h=[512, 128], batch_norm=False),
dim_int=256, dim_z=None):
'''
Args:
topnet_args: Keyword arguments for the top network.
dim_int: Intermediate layer size for discriminator.
'''
x_encoder = self.nets.x_encoder
try:
z_encoder = self.nets.z_encoder
except KeyError:
z_encoder = None
if z_encoder is not None:
z_encoder_out = list(
z_encoder.models[-1].parameters())[-1].size()[0]
dim_in = dim_int + z_encoder_out
else:
dim_in = dim_int + dim_z
topnet = FullyConnectedNet(dim_in, 1, **topnet_args)
discriminator = ALIDiscriminatorModule(x_encoder, z_encoder, topnet)
self.nets.discriminator = discriminator
def routine(self, X_real, X_fake, Z_real, Z_fake, measure='GAN'):
'''
Args:
measure: GAN measure.
{GAN, JSD, KL, RKL (reverse KL), X2 (Chi^2), H2
(squared Hellinger), DV (Donsker Varahdan KL), W1 (IPM)}
'''
X_P, Z_P = X_real, Z_real
X_Q, Z_Q = X_fake, Z_fake
E_pos, E_neg, P_samples, Q_samples = self.score(
X_P, X_Q, Z_P, Z_Q, measure)
difference = E_pos - E_neg
self.losses.discriminator = -difference
self.results.update(Scores=dict(Ep=P_samples.mean().item(),
Eq=Q_samples.mean().item()))
self.results['{} distance'.format(measure)] = difference.item()
def score(self, X_P, X_Q, Z_P, Z_Q, measure):
discriminator = self.nets.discriminator
P_samples = discriminator(X_P, Z_P)
Q_samples = discriminator(X_Q, Z_Q)
E_pos = get_positive_expectation(P_samples, measure)
E_neg = get_negative_expectation(Q_samples, measure)
return E_pos, E_neg, P_samples, Q_samples
def visualize(self, X_real, X_fake, Z_real, Z_fake, targets):
discriminator = self.nets.discriminator
X_P, Z_P = X_real, Z_real
X_Q, Z_Q = X_fake, Z_fake
P_samples = discriminator(X_P, Z_P)
Q_samples = discriminator(X_Q, Z_Q)
self.add_histogram(dict(fake=Q_samples.view(-1).data,
real=P_samples.view(-1).data),
name='discriminator output')
self.add_scatter(Z_P, labels=targets.data, name='latent values')
class ALI(ModelPlugin):
'''Adversarially learned inference.
a.k.a. BiGAN
Note:
        Currently a noisy encoder is not supported.
'''
defaults = dict(
data=dict(batch_size=dict(train=64, test=640),
inputs=dict(inputs='images')),
optimizer=dict(optimizer='Adam', learning_rate=1e-4),
train=dict(epochs=500, save_on_lowest='losses.generator')
)
def __init__(self):
super().__init__()
self.discriminator = ALIDiscriminator()
self.bidirectional_model = BidirectionalModel(
discriminator=self.discriminator)
encoder_contract = dict(nets=dict(encoder='x_encoder'),
kwargs=dict(dim_out='dim_int'))
self.encoder = ImageEncoder(contract=encoder_contract)
penalty_contract = dict(nets=dict(network='discriminator'))
self.penalty = GradientPenalty(contract=penalty_contract)
def build(self, dim_z=None, dim_int=None, use_z_encoder=False,
z_encoder_args=dict(dim_h=256, batch_norm=True),
noise_type='normal'):
'''
Args:
use_z_encoder: Use a neural network for Z pathway in discriminator.
z_encoder_args: Arguments for the Z pathway encoder.
'''
self.add_noise('Z', dist=noise_type, size=dim_z)
if use_z_encoder:
encoder = FullyConnectedNet(dim_z, dim_int, **z_encoder_args)
self.nets.z_encoder = encoder
self.encoder.build()
self.discriminator.build()
self.bidirectional_model.build()
def train_step(self, discriminator_updates=1):
'''
Args:
            discriminator_updates: Number of discriminator updates per step.
'''
for _ in range(discriminator_updates):
self.data.next()
inputs, Z = self.inputs('inputs', 'Z')
generated = self.bidirectional_model.decoder.decode(Z)
inferred = self.bidirectional_model.encoder.encode(inputs)
self.discriminator.routine(
inputs, generated.detach(), inferred.detach(), Z)
self.optimizer_step()
self.penalty.routine((inputs, inferred))
self.optimizer_step()
self.bidirectional_model.train_step()
def eval_step(self):
self.data.next()
inputs, Z = self.inputs('inputs', 'Z')
generated = self.bidirectional_model.decoder.decode(Z)
inferred = self.bidirectional_model.encoder.encode(inputs)
self.discriminator.routine(inputs, generated, inferred, Z)
self.bidirectional_model.eval_step()
def visualize(self, inputs, Z, targets):
generated = self.bidirectional_model.decoder.decode(Z)
inferred = self.bidirectional_model.encoder.encode(inputs)
self.bidirectional_model.visualize()
self.discriminator.visualize(inputs, generated, inferred, Z, targets)
register_plugin(ALI)
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/learning_curve.py | 28 | 13300 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import _check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
    Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that has been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
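
    A minimal usage sketch; ``X`` and ``y`` are assumed to be an existing
    feature matrix and target vector, and the returned score arrays have
    one row per training-set size and one column per CV fold::

        import numpy as np
        from sklearn.svm import SVC
        from sklearn.learning_curve import learning_curve

        train_sizes, train_scores, test_scores = learning_curve(
            SVC(kernel="linear"), X, y, cv=5,
            train_sizes=np.linspace(0.1, 1.0, 5))
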
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(_check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
        warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
                      "of ticks will be less than the size of "
                      "'train_sizes' (%d instead of %d)."
                      % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
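# Usage sketch: a minimal, hedged example of calling the two utilities above.
# The estimator (SVC), dataset (iris) and parameter ranges are illustrative
# assumptions, not prescribed by this module.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    # Learning curve: score as a function of the training set size.
    sizes, train_scores, test_scores = learning_curve(
        SVC(kernel="linear"), iris.data, iris.target,
        train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
    print(sizes)
    print(train_scores.mean(axis=1), test_scores.mean(axis=1))
    # Validation curve: score as a function of a single hyperparameter.
    train_scores, test_scores = validation_curve(
        SVC(), iris.data, iris.target, param_name="gamma",
        param_range=np.logspace(-6, -1, 5), cv=5)
    print(train_scores.mean(axis=1), test_scores.mean(axis=1))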
| bsd-3-clause |
tapomayukh/projects_in_python | classification/Classification_with_HMM/Single_Contact_Classification/simulation_results/comparision_with_kNN_PCA/Combined/k_PC_categories_generalized.py | 1 | 5147 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import scipy.io
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_1 = ['Rigid-Fixed']*2000 + ['Rigid-Movable']*2000 + ['Soft-Fixed']*2000 + ['Soft-Movable']*2000
PCA_chunk_1 = ['1RF']*400 + ['2RF']*400 + ['3RF']*400 + ['4RF']*400 + ['5RF']*400 + ['1RM']*400 + ['2RM']*400 + ['3RM']*400 + ['4RM']*400 + ['5RM']*400 + ['1SF']*400 + ['2SF']*400 + ['3SF']*400 + ['4SF']*400 + ['5SF']*400 + ['1SM']*400 + ['2SM']*400 + ['3SM']*400 + ['4SM']*400 + ['5SM']*400
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = np.matrix(np.zeros((41,8000)))
### Simulation Data
tSamples = 121
data_rf = scipy.io.loadmat('rigid_fixed.mat')
data_sf = scipy.io.loadmat('soft_fixed.mat')
data_rm = scipy.io.loadmat('rigid_movable.mat')
data_sm = scipy.io.loadmat('soft_movable.mat')
simulforce = np.zeros((tSamples,8000))
datatime = np.arange(0,1.21,0.01)
dataforce_rf = np.transpose(data_rf['sensed_force_rf'])
dataforce_sf = np.transpose(data_sf['sensed_force_sf'])
dataforce_rm = np.transpose(data_rm['sensed_force_rm'])
dataforce_sm = np.transpose(data_sm['sensed_force_sm'])
simulforce = np.concatenate((dataforce_rf, dataforce_rm, dataforce_sf, dataforce_sm), axis = 1)
simulforce_cat = np.zeros((41,8000))
i = 0
j = 0
n = 121
while (i < n):
simulforce_cat[j,:] = simulforce[i,:]
i = i+3
j= j+1
Fmat = np.matrix(simulforce_cat)
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((8000,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
| mit |
ChadFulton/statsmodels | statsmodels/examples/try_polytrend.py | 1 | 1492 |
from __future__ import print_function
import numpy as np
#import statsmodels.linear_model.regression as smreg
from scipy import special
import statsmodels.api as sm
from statsmodels.datasets.macrodata import data
dta = data.load(as_pandas=False)
gdp = np.log(dta.data['realgdp'])
from numpy import polynomial
from scipy import special
maxorder = 20
polybase = special.chebyt
polybase = special.legendre
t = np.linspace(-1,1,len(gdp))
exog = np.column_stack([polybase(i)(t) for i in range(maxorder)])
fitted = [sm.OLS(gdp, exog[:, :maxr]).fit().fittedvalues for maxr in
range(2,maxorder)]
print((np.corrcoef(exog[:,1:6], rowvar=0)*10000).astype(int))
import matplotlib.pyplot as plt
plt.figure()
plt.plot(gdp, 'o')
for i in range(maxorder-2):
plt.plot(fitted[i])
plt.figure()
#plt.plot(gdp, 'o')
for i in range(maxorder-4, maxorder-2):
#plt.figure()
plt.plot(gdp - fitted[i])
plt.title(str(i+2))
plt.figure()
plt.plot(gdp, '.')
plt.plot(fitted[-1], lw=2, color='r')
plt.plot(fitted[0], lw=2, color='g')
plt.title('GDP and Polynomial Trend')
plt.figure()
plt.plot(gdp - fitted[-1], lw=2, color='r')
plt.plot(gdp - fitted[0], lw=2, color='g')
plt.title('Residual GDP minus Polynomial Trend (green: linear, red: legendre(20))')
#orthonormalize an exog using QR
ex2 = t[:,None]**np.arange(6) #np.vander has columns reversed
q2,r2 = np.linalg.qr(ex2, mode='full')
np.max(np.abs(np.dot(q2.T, q2)-np.eye(6)))
plt.figure()
plt.plot(q2, lw=2)
plt.show()
| bsd-3-clause |
irhete/predictive-monitoring-benchmark | experiments/dataset_confs.py | 1 | 9527 | import os
case_id_col = {}
activity_col = {}
resource_col = {}
timestamp_col = {}
label_col = {}
pos_label = {}
neg_label = {}
dynamic_cat_cols = {}
static_cat_cols = {}
dynamic_num_cols = {}
static_num_cols = {}
filename = {}
logs_dir = "labeled_logs_csv_processed"
#### Traffic fines settings ####
for formula in range(1,3):
dataset = "traffic_fines_%s"%formula
filename[dataset] = os.path.join(logs_dir, "traffic_fines_%s.csv"%formula)
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
resource_col[dataset] = "Resource"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "label"
pos_label[dataset] = "deviant"
neg_label[dataset] = "regular"
# features for classifier
dynamic_cat_cols[dataset] = ["Activity", "Resource", "lastSent", "notificationType", "dismissal"]
static_cat_cols[dataset] = ["article", "vehicleClass"]
dynamic_num_cols[dataset] = ["expense", "timesincelastevent", "timesincecasestart", "timesincemidnight", "event_nr", "month", "weekday", "hour", "open_cases"]
static_num_cols[dataset] = ["amount", "points"]
#### Sepsis Cases settings ####
datasets = ["sepsis_cases_%s" % i for i in range(1, 5)]
for dataset in datasets:
filename[dataset] = os.path.join(logs_dir, "%s.csv" % dataset)
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
resource_col[dataset] = "org:group"
timestamp_col[dataset] = "time:timestamp"
label_col[dataset] = "label"
pos_label[dataset] = "deviant"
neg_label[dataset] = "regular"
# features for classifier
dynamic_cat_cols[dataset] = ["Activity", 'org:group'] # i.e. event attributes
static_cat_cols[dataset] = ['Diagnose', 'DiagnosticArtAstrup', 'DiagnosticBlood', 'DiagnosticECG',
'DiagnosticIC', 'DiagnosticLacticAcid', 'DiagnosticLiquor',
'DiagnosticOther', 'DiagnosticSputum', 'DiagnosticUrinaryCulture',
'DiagnosticUrinarySediment', 'DiagnosticXthorax', 'DisfuncOrg',
'Hypotensie', 'Hypoxie', 'InfectionSuspected', 'Infusion', 'Oligurie',
'SIRSCritHeartRate', 'SIRSCritLeucos', 'SIRSCritTachypnea',
'SIRSCritTemperature', 'SIRSCriteria2OrMore'] # i.e. case attributes that are known from the start
dynamic_num_cols[dataset] = ['CRP', 'LacticAcid', 'Leucocytes', "hour", "weekday", "month", "timesincemidnight", "timesincelastevent", "timesincecasestart", "event_nr", "open_cases"]
static_num_cols[dataset] = ['Age']
#### Production log settings ####
dataset = "production"
filename[dataset] = os.path.join(logs_dir, "Production.csv")
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
resource_col[dataset] = "Resource"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "label"
neg_label[dataset] = "regular"
pos_label[dataset] = "deviant"
# features for classifier
static_cat_cols[dataset] = ["Part_Desc_", "Rework"]
static_num_cols[dataset] = ["Work_Order_Qty"]
dynamic_cat_cols[dataset] = ["Activity", "Resource", "Report_Type", "Resource.1"]
dynamic_num_cols[dataset] = ["Qty_Completed", "Qty_for_MRB", "activity_duration", "hour", "weekday", "month", "timesincemidnight", "timesincelastevent", "timesincecasestart", "event_nr", "open_cases"]
#### BPIC2017 settings ####
bpic2017_dict = {"bpic2017_cancelled": "BPIC17_O_Cancelled.csv",
"bpic2017_accepted": "BPIC17_O_Accepted.csv",
"bpic2017_refused": "BPIC17_O_Refused.csv"
}
for dataset, fname in bpic2017_dict.items():
filename[dataset] = os.path.join(logs_dir, fname)
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
resource_col[dataset] = 'org:resource'
timestamp_col[dataset] = 'time:timestamp'
label_col[dataset] = "label"
neg_label[dataset] = "regular"
pos_label[dataset] = "deviant"
# features for classifier
dynamic_cat_cols[dataset] = ["Activity", 'org:resource', 'Action', 'EventOrigin', 'lifecycle:transition',
"Accepted", "Selected"]
static_cat_cols[dataset] = ['ApplicationType', 'LoanGoal']
dynamic_num_cols[dataset] = ['FirstWithdrawalAmount', 'MonthlyCost', 'NumberOfTerms', 'OfferedAmount', 'CreditScore', "timesincelastevent", "timesincecasestart", "timesincemidnight", "event_nr", "month", "weekday", "hour", "open_cases"]
static_num_cols[dataset] = ['RequestedAmount']
#### Hospital billing settings ####
for i in range(1, 7):
for suffix in ["", "_sample10000", "_sample30000"]:
dataset = "hospital_billing_%s%s" % (i, suffix)
filename[dataset] = os.path.join(logs_dir, "hospital_billing_%s%s.csv" % (i, suffix))
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
resource_col[dataset] = "Resource"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "label"
neg_label[dataset] = "regular"
pos_label[dataset] = "deviant"
if i == 1:
neg_label[dataset] = "deviant"
pos_label[dataset] = "regular"
# features for classifier
dynamic_cat_cols[dataset] = ["Activity", 'Resource', 'actOrange', 'actRed', 'blocked', 'caseType', 'diagnosis', 'flagC', 'flagD', 'msgCode', 'msgType', 'state', 'version']#, 'isCancelled', 'isClosed', 'closeCode']
static_cat_cols[dataset] = ['speciality']
dynamic_num_cols[dataset] = ['msgCount', "timesincelastevent", "timesincecasestart", "timesincemidnight", "event_nr", "month", "weekday", "hour"]#, "open_cases"]
static_num_cols[dataset] = []
if i == 1: # label is created based on isCancelled attribute
dynamic_cat_cols[dataset] = [col for col in dynamic_cat_cols[dataset] if col != "isCancelled"]
elif i == 2:
dynamic_cat_cols[dataset] = [col for col in dynamic_cat_cols[dataset] if col != "isClosed"]
#### BPIC2012 settings ####
bpic2012_dict = {"bpic2012_cancelled": "bpic2012_O_CANCELLED-COMPLETE.csv",
"bpic2012_accepted": "bpic2012_O_ACCEPTED-COMPLETE.csv",
"bpic2012_declined": "bpic2012_O_DECLINED-COMPLETE.csv"
}
for dataset, fname in bpic2012_dict.items():
filename[dataset] = os.path.join(logs_dir, fname)
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
resource_col[dataset] = "Resource"
timestamp_col[dataset] = "Complete Timestamp"
label_col[dataset] = "label"
neg_label[dataset] = "regular"
pos_label[dataset] = "deviant"
# features for classifier
dynamic_cat_cols[dataset] = ["Activity", "Resource"]
static_cat_cols[dataset] = []
dynamic_num_cols[dataset] = ["hour", "weekday", "month", "timesincemidnight", "timesincelastevent", "timesincecasestart", "event_nr", "open_cases"]
static_num_cols[dataset] = ['AMOUNT_REQ']
#### BPIC2015 settings ####
for municipality in range(1,6):
for formula in range(1,3):
dataset = "bpic2015_%s_f%s"%(municipality, formula)
filename[dataset] = os.path.join(logs_dir, "BPIC15_%s_f%s.csv"%(municipality, formula))
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity"
resource_col[dataset] = "org:resource"
timestamp_col[dataset] = "time:timestamp"
label_col[dataset] = "label"
pos_label[dataset] = "deviant"
neg_label[dataset] = "regular"
# features for classifier
dynamic_cat_cols[dataset] = ["Activity", "monitoringResource", "question", "org:resource"]
static_cat_cols[dataset] = ["Responsible_actor"]
dynamic_num_cols[dataset] = ["hour", "weekday", "month", "timesincemidnight", "timesincelastevent", "timesincecasestart", "event_nr", "open_cases"]
static_num_cols[dataset] = ["SUMleges", 'Aanleg (Uitvoeren werk of werkzaamheid)', 'Bouw', 'Brandveilig gebruik (vergunning)', 'Gebiedsbescherming', 'Handelen in strijd met regels RO', 'Inrit/Uitweg', 'Kap', 'Milieu (neutraal wijziging)', 'Milieu (omgevingsvergunning beperkte milieutoets)', 'Milieu (vergunning)', 'Monument', 'Reclame', 'Sloop']
if municipality in [3,5]:
static_num_cols[dataset].append('Flora en Fauna')
if municipality in [1,2,3,5]:
static_num_cols[dataset].append('Brandveilig gebruik (melding)')
static_num_cols[dataset].append('Milieu (melding)')
#### BPIC2011 settings ####
for formula in range(1,5):
dataset = "bpic2011_f%s"%formula
filename[dataset] = os.path.join(logs_dir, "BPIC11_f%s.csv"%formula)
case_id_col[dataset] = "Case ID"
activity_col[dataset] = "Activity code"
resource_col[dataset] = "Producer code"
timestamp_col[dataset] = "time:timestamp"
label_col[dataset] = "label"
pos_label[dataset] = "deviant"
neg_label[dataset] = "regular"
# features for classifier
dynamic_cat_cols[dataset] = ["Activity code", "Producer code", "Section", "Specialism code.1", "group"]
static_cat_cols[dataset] = ["Diagnosis", "Treatment code", "Diagnosis code", "Specialism code", "Diagnosis Treatment Combination ID"]
dynamic_num_cols[dataset] = ["Number of executions", "hour", "weekday", "month", "timesincemidnight", "timesincelastevent", "timesincecasestart", "event_nr", "open_cases"]
static_num_cols[dataset] = ["Age"]
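# Usage sketch: the dictionaries above are keyed by dataset name; a downstream
# script would look up the per-dataset columns like this ("production" is one
# of the keys defined above).
if __name__ == "__main__":
    dataset = "production"
    print(filename[dataset])
    print(case_id_col[dataset], activity_col[dataset], timestamp_col[dataset], label_col[dataset])
    print("static features:", static_cat_cols[dataset] + static_num_cols[dataset])
    print("dynamic features:", dynamic_cat_cols[dataset] + dynamic_num_cols[dataset])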
| apache-2.0 |
heli522/scikit-learn | sklearn/datasets/__init__.py | 175 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
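# Usage sketch: two of the loaders/generators re-exported above; the printed
# shapes are the standard iris dimensions and the shapes implied by the
# generator arguments (illustrative values only).
if __name__ == "__main__":
    iris = load_iris()
    print(iris.data.shape, iris.target.shape)  # (150, 4) (150,)
    X, y = make_classification(n_samples=100, n_features=20, random_state=0)
    print(X.shape, y.shape)  # (100, 20) (100,)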
| bsd-3-clause |
jhaux/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 22 | 5276 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"transformed_x": constant_op.constant([9.])
}, {
"transformed_y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["transformed_x"]
loss = constant_op.constant([2.])
update_global_step = variables.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn, steps=1,
metrics={"label":
metric_spec.MetricSpec(lambda predictions, labels: labels)})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"x": constant_op.constant([9.])
}, {
"y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = variables.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator_with_fe_fn = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
def custom_optimizer():
return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
predictions = np.array(list(classifier.predict_classes(x_test)))
score = accuracy_score(y_test, predictions)
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
DonBeo/scikit-learn | doc/conf.py | 16 | 8442 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
jhaux/tensorflow | tensorflow/contrib/keras/python/keras/datasets/cifar100.py | 57 | 2135 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR100 small image classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.datasets.cifar import load_batch
from tensorflow.contrib.keras.python.keras.utils.data_utils import get_file
def load_data(label_mode='fine'):
"""Loads CIFAR100 dataset.
Arguments:
label_mode: one of "fine", "coarse".
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
Raises:
ValueError: in case of invalid `label_mode`.
"""
if label_mode not in ['fine', 'coarse']:
    raise ValueError('label_mode must be one of "fine", "coarse".')
dirname = 'cifar-100-python'
origin = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
path = get_file(dirname, origin=origin, untar=True)
fpath = os.path.join(path, 'train')
x_train, y_train = load_batch(fpath, label_key=label_mode + '_labels')
fpath = os.path.join(path, 'test')
x_test, y_test = load_batch(fpath, label_key=label_mode + '_labels')
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if K.image_data_format() == 'channels_last':
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
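# Usage sketch: downloads CIFAR-100 on first call and prints the array shapes;
# 'fine' labels have 100 classes, 'coarse' labels have 20 superclasses.
if __name__ == '__main__':
  (x_train, y_train), (x_test, y_test) = load_data(label_mode='fine')
  print(x_train.shape, y_train.shape)  # (50000, 32, 32, 3) (50000, 1) with channels_last
  print(x_test.shape, y_test.shape)    # (10000, 32, 32, 3) (10000, 1)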
| apache-2.0 |
googlearchive/rgc-models | response_model/python/metric_learning/end_to_end/ln_model.py | 1 | 6767 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Learn LN model for multiple cells in a population."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
import tensorflow.contrib.slim as slim
from absl import app
from absl import gfile
import numpy as np, h5py,numpy
import scipy.io as sio
from numpy.random import RandomState
# for plotting stuff
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pickle
import retina.response_model.python.metric_learning.end_to_end.utils as utils
import retina.response_model.python.metric_learning.end_to_end.data_util as data_util
import retina.response_model.python.metric_learning.end_to_end.config as config
FLAGS = tf.app.flags.FLAGS
def main(unused_argv=()):
# Load stimulus-response data
datasets = gfile.ListDirectory(FLAGS.src_dir)
responses = []
print(datasets)
for icnt, idataset in enumerate([datasets]): #TODO(bhaishahster): HACK.
fullpath = os.path.join(FLAGS.src_dir, idataset)
if gfile.IsDirectory(fullpath):
key = 'stim_%d' % icnt
op = data_util.get_stimulus_response(FLAGS.src_dir, idataset, key)
stimulus, resp, dimx, dimy, num_cell_types = op
responses += resp
for idataset in range(len(responses)):
k, b, ttf = fit_ln_population(responses[idataset]['responses'], stimulus) # Use FLAGS.taskID
save_dict = {'k': k, 'b': b, 'ttf': ttf}
save_analysis_filename = os.path.join(FLAGS.save_folder,
responses[idataset]['piece']
+ '_ln_model.pkl')
pickle.dump(save_dict, gfile.Open(save_analysis_filename, 'w'))
def fit_ln_population(response, stimulus, reg=0.00001):
leng = np.minimum(response.shape[0], stimulus.shape[0])
stimulus = stimulus[:leng, :, :]
response = response[:leng, :]
n_cells = response.shape[1]
stimx = stimulus.shape[1]
stimy = stimulus.shape[2]
stim_2d = np.reshape(stimulus, [-1, stimx*stimy])
# sta = stim_2d[:-7].T.dot(response[7:, :])
with tf.Graph().as_default():
with tf.Session() as sess:
stim_tf = tf.placeholder(tf.float32) # T x stimx x stimy
resp_tf = tf.placeholder(tf.float32) # T x # nells
k_tf = tf.Variable(np.zeros((stimx, stimy, n_cells)).astype(np.float32))
b_tf = tf.Variable(np.float32(np.zeros(n_cells)))
stim_tf_flat = tf.reshape(stim_tf, [-1, stimx * stimy])
# convolve each pixel in time.
ttf_tf = tf.Variable(0.01 * np.ones(30).astype(np.float32))
tfd = tf.expand_dims
ttf_4d = tfd(tfd(tfd(ttf_tf, 1), 2), 3)
stim_pad = tf.pad(stim_tf_flat, np.array([[29, 0], [0, 0]]).astype(np.int))
stim_4d = tfd(tfd(tf.transpose(stim_pad, [1, 0]), 2), 3)
stim_smooth = tf.nn.conv2d(stim_4d, ttf_4d, strides=[1, 1, 1, 1], padding="VALID")
stim_smooth_2d = tf.squeeze(tf.transpose(stim_smooth, [2, 1, 0, 3]))
k_tf_flat = tf.reshape(k_tf, [stimx*stimy, n_cells])
lam_raw = tf.matmul(stim_smooth_2d, k_tf_flat) + b_tf
lam = tf.exp(lam_raw)
loss = tf.reduce_mean(lam) - tf.reduce_mean(resp_tf * lam_raw)
train_op_part = tf.train.AdamOptimizer(0.01).minimize(loss)
# Locally reweighted L1
neighbor_mat = utils.get_neighbormat(np.ones((stimx, stimy)))
n_mat = tf.constant(neighbor_mat.astype(np.float32))
eps_neigh = 0.001
with tf.control_dependencies([train_op_part]):
wts_tf = 1 / (tf.matmul(n_mat, tf.reshape(tf.abs(k_tf), [stimx * stimy, n_cells])) + eps_neigh)
wts_tf_3d = tf.reshape(wts_tf, [stimx, stimy, n_cells])
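        # The update below applies soft-thresholding (the proximal operator of the
        # reweighted L1 penalty): relu(k - t) - relu(-k - t) shrinks each weight
        # toward zero by t = reg * wts, with a larger t where the local neighborhood
        # of the kernel has small magnitude.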
proj_k = tf.assign(k_tf, tf.nn.relu(k_tf - wts_tf_3d * reg) - tf.nn.relu(-k_tf - wts_tf_3d * reg))
train_op = tf.group(train_op_part, proj_k)
sess.run(tf.global_variables_initializer())
# loss_np_prev = np.inf
eps = 1e-4
for iiter in range(10000):
tms = np.random.randint(leng - 10000)
_, loss_np = sess.run([train_op, loss],
feed_dict={stim_tf : stimulus[tms:tms+10000, :].astype(np.float32),
resp_tf : response[tms:tms+10000, :].astype(np.float32)})
print(loss_np)
# if np.abs(loss_np - loss_np_prev) < eps:
# break
#else:
# loss_np_prev = loss_np
'''
plt.ion()
plt.subplot(2, 1, 1)
plt.cla()
plt.plot(sess.run(ttf_tf))
plt.subplot(2, 1, 2)
plt.cla()
k_np = sess.run(k_tf)
plt.imshow(k_np[:, :, 23].T, interpolation='nearest', cmap='gray')
plt.show()
plt.draw()
plt.pause(0.05)
'''
k = sess.run(k_tf)
b = sess.run(b_tf)
ttf = sess.run(ttf_tf)
return k, b, ttf
def predict_responses_ln(stimulus, k, b, ttf, n_trials=1):
with tf.Graph().as_default():
with tf.Session() as sess:
      stimx, stimy, n_cells = k.shape  # recover filter dimensions from the fitted kernel
      stim_tf = tf.placeholder(tf.float32) # T x stimx x stimy
k_tf = tf.constant(k.astype(np.float32))
b_tf = tf.constant(np.float32(b))
stim_tf_flat = tf.reshape(stim_tf, [-1, stimx * stimy])
# convolve each pixel in time.
ttf_tf = tf.constant(ttf)
tfd = tf.expand_dims
ttf_4d = tfd(tfd(tfd(ttf_tf, 1), 2), 3)
stim_pad = tf.pad(stim_tf_flat, np.array([[29, 0], [0, 0]]).astype(np.int))
stim_4d = tfd(tfd(tf.transpose(stim_pad, [1, 0]), 2), 3)
stim_smooth = tf.nn.conv2d(stim_4d, ttf_4d, strides=[1, 1, 1, 1], padding="VALID")
stim_smooth_2d = tf.squeeze(tf.transpose(stim_smooth, [2, 1, 0, 3]))
k_tf_flat = tf.reshape(k_tf, [stimx*stimy, n_cells])
lam_raw = tf.matmul(stim_smooth_2d, k_tf_flat) + b_tf
lam = tf.nn.softplus(lam_raw)
sess.run(tf.global_variables_initializer())
lam_np = sess.run(lam, feed_dict={stim_tf: stimulus.astype(np.float32)})
# repeat lam_np for number of trials
lam_np = np.repeat(np.expand_dims(lam_np, 0), n_trials, axis=0)
spikes = np.random.poisson(lam_np)
return spikes, lam_np
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
ottogroup/dstoolbox | dstoolbox/cluster.py | 1 | 3828 | """Additional clustering algorithms and estimators."""
import numpy as np
from scipy.cluster.hierarchy import fcluster
from scipy.cluster.hierarchy import linkage
from sklearn.base import BaseEstimator
from sklearn.base import ClusterMixin
from sklearn.utils import check_array
def hierarchical_clustering(
X,
max_dist=0.5,
method='single',
metric='euclidean',
criterion='inconsistent',
):
"""Performs hierarchical/agglomerative clustering on the input
array.
Parameters
----------
max_dist : float (default=0.5)
Maximum allowed distance for two clusters to be merged.
method : str (default='single')
How distances are applied, see scipy.cluster.hierarchy.linkage
documentation.
metric : str (default='euclidean')
Which distance metric to use. See
scipy.cluster.hierarchy.linkage documentation.
criterion : str (default='inconsistent')
Criterion to use in forming flat clusters. See
scipy.cluster.hierarchy.fcluster documentation.
"""
# pylint: disable=len-as-condition
if len(X) < 1:
return np.array([])
if len(X) == 1:
return np.array([0])
labels = fcluster(
linkage(
X,
method=method,
metric=metric,
),
t=max_dist,
criterion=criterion,
)
return labels
class HierarchicalClustering(BaseEstimator, ClusterMixin):
"""Use hierarchical clustering with cut-off value.
Similar to sklearn.cluster.hierarchical.linkage_tree but does not
require to indicate the number of clusters beforehand. Instead,
the number of clusters is determined dynamically by use of the
cut-off value `max_dist`. Therefore, the number of different
clusters will depend on the input data, similarly to DBSCAN.
Note: `HierarchicalClustering` does not support sparse
matrices. If you want to use sparse matrices, pre-compute the
pair-wise distance matrix (e.g. with
`scipy.spatial.distance.pdist`), transform it using
`scipy.spatial.distance.squareform`, and pass the result to this
estimator with parameter `metric` set to None.
Parameters
----------
max_dist : float (default=0.5)
Maximum allowed distance for two clusters to be merged.
method : str (default='single')
How distances are applied, see scipy.cluster.hierarchy.linkage
documentation.
metric : str (default='euclidean')
Which distance metric to use. See
scipy.cluster.hierarchy.linkage documentation.
criterion : str (default='inconsistent')
Criterion to use in forming flat clusters. See
scipy.cluster.hierarchy.fcluster documentation.
Attributes
----------
labels_ : array [n_samples]
cluster labels for each point
"""
def __init__(
self,
max_dist=0.5,
method='single',
metric='euclidean',
criterion='inconsistent',
):
self.max_dist = max_dist
self.method = method
self.metric = metric
self.criterion = criterion
# pylint: disable=attribute-defined-outside-init,unused-argument
def fit(self, X, y=None, **fit_params):
"""Fit the hierarchical clustering on the data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The samples a.k.a. observations.
Returns
-------
self : instance of HierarchicalClustering
"""
X = check_array(X, ensure_min_samples=2, estimator=self)
self.labels_ = hierarchical_clustering(
X,
max_dist=self.max_dist,
method=self.method,
metric=self.metric,
criterion=self.criterion,
)
return self
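# Usage sketch: a minimal fit on a tiny dense array; the points and max_dist
# are illustrative assumptions chosen so the two obvious groups separate
# (criterion='distance' thresholds the cophenetic distance directly).
if __name__ == "__main__":
    X_demo = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
    model = HierarchicalClustering(max_dist=1.0, criterion='distance')
    print(model.fit(X_demo).labels_)  # two distinct cluster labels expected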
| apache-2.0 |
raphaelrubino/nid | nn/mono/nid.py | 1 | 4321 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy as np
np.random.seed(1337)
from keras.models import Sequential
from keras.layers import Embedding, Dropout, Dense, Flatten
from keras.utils import np_utils
from sklearn.utils import shuffle
import time
import sys
class Neural_information_density():
def __init__( self, context, target, max_features, max_length, batch_size, valid_size ):
self.context = context
self.target = target
self.batch = batch_size
self.valid_size = valid_size
nb_instances = len( self.context )
print( "Instances: {0}".format( nb_instances ), flush = True )
self.valid_size = np.int( valid_size * nb_instances )
self.train_size = np.int( nb_instances - self.valid_size )
self.max_features = max_features
self.max_length = max_length
self.model = -1
self.build_train_valid()
self.build_batched_data()
def build_train_valid( self ):
self.context, self.target = shuffle( self.context, self.target )
self.context_train = self.context[ :self.train_size ]
self.target_train = self.target[ :self.train_size ]
self.context_valid = self.context[ self.train_size: ]
self.target_valid = self.target[ self.train_size: ]
def build_batched_data( self ):
self.batch_context_train = np.asarray( [ np.asarray( self.context_train[ x : x + self.batch ] ) for x in range( 0, len( self.context_train ), self.batch ) ] )
self.batch_target_train = np.asarray( [ np.asarray( self.target_train[ x : x + self.batch ] ) for x in range( 0, len( self.target_train ), self.batch ) ] )
self.batch_context_valid = np.asarray( [ np.asarray( self.context_valid[ x : x + self.batch ] ) for x in range( 0, len( self.context_valid ), self.batch ) ] )
self.batch_target_valid = np.asarray( [ np.asarray( self.target_valid[ x : x + self.batch ] ) for x in range( 0, len( self.target_valid ), self.batch ) ] )
def get_model( self ):
return self.model
def save_architecture( self, filename ):
with open( filename + '.architecture.json', "w" ) as f:
f.write( self.model.to_json() )
def save_weights( self, filename ):
self.model.save_weights( filename + '.weights.h5', overwrite = True )
def get_default_model( self, embedding, dropout ):
model = Sequential()
model.add( Embedding( self.max_features, embedding, input_length = self.max_length ) )
model.add( Flatten() )
model.add( Dropout( dropout ) )
model.add( Dense( self.max_features, activation = 'softmax' ) )
return model
def train_model( self ):
train_loss = 0.0
train_acc = 0.0
for j in range( self.batch_target_train.shape[ 0 ] ):
loss, metrics = self.model.train_on_batch( self.batch_context_train[ j ], \
np_utils.to_categorical( self.batch_target_train[ j ], num_classes = self.max_features ) )
train_loss += loss
train_acc += metrics
		train_loss /= self.batch_target_train.shape[ 0 ]  # average over the number of training batches
		train_acc /= self.batch_target_train.shape[ 0 ]
return train_loss, train_acc
def valid_model( self ):
valid_loss = 0.0
valid_acc = 0.0
for k in range( self.batch_target_valid.shape[ 0 ] ):
loss, metrics = self.model.test_on_batch( self.batch_context_valid[ k ], \
np_utils.to_categorical( self.batch_target_valid[ k ], num_classes = self.max_features ) )
valid_loss += loss
valid_acc += metrics
		valid_loss /= self.batch_target_valid.shape[ 0 ]  # average over the number of validation batches
		valid_acc /= self.batch_target_valid.shape[ 0 ]
return valid_loss, valid_acc
def train( self, embedding_size, dropout, nb_epochs, out_model ):
print( "Building model", flush = True )
self.model = self.get_default_model( embedding_size, dropout )
self.model.compile( optimizer = 'RMSprop', loss = 'categorical_crossentropy', metrics = [ 'accuracy' ] )
best_acc = np.float( 0.0 )
best_loss = np.float( 999.9 )
for i in range( nb_epochs ):
time_start = time.time()
print( "Epoch {0}".format( i + 1 ), flush = True )
			train_loss, train_acc = self.train_model()
			valid_loss, valid_acc = self.valid_model()
if best_acc < valid_acc:
best_acc = valid_acc
self.save_weights( "{0}.acc_{1}".format( out_model, np.round( best_acc, 3 ) ) )
self.save_architecture( "{0}.acc_{1}".format( out_model, np.round( best_acc, 3 ) ) )
print( "train loss {0} -- acc: {1} ---- valid loss: {2} -- acc: {3}".format( train_loss, train_acc, valid_loss, valid_acc ), flush = True )
time_elapsed = time.time() - time_start
print( "{0} seconds".format( time_elapsed ), flush = True )
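# Usage sketch: builds the estimator on small random index data to show the
# expected input shapes and call sequence; all sizes and hyper-parameters are
# illustrative assumptions, and training writes weight files to disk.
if __name__ == "__main__":
	max_features = 50
	max_length = 5
	context = np.random.randint( max_features, size = ( 500, max_length ) )
	target = np.random.randint( max_features, size = ( 500, ) )
	nid = Neural_information_density( context, target, max_features, max_length, batch_size = 32, valid_size = 0.2 )
	nid.train( embedding_size = 16, dropout = 0.2, nb_epochs = 1, out_model = "nid_example" )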
| mit |
ChadFulton/statsmodels | statsmodels/tsa/ar_model.py | 1 | 33794 | # -*- coding: utf-8 -*-
from __future__ import division
from statsmodels.compat.python import iteritems, range, string_types, lmap, long
import numpy as np
from numpy import dot, identity
from numpy.linalg import inv, slogdet
from scipy.stats import norm
from statsmodels.regression.linear_model import OLS
from statsmodels.tsa.tsatools import (lagmat, add_trend,
_ar_transparams, _ar_invtransparams)
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.model as base
from statsmodels.tools.decorators import (resettable_cache,
cache_readonly, cache_writable)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.tsa.kalmanf.kalmanfilter import KalmanFilter
import statsmodels.base.wrapper as wrap
from statsmodels.tsa.vector_ar import util
__all__ = ['AR']
def sumofsq(x, axis=0):
"""Helper function to calculate sum of squares along first axis"""
return np.sum(x**2, axis=axis)
def _check_ar_start(start, k_ar, method, dynamic):
if (method == 'cmle' or dynamic) and start < k_ar:
raise ValueError("Start must be >= k_ar for conditional MLE "
"or dynamic forecast. Got %d" % start)
def _ar_predict_out_of_sample(y, params, p, k_trend, steps, start=0):
mu = params[:k_trend] if k_trend else 0 # only have to worry constant
arparams = params[k_trend:][::-1] # reverse for dot
# dynamic endogenous variable
endog = np.zeros(p + steps) # this is one too big but doesn't matter
if start:
endog[:p] = y[start-p:start]
else:
endog[:p] = y[-p:]
forecast = np.zeros(steps)
for i in range(steps):
fcast = mu + np.dot(arparams, endog[i:i+p])
forecast[i] = fcast
endog[i + p] = fcast
return forecast
class AR(tsbase.TimeSeriesModel):
__doc__ = tsbase._tsa_doc % {"model" : "Autoregressive AR(p) model",
"params" : """endog : array-like
1-d endogenous response variable. The independent variable.""",
"extra_params" : base._missing_param_doc,
"extra_sections" : ""}
def __init__(self, endog, dates=None, freq=None, missing='none'):
super(AR, self).__init__(endog, None, dates, freq, missing=missing)
endog = self.endog # original might not have been an ndarray
if endog.ndim == 1:
endog = endog[:, None]
self.endog = endog # to get shapes right
elif endog.ndim > 1 and endog.shape[1] != 1:
raise ValueError("Only the univariate case is implemented")
def initialize(self):
pass
def _transparams(self, params):
"""
Transforms params to induce stationarity/invertability.
Reference
---------
Jones(1980)
"""
p = self.k_ar
k = self.k_trend
newparams = params.copy()
newparams[k:k+p] = _ar_transparams(params[k:k+p].copy())
return newparams
def _invtransparams(self, start_params):
"""
Inverse of the Jones reparameterization
"""
p = self.k_ar
k = self.k_trend
newparams = start_params.copy()
newparams[k:k+p] = _ar_invtransparams(start_params[k:k+p].copy())
return newparams
def _presample_fit(self, params, start, p, end, y, predictedvalues):
"""
Return the pre-sample predicted values using the Kalman Filter
Notes
-----
See predict method for how to use start and p.
"""
k = self.k_trend
# build system matrices
T_mat = KalmanFilter.T(params, p, k, p)
R_mat = KalmanFilter.R(params, p, k, 0, p)
# Initial State mean and variance
alpha = np.zeros((p, 1))
Q_0 = dot(inv(identity(p**2)-np.kron(T_mat, T_mat)),
dot(R_mat, R_mat.T).ravel('F'))
Q_0 = Q_0.reshape(p, p, order='F') # TODO: order might need to be p+k
P = Q_0
Z_mat = KalmanFilter.Z(p)
for i in range(end): # iterate p-1 times to fit presample
v_mat = y[i] - dot(Z_mat, alpha)
F_mat = dot(dot(Z_mat, P), Z_mat.T)
Finv = 1./F_mat # inv. always scalar
K = dot(dot(dot(T_mat, P), Z_mat.T), Finv)
# update state
alpha = dot(T_mat, alpha) + dot(K, v_mat)
L = T_mat - dot(K, Z_mat)
P = dot(dot(T_mat, P), L.T) + dot(R_mat, R_mat.T)
#P[0,0] += 1 # for MA part, R_mat.R_mat.T above
if i >= start - 1: # only record if we ask for it
predictedvalues[i + 1 - start] = dot(Z_mat, alpha)
def _get_prediction_index(self, start, end, dynamic, index=None):
method = getattr(self, 'method', 'mle')
k_ar = getattr(self, 'k_ar', 0)
if start is None:
if method == 'mle' and not dynamic:
start = 0
else: # can't do presample fit for cmle or dynamic
start = k_ar
start = self._index[start]
if end is None:
end = self._index[-1]
start, end, out_of_sample, prediction_index = (
super(AR, self)._get_prediction_index(start, end, index))
# This replaces the _validate() call
if 'mle' not in method and start < k_ar:
raise ValueError("Start must be >= k_ar for conditional MLE or "
"dynamic forecast. Got %s" % start)
# Other validation
_check_ar_start(start, k_ar, method, dynamic)
return start, end, out_of_sample, prediction_index
def predict(self, params, start=None, end=None, dynamic=False):
"""
Returns in-sample and out-of-sample prediction.
Parameters
----------
params : array
The fitted model parameters.
start : int, str, or datetime
Zero-indexed observation number at which to start forecasting, ie.,
the first forecast is start. Can also be a date string to
parse or a datetime type.
end : int, str, or datetime
Zero-indexed observation number at which to end forecasting, ie.,
the first forecast is start. Can also be a date string to
parse or a datetime type.
dynamic : bool
The `dynamic` keyword affects in-sample prediction. If dynamic
is False, then the in-sample lagged values are used for
prediction. If `dynamic` is True, then in-sample forecasts are
used in place of lagged dependent variables. The first forecasted
value is `start`.
Returns
-------
predicted values : array
Notes
-----
The linear Gaussian Kalman filter is used to return pre-sample fitted
values. The exact initial Kalman Filter is used. See Durbin and Koopman
in the references for more information.
"""
# will return an index of a date
start, end, out_of_sample, _ = (
self._get_prediction_index(start, end, dynamic))
if start - end > 1:
raise ValueError("end is before start")
k_ar = self.k_ar
k_trend = self.k_trend
method = self.method
endog = self.endog.squeeze()
if dynamic:
out_of_sample += end - start + 1
return _ar_predict_out_of_sample(endog, params, k_ar,
k_trend, out_of_sample, start)
predictedvalues = np.zeros(end + 1 - start)
# fit pre-sample
if method == 'mle': # use Kalman Filter to get initial values
if k_trend:
mu = params[0]/(1-np.sum(params[k_trend:]))
else:
mu = 0
# modifies predictedvalues in place
if start < k_ar:
self._presample_fit(params, start, k_ar, min(k_ar-1, end),
endog[:k_ar] - mu, predictedvalues)
predictedvalues[:k_ar-start] += mu
if end < k_ar:
return predictedvalues
# just do the whole thing and truncate
fittedvalues = dot(self.X, params)
pv_start = max(k_ar - start, 0)
fv_start = max(start - k_ar, 0)
fv_end = min(len(fittedvalues), end-k_ar+1)
predictedvalues[pv_start:] = fittedvalues[fv_start:fv_end]
if out_of_sample:
forecastvalues = _ar_predict_out_of_sample(endog, params,
k_ar, k_trend,
out_of_sample)
predictedvalues = np.r_[predictedvalues, forecastvalues]
return predictedvalues
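    # Illustrative usage (sketch, not part of the original module; `endog` is a
    # 1-d series and `p` a chosen lag length):
    #   res = AR(endog).fit(maxlag=p, method='mle')
    #   res.predict(start=len(endog), end=len(endog) + 9)  # 10-step forecast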
def _presample_varcov(self, params):
"""
Returns the inverse of the presample variance-covariance.
Notes
-----
See Hamilton p. 125
"""
k = self.k_trend
p = self.k_ar
p1 = p+1
# get inv(Vp) Hamilton 5.3.7
params0 = np.r_[-1, params[k:]]
Vpinv = np.zeros((p, p), dtype=params.dtype)
for i in range(1, p1):
Vpinv[i-1, i-1:] = np.correlate(params0, params0[:i],)[:-1]
Vpinv[i-1, i-1:] -= np.correlate(params0[-i:], params0,)[:-1]
Vpinv = Vpinv + Vpinv.T - np.diag(Vpinv.diagonal())
return Vpinv
def _loglike_css(self, params):
"""
Loglikelihood of AR(p) process using conditional sum of squares
"""
nobs = self.nobs
Y = self.Y
X = self.X
ssr = sumofsq(Y.squeeze() - np.dot(X, params))
sigma2 = ssr/nobs
return (-nobs/2 * (np.log(2 * np.pi) + np.log(sigma2)) -
ssr/(2 * sigma2))
def _loglike_mle(self, params):
"""
Loglikelihood of AR(p) process using exact maximum likelihood
"""
nobs = self.nobs
X = self.X
endog = self.endog
k_ar = self.k_ar
k_trend = self.k_trend
# reparameterize according to Jones (1980) like in ARMA/Kalman Filter
if self.transparams:
params = self._transparams(params)
# get mean and variance for pre-sample lags
yp = endog[:k_ar].copy()
if k_trend:
c = [params[0]] * k_ar
else:
c = [0]
mup = np.asarray(c / (1 - np.sum(params[k_trend:])))
diffp = yp - mup[:, None]
# get inv(Vp) Hamilton 5.3.7
Vpinv = self._presample_varcov(params)
diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
ssr = sumofsq(endog[k_ar:].squeeze() - np.dot(X, params))
# concentrating the likelihood means that sigma2 is given by
sigma2 = 1./nobs * (diffpVpinv + ssr)
self.sigma2 = sigma2
logdet = slogdet(Vpinv)[1] # TODO: add check for singularity
loglike = -1/2. * (nobs * (np.log(2 * np.pi) + np.log(sigma2)) -
logdet + diffpVpinv / sigma2 + ssr / sigma2)
return loglike
def loglike(self, params):
"""
The loglikelihood of an AR(p) process
Parameters
----------
params : array
The fitted parameters of the AR model
Returns
-------
llf : float
The loglikelihood evaluated at `params`
Notes
-----
Contains constant term. If the model is fit by OLS then this returns
        the conditional maximum likelihood.
        .. math:: -\\frac{\\left(n-p\\right)}{2}\\left(\\log\\left(2\\pi\\right)+\\log\\left(\\sigma^{2}\\right)\\right)-\\frac{1}{2\\sigma^{2}}\\sum_{i}\\epsilon_{i}^{2}
If it is fit by MLE then the (exact) unconditional maximum likelihood
is returned.
        .. math:: -\\frac{n}{2}\\log\\left(2\\pi\\right)-\\frac{n}{2}\\log\\left(\\sigma^{2}\\right)+\\frac{1}{2}\\log\\left|V_{p}^{-1}\\right|-\\frac{1}{2\\sigma^{2}}\\left(y_{p}-\\mu_{p}\\right)^{\\prime}V_{p}^{-1}\\left(y_{p}-\\mu_{p}\\right)-\\frac{1}{2\\sigma^{2}}\\sum_{t=p+1}^{n}\\epsilon_{t}^{2}
where
:math:`\\mu_{p}` is a (`p` x 1) vector with each element equal to the
mean of the AR process and :math:`\\sigma^{2}V_{p}` is the (`p` x `p`)
variance-covariance matrix of the first `p` observations.
"""
#TODO: Math is on Hamilton ~pp 124-5
if self.method == "cmle":
return self._loglike_css(params)
else:
return self._loglike_mle(params)
def score(self, params):
"""
Return the gradient of the loglikelihood at params.
Parameters
----------
params : array-like
The parameter values at which to evaluate the score function.
Notes
-----
Returns numerical gradient.
"""
loglike = self.loglike
return approx_fprime(params, loglike, epsilon=1e-8)
def information(self, params):
"""
Not Implemented Yet
"""
return
def hessian(self, params):
"""
Returns numerical hessian for now.
"""
loglike = self.loglike
return approx_hess(params, loglike)
def _stackX(self, k_ar, trend):
"""
Private method to build the RHS matrix for estimation.
Columns are trend terms then lags.
"""
endog = self.endog
X = lagmat(endog, maxlag=k_ar, trim='both')
k_trend = util.get_trendorder(trend)
if k_trend:
X = add_trend(X, prepend=True, trend=trend)
self.k_trend = k_trend
return X
def select_order(self, maxlag, ic, trend='c', method='mle'):
"""
Select the lag order according to the information criterion.
Parameters
----------
maxlag : int
The highest lag length tried. See `AR.fit`.
ic : str {'aic','bic','hqic','t-stat'}
Criterion used for selecting the optimal lag length.
See `AR.fit`.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' - include constant.
'nc' - no constant.
Returns
-------
bestlag : int
Best lag according to IC.
"""
endog = self.endog
# make Y and X with same nobs to compare ICs
Y = endog[maxlag:]
self.Y = Y # attach to get correct fit stats
X = self._stackX(maxlag, trend) # sets k_trend
self.X = X
k = self.k_trend # k_trend set in _stackX
k = max(1, k) # handle if startlag is 0
results = {}
if ic != 't-stat':
for lag in range(k, maxlag+1):
# have to reinstantiate the model to keep comparable models
endog_tmp = endog[maxlag-lag:]
fit = AR(endog_tmp).fit(maxlag=lag, method=method,
full_output=0, trend=trend,
maxiter=100, disp=0)
results[lag] = eval('fit.'+ic)
bestic, bestlag = min((res, k) for k, res in iteritems(results))
else: # choose by last t-stat.
stop = 1.6448536269514722 # for t-stat, norm.ppf(.95)
for lag in range(maxlag, k - 1, -1):
# have to reinstantiate the model to keep comparable models
endog_tmp = endog[maxlag - lag:]
fit = AR(endog_tmp).fit(maxlag=lag, method=method,
full_output=0, trend=trend,
maxiter=35, disp=-1)
bestlag = 0
if np.abs(fit.tvalues[-1]) >= stop:
bestlag = lag
break
return bestlag
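    # Illustrative sketch (not part of the original module): `fit` calls this
    # method internally when an information criterion is requested, e.g.
    #   res = AR(data).fit(maxlag=12, ic='aic', method='mle')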
def fit(self, maxlag=None, method='cmle', ic=None, trend='c',
transparams=True, start_params=None, solver='lbfgs', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
"""
Fit the unconditional maximum likelihood of an AR(p) process.
Parameters
----------
maxlag : int
If `ic` is None, then maxlag is the lag length used in fit. If
`ic` is specified then maxlag is the highest lag order used to
select the correct lag order. If maxlag is None, the default is
round(12*(nobs/100.)**(1/4.))
method : str {'cmle', 'mle'}, optional
cmle - Conditional maximum likelihood using OLS
mle - Unconditional (exact) maximum likelihood. See `solver`
and the Notes.
        ic : str {'aic','bic','hqic','t-stat'}
Criterion used for selecting the optimal lag length.
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
hqic - Hannan-Quinn Information Criterion
If any of the information criteria are selected, the lag length
which results in the lowest value is selected. If t-stat, the
model starts with maxlag and drops a lag until the highest lag
has a t-stat that is significant at the 95 % level.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' - include constant.
'nc' - no constant.
The below can be specified if method is 'mle'
transparams : bool, optional
Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980).
start_params : array-like, optional
A first guess on the parameters. Default is cmle estimates.
solver : str or None, optional
Solver to be used if method is 'mle'. The default is 'lbfgs'
(limited memory Broyden-Fletcher-Goldfarb-Shanno). Other choices
are 'bfgs', 'newton' (Newton-Raphson), 'nm' (Nelder-Mead),
'cg' - (conjugate gradient), 'ncg' (non-conjugate gradient),
and 'powell'.
maxiter : int, optional
The maximum number of function evaluations. Default is 35.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is output.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
kwargs
See Notes for keyword arguments that can be passed to fit.
References
----------
Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to time
series with missing observations." `Technometrics`. 22.3.
389-95.
See also
--------
statsmodels.base.model.LikelihoodModel.fit
"""
method = method.lower()
if method not in ['cmle', 'yw', 'mle']:
raise ValueError("Method %s not recognized" % method)
self.method = method
self.trend = trend
self.transparams = transparams
nobs = len(self.endog) # overwritten if method is 'cmle'
endog = self.endog
if maxlag is None:
maxlag = int(round(12*(nobs/100.)**(1/4.)))
k_ar = maxlag # stays this if ic is None
# select lag length
if ic is not None:
ic = ic.lower()
if ic not in ['aic', 'bic', 'hqic', 't-stat']:
raise ValueError("ic option %s not understood" % ic)
k_ar = self.select_order(k_ar, ic, trend, method)
self.k_ar = k_ar # change to what was chosen by ic
# redo estimation for best lag
# make LHS
Y = endog[k_ar:, :]
# make lagged RHS
X = self._stackX(k_ar, trend) # sets self.k_trend
k_trend = self.k_trend
self.exog_names = util.make_lag_names(self.endog_names, k_ar, k_trend)
self.Y = Y
self.X = X
if method == "cmle": # do OLS
arfit = OLS(Y, X).fit()
params = arfit.params
self.nobs = nobs - k_ar
self.sigma2 = arfit.ssr/arfit.nobs # needed for predict fcasterr
elif method == "mle":
solver = solver.lower()
self.nobs = nobs
if start_params is None:
start_params = OLS(Y, X).fit().params
else:
if len(start_params) != k_trend + k_ar:
raise ValueError("Length of start params is %d. There"
" are %d parameters." %
(len(start_params), k_trend + k_ar))
start_params = self._invtransparams(start_params)
if solver == 'lbfgs':
kwargs.setdefault('pgtol', 1e-8)
kwargs.setdefault('factr', 1e2)
kwargs.setdefault('m', 12)
kwargs.setdefault('approx_grad', True)
mlefit = super(AR, self).fit(start_params=start_params,
method=solver, maxiter=maxiter,
full_output=full_output, disp=disp,
callback=callback, **kwargs)
params = mlefit.params
if self.transparams:
params = self._transparams(params)
self.transparams = False # turn off now for other results
# don't use yw, because we can't estimate the constant
#elif method == "yw":
# params, omega = yule_walker(endog, order=maxlag,
# method="mle", demean=False)
# # how to handle inference after Yule-Walker?
# self.params = params #TODO: don't attach here
# self.omega = omega
pinv_exog = np.linalg.pinv(X)
normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)
arfit = ARResults(self, params, normalized_cov_params)
if method == 'mle' and full_output:
arfit.mle_retvals = mlefit.mle_retvals
arfit.mle_settings = mlefit.mle_settings
return ARResultsWrapper(arfit)
class ARResults(tsbase.TimeSeriesModelResults):
"""
Class to hold results from fitting an AR model.
Parameters
----------
model : AR Model instance
Reference to the model that is fit.
params : array
The fitted parameters from the AR Model.
normalized_cov_params : array
inv(dot(X.T,X)) where X is the lagged values.
scale : float, optional
An estimate of the scale of the model.
Returns
-------
**Attributes**
aic : float
        Akaike Information Criterion using Lutkepohl's definition.
:math:`log(sigma) + 2*(1 + k_ar + k_trend)/nobs`
bic : float
Bayes Information Criterion
:math:`\\log(\\sigma) + (1 + k_ar + k_trend)*\\log(nobs)/nobs`
bse : array
The standard errors of the estimated parameters. If `method` is 'cmle',
then the standard errors that are returned are the OLS standard errors
of the coefficients. If the `method` is 'mle' then they are computed
using the numerical Hessian.
fittedvalues : array
The in-sample predicted values of the fitted AR model. The `k_ar`
initial values are computed via the Kalman Filter if the model is
fit by `mle`.
fpe : float
Final prediction error using Lütkepohl's definition
((n_totobs+k_trend)/(n_totobs-k_ar-k_trend))*sigma
hqic : float
Hannan-Quinn Information Criterion.
k_ar : float
Lag length. Sometimes used as `p` in the docs.
k_trend : float
The number of trend terms included. 'nc'=0, 'c'=1.
llf : float
The loglikelihood of the model evaluated at `params`. See `AR.loglike`
model : AR model instance
A reference to the fitted AR model.
nobs : float
The number of available observations `nobs` - `k_ar`
n_totobs : float
The number of total observations in `endog`. Sometimes `n` in the docs.
params : array
The fitted parameters of the model.
pvalues : array
The p values associated with the standard errors.
resid : array
The residuals of the model. If the model is fit by 'mle' then the
pre-sample residuals are calculated using fittedvalues from the Kalman
Filter.
roots : array
The roots of the AR process are the solution to
(1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0
Stability requires that the roots in modulus lie outside the unit
circle.
scale : float
Same as sigma2
sigma2 : float
The variance of the innovations (residuals).
trendorder : int
The polynomial order of the trend. 'nc' = None, 'c' or 't' = 0,
'ct' = 1, etc.
tvalues : array
The t-values associated with `params`.
"""
_cache = {} # for scale setter
def __init__(self, model, params, normalized_cov_params=None, scale=1.):
super(ARResults, self).__init__(model, params, normalized_cov_params,
scale)
self._cache = resettable_cache()
self.nobs = model.nobs
n_totobs = len(model.endog)
self.n_totobs = n_totobs
self.X = model.X # copy?
self.Y = model.Y
k_ar = model.k_ar
self.k_ar = k_ar
k_trend = model.k_trend
self.k_trend = k_trend
trendorder = None
if k_trend > 0:
trendorder = k_trend - 1
self.trendorder = trendorder
#TODO: cmle vs mle?
self.df_model = k_ar + k_trend
self.df_resid = self.model.df_resid = n_totobs - self.df_model
@cache_writable()
def sigma2(self):
model = self.model
if model.method == "cmle": # do DOF correction
return 1. / self.nobs * sumofsq(self.resid)
else:
return self.model.sigma2
    @cache_writable() # for compatibility with RegressionResults
def scale(self):
return self.sigma2
@cache_readonly
def bse(self): # allow user to specify?
if self.model.method == "cmle": # uses different scale/sigma def.
resid = self.resid
ssr = np.dot(resid, resid)
ols_scale = ssr / (self.nobs - self.k_ar - self.k_trend)
return np.sqrt(np.diag(self.cov_params(scale=ols_scale)))
else:
hess = approx_hess(self.params, self.model.loglike)
return np.sqrt(np.diag(-np.linalg.inv(hess)))
@cache_readonly
def pvalues(self):
return norm.sf(np.abs(self.tvalues))*2
@cache_readonly
def aic(self):
#JP: this is based on loglike with dropped constant terms ?
# Lutkepohl
#return np.log(self.sigma2) + 1./self.model.nobs * self.k_ar
# Include constant as estimated free parameter and double the loss
return np.log(self.sigma2) + 2 * (1 + self.df_model)/self.nobs
        # Stata definition
#nobs = self.nobs
#return -2 * self.llf/nobs + 2 * (self.k_ar+self.k_trend)/nobs
@cache_readonly
def hqic(self):
nobs = self.nobs
# Lutkepohl
# return np.log(self.sigma2)+ 2 * np.log(np.log(nobs))/nobs * self.k_ar
# R uses all estimated parameters rather than just lags
return (np.log(self.sigma2) + 2 * np.log(np.log(nobs))/nobs *
(1 + self.df_model))
# Stata
#nobs = self.nobs
#return -2 * self.llf/nobs + 2 * np.log(np.log(nobs))/nobs * \
# (self.k_ar + self.k_trend)
@cache_readonly
def fpe(self):
nobs = self.nobs
df_model = self.df_model
#Lutkepohl
return ((nobs+df_model)/(nobs-df_model))*self.sigma2
@cache_readonly
def bic(self):
nobs = self.nobs
# Lutkepohl
#return np.log(self.sigma2) + np.log(nobs)/nobs * self.k_ar
# Include constant as est. free parameter
return np.log(self.sigma2) + (1 + self.df_model) * np.log(nobs)/nobs
# Stata
# return -2 * self.llf/nobs + np.log(nobs)/nobs * (self.k_ar + \
# self.k_trend)
@cache_readonly
def resid(self):
        #NOTE: uses fittedvalues because it calculates presample values for mle
model = self.model
endog = model.endog.squeeze()
if model.method == "cmle": # elimate pre-sample
return endog[self.k_ar:] - self.fittedvalues
else:
return model.endog.squeeze() - self.fittedvalues
#def ssr(self):
# resid = self.resid
# return np.dot(resid, resid)
@cache_readonly
def roots(self):
k = self.k_trend
return np.roots(np.r_[1, -self.params[k:]]) ** -1
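    # Illustrative check (sketch): per the class docstring, stability requires
    # all roots to lie outside the unit circle, i.e. np.all(np.abs(res.roots) > 1).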
@cache_readonly
def fittedvalues(self):
return self.model.predict(self.params)
def predict(self, start=None, end=None, dynamic=False):
params = self.params
predictedvalues = self.model.predict(params, start, end, dynamic)
return predictedvalues
# start, end, out_of_sample, prediction_index = (
# self.model._get_prediction_index(start, end, index))
##TODO: return forecast errors and confidence intervals
#from statsmodels.tsa.arima_process import arma2ma
#ma_rep = arma2ma(np.r_[1,-params[::-1]], [1], out_of_sample)
#fcasterr = np.sqrt(self.sigma2 * np.cumsum(ma_rep**2))
preddoc = AR.predict.__doc__.split('\n')
extra_doc = (""" confint : bool, float
Whether to return confidence intervals. If `confint` == True,
95 % confidence intervals are returned. Else if `confint` is a
float, then it is assumed to be the alpha value of the confidence
interval. That is confint == .05 returns a 95% confidence
interval, and .10 would return a 90% confidence interval."""
).split('\n')
#ret_doc = """
# fcasterr : array-like
# confint : array-like
#"""
predict.__doc__ = '\n'.join(preddoc[:5] + preddoc[7:20] + extra_doc +
preddoc[20:])
class ARResultsWrapper(wrap.ResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(ARResultsWrapper, ARResults) # noqa:E305
if __name__ == "__main__":
import statsmodels.api as sm
sunspots = sm.datasets.sunspots.load(as_pandas=False)
    # Why does R demean the data by default?
ar_ols = AR(sunspots.endog)
res_ols = ar_ols.fit(maxlag=9)
ar_mle = AR(sunspots.endog)
res_mle_bfgs = ar_mle.fit(maxlag=9, method="mle", solver="bfgs",
maxiter=500, gtol=1e-10)
# res_mle2 = ar_mle.fit(maxlag=1, method="mle", maxiter=500, penalty=True,
# tol=1e-13)
# ar_yw = AR(sunspots.endog)
# res_yw = ar_yw.fit(maxlag=4, method="yw")
# # Timings versus talkbox
# from timeit import default_timer as timer
# print "Time AR fit vs. talkbox"
# # generate a long series of AR(2) data
#
# nobs = 1000000
# y = np.empty(nobs)
# y[0:2] = 0
# for i in range(2,nobs):
# y[i] = .25 * y[i-1] - .75 * y[i-2] + np.random.rand()
#
# mod_sm = AR(y)
# t = timer()
# res_sm = mod_sm.fit(method="yw", trend="nc", demean=False, maxlag=2)
# t_end = timer()
# print str(t_end - t) + " seconds for sm.AR with yule-walker, 2 lags"
# try:
# import scikits.talkbox as tb
# except:
# raise ImportError("You need scikits.talkbox installed for timings")
# t = timer()
# mod_tb = tb.lpc(y, 2)
# t_end = timer()
# print str(t_end - t) + " seconds for talkbox.lpc"
# print """For higher lag lengths ours quickly fills up memory and starts
#thrashing the swap. Should we include talkbox C code or Cythonize the
#Levinson recursion algorithm?"""
## Try with a pandas series
import pandas
import scikits.timeseries as ts
d1 = ts.Date(year=1700, freq='A')
#NOTE: have to have yearBegin offset for annual data until parser rewrite
#should this be up to the user, or should it be done in TSM init?
#NOTE: not anymore, it's end of year now
ts_dr = ts.date_array(start_date=d1, length=len(sunspots.endog))
pandas_dr = pandas.DatetimeIndex(start=d1.datetime,
periods=len(sunspots.endog),
freq='A-DEC')
#pandas_dr = pandas_dr.shift(-1, pandas.datetools.yearBegin)
dates = np.arange(1700, 1700 + len(sunspots.endog))
dates = ts.date_array(dates, freq='A')
#sunspots = pandas.Series(sunspots.endog, index=dates)
#NOTE: pandas only does business days for dates it looks like
import datetime
dt_dates = np.asarray(lmap(datetime.datetime.fromordinal,
ts_dr.toordinal().astype(int)))
sunspots = pandas.Series(sunspots.endog, index=dt_dates)
#NOTE: pandas can't handle pre-1900 dates
mod = AR(sunspots, freq='A')
res = mod.fit(method='mle', maxlag=9)
# some data for an example in Box Jenkins
IBM = np.asarray([460, 457, 452, 459, 462, 459, 463, 479, 493, 490.])
w = np.diff(IBM)
theta = .5
| bsd-3-clause |
DonBeo/scikit-learn | examples/plot_kernel_ridge_regression.py | 229 | 6222 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
mattdelhey/kaggle-galaxy | Old/linear.py | 2 | 8352 | import numpy as np
import cv2, os, inspect, re, math, datetime
import sklearn as sk
import matplotlib.pyplot as plt
import pandas as pd
import pandas.rpy.common as com
import rpy2.robjects.lib.ggplot2 as ggplot2
import rpy2.robjects as ro
from rpy2.robjects.packages import importr
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from sklearn import preprocessing
from read import *
###### Parameters
f_in_trn = 'Data/train_28.csv'
f_in_tst = 'Data/test_28.csv'
sol_dir = 'Data/train_solutions.csv'
my_lam = 10
######
# Take care of some file i/o
my_file = re.search(r'_[0-9]+', f_in_trn).group()
my_dim = re.search(r'[0-9]+', my_file).group()
file_name = inspect.getfile(inspect.currentframe())
# Change the f_out!
f_out = 'Submissions/ls_clr_' + str(my_dim) + '.csv'
def read_X_Y(f_in_trn, f_in_tst, sol_dir, my_dim):
"""
    Read in pre-processed matrices & train solutions
"""
print(file_name + ': Reading train/test matrix w/ dim = ' + str(my_dim))
Xtrn = ensure_dim(np.loadtxt(open(f_in_trn, 'rb'), delimiter = ',', skiprows = 0))
Xtst = ensure_dim(np.loadtxt(open(f_in_tst, 'rb'), delimiter = ',', skiprows = 0))
print(file_name + ': Reading training solutions')
Ytrn = np.loadtxt(open(sol_dir, 'rb'), delimiter = ',', skiprows = 1)
return (Xtrn, Xtst, Ytrn, my_dim)
def ls_train(Xtrn, Ytrn):
"""
Train for least squares
"""
print(file_name + ': Training [least squares] regression')
model = sk.linear_model.LinearRegression()
model.fit(Xtrn, Ytrn[::, 1:])
return model
def ls_pred(model, Xtst):
"""
Predict on new matrix for least squares
"""
print(file_name + ': Predicting on new matrix')
Ytst = model.predict(Xtst)
return Ytst
def rmse(y, y_pred, ax=None):
err = np.sqrt(((y - y_pred) ** 2).mean(axis=ax))
return err
def split_data(nrows, train = .6, val = .35, test = .05):
"""
Randomly partition data into train/val/test indicies
"""
# Sample train: if odd, take ceiling --> more training obs
rows = np.arange(nrows)
trainInd = np.random.choice(rows, size=int(np.ceil(nrows*train)), replace=False)
# Sample val: first remove training rows, take floor --> less val obs
rows = np.setdiff1d(rows, trainInd)
valInd = np.random.choice(rows, size=int(np.floor(nrows*val)), replace=False)
# Sample test: just take rows that aren't in train/val
rows = np.setdiff1d(rows, valInd)
testInd = rows
return (trainInd, valInd, testInd)
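# Illustrative sanity check (sketch, not part of the pipeline): the three index
# sets should partition range(nrows) without overlap, e.g.
#   trn, val, tst = split_data(100)
#   assert len(set(trn) | set(val) | set(tst)) == 100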
def ridge_train(Xtrn, Ytrn, lam):
"""
Train for ridge
"""
print(file_name + ': Training [ridge] regression w/ reg = ' + str(lam))
model = sk.linear_model.Ridge(alpha = lam, solver = 'svd')
#model = sk.linear_model.Ridge(alpha = lam)
model.fit(Xtrn, Ytrn[::, 1:])
#print('\t first model coefficient: ' + str(model.coef_[1]))
return model
def ridge_pred(model, Xtst):
"""
Predict on new matrix for ridge
"""
print(file_name + ': \t Predicting on new matrix')
Ytst = model.predict(Xtst)
return Ytst
def ridge_cv(Xtrn, Ytrn, lam_range = np.arange(0, 10.5, 0.5), n_trials = 10):
"""
Cross-validation to find regularization param
"""
print(file_name + ': Cross-validating for reg. param.')
val_err = np.zeros((n_trials, len(lam_range)))
for i in xrange(n_trials):
(trnInd, valInd, tstInd) = split_data(Xtrn.shape[0])
Xtrn_trn = Xtrn[trnInd]; Ytrn_trn = Ytrn[trnInd]
Xtrn_val = Xtrn[valInd]; Ytrn_val = Ytrn[valInd]
Xtrn_tst = Xtrn[tstInd]; Ytrn_tst = Ytrn[tstInd]
Xtrn_trn = preprocessing.scale(Xtrn_trn)
Xtrn_tst = preprocessing.scale(Xtrn_tst)
for index, lam_i in enumerate(lam_range):
model = ridge_train(Xtrn_trn, Ytrn_trn, lam = lam_i)
Ytrn_val_pred = ridge_pred(model, Xtrn_val)
#val_err[i, index] = np.sqrt(mean_squared_error(Ytrn_val[::, 1:], Ytrn_val_pred))
val_err[i, index] = rmse(Ytrn_val[::, 1:], Ytrn_val_pred)
colmeans = np.mean(val_err, axis=0)
lam_star = lam_range[np.argmin(colmeans)]
print(file_name + ': Best lambda was found to be: ' + str(lam_star))
return (lam_star, val_err)
def ridge_cv_plot(val_err, lam_range):
"""
Source: http://rpy.sourceforge.net/rpy2/doc-2.3/html/graphics.html
"""
base = importr('base')
df = pd.DataFrame(val_err, columns = lam_range)
df = pd.melt(df)
df_r = com.convert_to_r_dataframe(df)
# Create boxplot
gp = ggplot2.ggplot(df_r)
pp = gp + \
ggplot2.aes_string(x='factor(variable)', y='value') + \
ggplot2.geom_boxplot() + \
ggplot2.ggtitle("Validation Error by Lambda")
pp.plot()
return
def learning_curve(Xtrn, Ytrn, method='linear', lam_range=[0,0.1,1,10,100]):
"""
Plot learning curve for a given method
"""
# Split data, subset data, set model
(trnInd, valInd, tstInd) = split_data(Xtrn.shape[0], .70, 0, .30)
Xtrn_trn = Xtrn[trnInd]; Ytrn_trn = Ytrn[trnInd]
Xtrn_tst = Xtrn[tstInd]; Ytrn_tst = Ytrn[tstInd]
model = sk.linear_model.LinearRegression()
# Calculate rmse for each number of obs
obs_range = range(10, len(Xtrn_trn), 2500)
trn_err = []; tst_err = []
for i in obs_range:
if ((i-10) % 10000) == 0: print '\t training example: %i' % i
if method=='linear':
model.fit(Xtrn_trn[0:i,::], Ytrn_trn[0:i, 1:])
Ytrn_trn_pred = model.predict(Xtrn_trn[0:i,::])
Ytrn_tst_pred = model.predict(Xtrn_tst[0:i,::])
trn_err.append(rmse(Ytrn_trn[0:i, 1:], Ytrn_trn_pred))
tst_err.append(rmse(Ytrn_tst[0:i, 1:], Ytrn_tst_pred))
if method=='ridge':
# Use validation set to find best lambda for a given #of obs
#print "%i %i" % (len(Xtrn[0:i,::]),len(Ytrn[0:i,1:]))
(lam_star, val_err) = ridge_cv(Xtrn[0:i,::], Ytrn[0:i,1:], lam_range, n_trials = 5)
# Train model using lambda star
model = ridge_train(Xtrn[0:i,::], Ytrn_trn[0:i,::], lam_star)
Ytrn_trn_pred = ridge_pred(model, Xtrn_trn[0:i,::])
Ytrn_tst_pred = ridge_pred(model, Xtrn_tst[0:i,::])
trn_err.append(rmse(Ytrn_trn[0:i, 1:], Ytrn_trn_pred))
tst_err.append(rmse(Ytrn_tst[0:i, 1:], Ytrn_tst_pred))
# Plot curve
plt.plot(obs_range, trn_err)
plt.plot(obs_range, tst_err)
plt.legend(['train', 'test'], loc='upper left')
plt.show()
return
def bias_var():
"""
Plot bias/var tradeoff
"""
return
def output_Ytst(Ytst, tst_dir, f_out, save_csv=True, return_Y=False):
"""
Get Ytst ready for kaggle output, save CSV
"""
# Fix up test response
print(file_name + ': Forcing [0,1] bounds')
Ytst = force_bounds(Ytst)
print(file_name + ': Adding ID column to response')
Ytst_names = get_image_names(tst_dir)
Ytst = np.c_[Ytst_names, Ytst]
# Output submission [if desired]
if (save_csv):
print(file_name + ': Saving csv to ' + f_out)
colfmt = ['%i'] + ['%f'] * (Ytst.shape[1] - 1)
np.savetxt(f_out, Ytst, delimiter = ',', fmt = colfmt)
# Return Ytst matrix [if desired]
if (return_Y):
return Ytst
return
def svm_regression(Xtrn, Xtst, Ytrn, lam):
from sklearn.svm import SVR
model = SVR(kernel = 'linear', C=1/lam)
model.fit(Xtrn, Ytrn[::, 1:])
Ytst = model.predict(Xtst)
return Ytst
def main():
"""
Run experiment
"""
(Xtrn, Xtst, Ytrn, f_out) = read_X_Y(f_in_trn, f_in_tst, sol_dir, my_dim)
#lam_range = np.arange(0, 105, 5)
#lam_range = np.array([1,100,500,1000, 5000, 10000, 100000, 1000000])
#lam_range = np.array([0, 0.2, 0.5, 1, 10, 100])
#(lam_star, val_err) = ridge_cv(Xtrn[0:5000], Ytrn[0:5000], lam_range, n_trials = 10)
#ridge_cv_plot(val_err, lam_range)
#clf = sk.linear_model.RidgeCV(alphas = lam_range)
#clf.fit(Xtrn, Ytrn[::, 1:])
# Plot learning curve and bias/var tradeoff
#learning_curve(Xtrn, Ytrn, method='linear')
#model = ridge_train(Xtrn, Ytrn, 10)
#Ytst = ridge_pred(model, Xtst)
#model = ls_train(Xtrn, Ytrn)
#Ytst = ls_pred(model, Xtst)
Ytst = svm_regression(Xtrn, Xtst, Ytrn, 10)
output_Ytst(Ytst)
return
if __name__ == "__main__":
main()
| mit |
heli522/scikit-learn | sklearn/datasets/rcv1.py | 112 | 8170 | """RCV1 dataset.
"""
# Author: Tom Dupre la Tour
# License: BSD 3 clause
import logging
from os.path import exists, join
from gzip import GzipFile
from io import BytesIO
from contextlib import closing
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from ..utils.fixes import makedirs
from ..externals import joblib
from .svmlight_format import load_svmlight_files
from ..utils import shuffle as shuffle_
URL = ('http://jmlr.csail.mit.edu/papers/volume5/lewis04a/'
'a13-vector-files/lyrl2004_vectors')
URL_topics = ('http://jmlr.csail.mit.edu/papers/volume5/lewis04a/'
'a08-topic-qrels/rcv1-v2.topics.qrels.gz')
logger = logging.getLogger()
def fetch_rcv1(data_home=None, subset='all', download_if_missing=True,
random_state=None, shuffle=False):
"""Load the RCV1 multilabel dataset, downloading it if necessary.
Version: RCV1-v2, vectors, full sets, topics multilabels.
============== =====================
Classes 103
Samples total 804414
Dimensionality 47236
Features real, between 0 and 1
============== =====================
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : string, optional
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
subset: string, 'train', 'test', or 'all', default='all'
Select the dataset to load: 'train' for the training set
(23149 samples), 'test' for the test set (781265 samples),
'all' for both, with the training samples first if shuffle is False.
This follows the official LYRL2004 chronological split.
download_if_missing : boolean, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, optional (default=None)
Random state for shuffling the dataset.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
shuffle : bool, default=False
Whether to shuffle dataset.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : scipy csr array, dtype np.float64, shape (804414, 47236)
The array has 0.16% of non zero values.
dataset.target : scipy csr array, dtype np.uint8, shape (804414, 103)
Each sample has a value of 1 in its categories, and 0 in others.
The array has 3.15% of non zero values.
dataset.sample_id : numpy array, dtype np.uint32, shape (804414,)
Identification number of each sample, as ordered in dataset.data.
dataset.target_names : numpy array, dtype object, length (103)
Names of each target (RCV1 topics), as ordered in dataset.target.
dataset.DESCR : string
Description of the RCV1 dataset.
Reference
---------
Lewis, D. D., Yang, Y., Rose, T. G., & Li, F. (2004). RCV1: A new
benchmark collection for text categorization research. The Journal of
Machine Learning Research, 5, 361-397.
"""
N_SAMPLES = 804414
N_FEATURES = 47236
N_CATEGORIES = 103
N_TRAIN = 23149
data_home = get_data_home(data_home=data_home)
rcv1_dir = join(data_home, "RCV1")
if download_if_missing:
makedirs(rcv1_dir, exist_ok=True)
samples_path = join(rcv1_dir, "samples.pkl")
sample_id_path = join(rcv1_dir, "sample_id.pkl")
sample_topics_path = join(rcv1_dir, "sample_topics.pkl")
topics_path = join(rcv1_dir, "topics_names.pkl")
# load data (X) and sample_id
if download_if_missing and (not exists(samples_path) or
not exists(sample_id_path)):
file_urls = ["%s_test_pt%d.dat.gz" % (URL, i) for i in range(4)]
file_urls.append("%s_train.dat.gz" % URL)
files = []
for file_url in file_urls:
logger.warning("Downloading %s" % file_url)
with closing(urlopen(file_url)) as online_file:
                # buffer the full file in memory so that GzipFile can
                # work correctly
f = BytesIO(online_file.read())
files.append(GzipFile(fileobj=f))
Xy = load_svmlight_files(files, n_features=N_FEATURES)
# Training data is before testing data
X = sp.vstack([Xy[8], Xy[0], Xy[2], Xy[4], Xy[6]]).tocsr()
sample_id = np.hstack((Xy[9], Xy[1], Xy[3], Xy[5], Xy[7]))
sample_id = sample_id.astype(np.uint32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(sample_id, sample_id_path, compress=9)
else:
X = joblib.load(samples_path)
sample_id = joblib.load(sample_id_path)
# load target (y), categories, and sample_id_bis
if download_if_missing and (not exists(sample_topics_path) or
not exists(topics_path)):
logger.warning("Downloading %s" % URL_topics)
with closing(urlopen(URL_topics)) as online_topics:
f = BytesIO(online_topics.read())
# parse the target file
n_cat = -1
n_doc = -1
doc_previous = -1
y = np.zeros((N_SAMPLES, N_CATEGORIES), dtype=np.uint8)
sample_id_bis = np.zeros(N_SAMPLES, dtype=np.int32)
category_names = {}
for line in GzipFile(fileobj=f, mode='rb'):
line_components = line.decode("ascii").split(u" ")
if len(line_components) == 3:
cat, doc, _ = line_components
if cat not in category_names:
n_cat += 1
category_names[cat] = n_cat
doc = int(doc)
if doc != doc_previous:
doc_previous = doc
n_doc += 1
sample_id_bis[n_doc] = doc
y[n_doc, category_names[cat]] = 1
# Samples in X are ordered with sample_id,
# whereas in y, they are ordered with sample_id_bis.
permutation = _find_permutation(sample_id_bis, sample_id)
y = y[permutation, :]
# save category names in a list, with same order than y
categories = np.empty(N_CATEGORIES, dtype=object)
for k in category_names.keys():
categories[category_names[k]] = k
# reorder categories in lexicographic order
order = np.argsort(categories)
categories = categories[order]
y = sp.csr_matrix(y[:, order])
joblib.dump(y, sample_topics_path, compress=9)
joblib.dump(categories, topics_path, compress=9)
else:
y = joblib.load(sample_topics_path)
categories = joblib.load(topics_path)
if subset == 'all':
pass
elif subset == 'train':
X = X[:N_TRAIN, :]
y = y[:N_TRAIN, :]
sample_id = sample_id[:N_TRAIN]
elif subset == 'test':
X = X[N_TRAIN:, :]
y = y[N_TRAIN:, :]
sample_id = sample_id[N_TRAIN:]
else:
raise ValueError("Unknown subset parameter. Got '%s' instead of one"
" of ('all', 'train', test')" % subset)
if shuffle:
X, y, sample_id = shuffle_(X, y, sample_id, random_state=random_state)
return Bunch(data=X, target=y, sample_id=sample_id,
target_names=categories, DESCR=__doc__)
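# Illustrative usage (sketch, assuming the download succeeds):
#   rcv1 = fetch_rcv1(subset='train')
#   rcv1.data.shape      # (23149, 47236), scipy sparse CSR matrix
#   rcv1.target.shape    # (23149, 103)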
def _inverse_permutation(p):
"""inverse permutation p"""
n = p.size
s = np.zeros(n, dtype=np.int32)
i = np.arange(n, dtype=np.int32)
np.put(s, p, i) # s[p] = i
return s
def _find_permutation(a, b):
"""find the permutation from a to b"""
t = np.argsort(a)
u = np.argsort(b)
u_ = _inverse_permutation(u)
return t[u_]
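# Illustrative sketch (not part of the original module): for arrays holding the
# same values, p = _find_permutation(a, b) satisfies a[p] == b, e.g.
#   a = np.array([3, 1, 2]); b = np.array([1, 2, 3])
#   a[_find_permutation(a, b)]   # -> array([1, 2, 3])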
| bsd-3-clause |
40423143/2017springcd_ag7 | plugin/liquid_tags/notebook.py | 235 | 10551 | """
Notebook Tag
------------
This is a liquid-style tag to include a static html rendering of an IPython
notebook in a blog post.
Syntax
------
{% notebook filename.ipynb [ cells[start:end] ]%}
The file should be specified relative to the ``notebooks`` subdirectory of the
content directory. Optionally, this subdirectory can be specified in the
config file:
NOTEBOOK_DIR = 'notebooks'
The cells[start:end] statement is optional, and can be used to specify which
block of cells from the notebook to include.
Requirements
------------
- The plugin requires IPython version 1.0 or above. It no longer supports the
standalone nbconvert package, which has been deprecated.
Details
-------
Because the notebook relies on some rather extensive custom CSS, the use of
this plugin requires additional CSS to be inserted into the blog theme.
After typing "make html" when using the notebook tag, a file called
``_nb_header.html`` will be produced in the main directory. The content
of the file should be included in the header of the theme. An easy way
to accomplish this is to add the following lines within the header template
of the theme you use:
{% if EXTRA_HEADER %}
{{ EXTRA_HEADER }}
{% endif %}
and in your ``pelicanconf.py`` file, include the line:
EXTRA_HEADER = open('_nb_header.html').read().decode('utf-8')
this will insert the appropriate CSS. All efforts have been made to ensure
that this CSS will not override formats within the blog theme, but there may
still be some conflicts.
"""
import re
import os
from functools import partial
from .mdx_liquid_tags import LiquidTags
import IPython
IPYTHON_VERSION = IPython.version_info[0]
try:
import nbformat
except:
pass
if not IPYTHON_VERSION >= 1:
raise ValueError("IPython version 1.0+ required for notebook tag")
try:
from nbconvert.filters.highlight import _pygments_highlight
except ImportError:
try:
from IPython.nbconvert.filters.highlight import _pygments_highlight
except ImportError:
# IPython < 2.0
from IPython.nbconvert.filters.highlight import _pygment_highlight as _pygments_highlight
from pygments.formatters import HtmlFormatter
try:
from nbconvert.exporters import HTMLExporter
except ImportError:
from IPython.nbconvert.exporters import HTMLExporter
try:
from traitlets.config import Config
except ImportError:
from IPython.config import Config
try:
from nbconvert.preprocessors import Preprocessor
except ImportError:
try:
from IPython.nbconvert.preprocessors import Preprocessor
except ImportError:
# IPython < 2.0
from IPython.nbconvert.transformers import Transformer as Preprocessor
try:
from traitlets import Integer
except ImportError:
from IPython.utils.traitlets import Integer
from copy import deepcopy
#----------------------------------------------------------------------
# Some code that will be added to the header:
# Some of the following javascript/css include is adapted from
# IPython/nbconvert/templates/fullhtml.tpl, while some are custom tags
# specifically designed to make the results look good within the
# pelican-octopress theme.
JS_INCLUDE = r"""
<style type="text/css">
/* Overrides of notebook CSS for static HTML export */
div.entry-content {
overflow: visible;
padding: 8px;
}
.input_area {
padding: 0.2em;
}
a.heading-anchor {
white-space: normal;
}
.rendered_html
code {
font-size: .8em;
}
pre.ipynb {
color: black;
background: #f7f7f7;
border: none;
box-shadow: none;
margin-bottom: 0;
padding: 0;
margin: 0px;
font-size: 13px;
}
/* remove the prompt div from text cells */
div.text_cell .prompt {
display: none;
}
/* remove horizontal padding from text cells, */
/* so it aligns with outer body text */
div.text_cell_render {
padding: 0.5em 0em;
}
img.anim_icon{padding:0; border:0; vertical-align:middle; -webkit-box-shadow:none; -box-shadow:none}
div.collapseheader {
    width: 100%;
background-color:#d3d3d3;
padding: 2px;
cursor: pointer;
font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;
}
</style>
<script src="https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML" type="text/javascript"></script>
<script type="text/javascript">
init_mathjax = function() {
if (window.MathJax) {
// MathJax loaded
MathJax.Hub.Config({
tex2jax: {
inlineMath: [ ['$','$'], ["\\(","\\)"] ],
displayMath: [ ['$$','$$'], ["\\[","\\]"] ]
},
displayAlign: 'left', // Change this to 'center' to center equations.
"HTML-CSS": {
styles: {'.MathJax_Display': {"margin": 0}}
}
});
MathJax.Hub.Queue(["Typeset",MathJax.Hub]);
}
}
init_mathjax();
</script>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
<script type="text/javascript">
jQuery(document).ready(function($) {
$("div.collapseheader").click(function () {
$header = $(this).children("span").first();
$codearea = $(this).children(".input_area");
console.log($(this).children());
$codearea.slideToggle(500, function () {
$header.text(function () {
return $codearea.is(":visible") ? "Collapse Code" : "Expand Code";
});
});
});
});
</script>
"""
CSS_WRAPPER = """
<style type="text/css">
{0}
</style>
"""
#----------------------------------------------------------------------
# Create a custom preprocessor
class SliceIndex(Integer):
"""An integer trait that accepts None"""
default_value = None
def validate(self, obj, value):
if value is None:
return value
else:
return super(SliceIndex, self).validate(obj, value)
class SubCell(Preprocessor):
"""A transformer to select a slice of the cells of a notebook"""
start = SliceIndex(0, config=True,
help="first cell of notebook to be converted")
end = SliceIndex(None, config=True,
help="last cell of notebook to be converted")
def preprocess(self, nb, resources):
nbc = deepcopy(nb)
if IPYTHON_VERSION < 3:
for worksheet in nbc.worksheets:
cells = worksheet.cells[:]
worksheet.cells = cells[self.start:self.end]
else:
nbc.cells = nbc.cells[self.start:self.end]
return nbc, resources
call = preprocess # IPython < 2.0
#----------------------------------------------------------------------
# Custom highlighter:
# instead of using class='highlight', use class='highlight-ipynb'
def custom_highlighter(source, language='ipython', metadata=None):
formatter = HtmlFormatter(cssclass='highlight-ipynb')
if not language:
language = 'ipython'
output = _pygments_highlight(source, formatter, language)
return output.replace('<pre>', '<pre class="ipynb">')
#----------------------------------------------------------------------
# Below is the pelican plugin code.
#
SYNTAX = "{% notebook /path/to/notebook.ipynb [ cells[start:end] ] [ language[language] ] %}"
FORMAT = re.compile(r"""^(\s+)?(?P<src>\S+)(\s+)?((cells\[)(?P<start>-?[0-9]*):(?P<end>-?[0-9]*)(\]))?(\s+)?((language\[)(?P<language>-?[a-z0-9\+\-]*)(\]))?(\s+)?$""")
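# Illustrative tags accepted by SYNTAX/FORMAT above (sketch):
#   {% notebook analysis.ipynb %}
#   {% notebook analysis.ipynb cells[2:10] %}
#   {% notebook analysis.ipynb cells[2:10] language[python] %}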
@LiquidTags.register('notebook')
def notebook(preprocessor, tag, markup):
match = FORMAT.search(markup)
if match:
argdict = match.groupdict()
src = argdict['src']
start = argdict['start']
end = argdict['end']
language = argdict['language']
else:
raise ValueError("Error processing input, "
"expected syntax: {0}".format(SYNTAX))
if start:
start = int(start)
else:
start = 0
if end:
end = int(end)
else:
end = None
language_applied_highlighter = partial(custom_highlighter, language=language)
nb_dir = preprocessor.configs.getConfig('NOTEBOOK_DIR')
nb_path = os.path.join('content', nb_dir, src)
if not os.path.exists(nb_path):
raise ValueError("File {0} could not be found".format(nb_path))
# Create the custom notebook converter
c = Config({'CSSHTMLHeaderTransformer':
{'enabled':True, 'highlight_class':'.highlight-ipynb'},
'SubCell':
{'enabled':True, 'start':start, 'end':end}})
template_file = 'basic'
if IPYTHON_VERSION >= 3:
if os.path.exists('pelicanhtml_3.tpl'):
template_file = 'pelicanhtml_3'
elif IPYTHON_VERSION == 2:
if os.path.exists('pelicanhtml_2.tpl'):
template_file = 'pelicanhtml_2'
else:
if os.path.exists('pelicanhtml_1.tpl'):
template_file = 'pelicanhtml_1'
if IPYTHON_VERSION >= 2:
subcell_kwarg = dict(preprocessors=[SubCell])
else:
subcell_kwarg = dict(transformers=[SubCell])
exporter = HTMLExporter(config=c,
template_file=template_file,
filters={'highlight2html': language_applied_highlighter},
**subcell_kwarg)
# read and parse the notebook
with open(nb_path, encoding="utf-8") as f:
nb_text = f.read()
if IPYTHON_VERSION < 3:
nb_json = IPython.nbformat.current.reads_json(nb_text)
else:
try:
nb_json = nbformat.reads(nb_text, as_version=4)
except:
nb_json = IPython.nbformat.reads(nb_text, as_version=4)
(body, resources) = exporter.from_notebook_node(nb_json)
# if we haven't already saved the header, save it here.
if not notebook.header_saved:
print ("\n ** Writing styles to _nb_header.html: "
"this should be included in the theme. **\n")
header = '\n'.join(CSS_WRAPPER.format(css_line)
for css_line in resources['inlining']['css'])
header += JS_INCLUDE
with open('_nb_header.html', 'w', encoding="utf-8") as f:
f.write(header)
notebook.header_saved = True
# this will stash special characters so that they won't be transformed
# by subsequent processes.
body = preprocessor.configs.htmlStash.store(body, safe=True)
return body
notebook.header_saved = False
#----------------------------------------------------------------------
# This import allows notebook to be a Pelican plugin
from liquid_tags import register
| gpl-3.0 |
DonBeo/scikit-learn | sklearn/mixture/tests/test_gmm.py | 6 | 15189 | import unittest
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
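# For reference (sketch): with a diagonal covariance the reference density is
#   log p(x | m, cv) = sum_d log N(x_d | m_d, sqrt(cv_d)),
# computed column by column, one column per mixture component.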
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
        # covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
#X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
    # Test that multiple inits do not do much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
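    # for standard normal data X.var() is close to 1, so n_samples * SGH * n_dim
    # approximates the negative log-likelihood of X under the true model; the
    # aic/bic values below are therefore only checked up to a loose bound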
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to raise a ``LinAlgError``
    exception when computing a ``GMM`` with full covariance matrices and a
    fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
    # we build a dataset with 2 2d components. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
arabenjamin/scikit-learn | sklearn/datasets/tests/test_lfw.py | 228 | 7880 | """These tests for the LFW dataset require medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
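    # (matching pairs are written as "name\tidx1\tidx2" and non-matching pairs
    #  as "name1\tidx1\tname2\tidx2", the same layout the LFW pairs loader
    #  expects)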
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
jhaux/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 49 | 2613 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Convert the target to a one-hot tensor of shape (length of features, 3) and
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.9}
features = layers.stack(
features,
layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
DonBeo/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 346 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases, the performance on the training set decreases, while the
performance on the test set is optimal within a range of values of the
regularization parameter. The example uses an Elastic-Net regression model,
and the performance is measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
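# enet.score returns R^2 (higher is better), so argmax selects the alpha with
# the best performance on the held-out test data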
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
CodingCat/mxnet | tests/python/unittest/test_contrib_io.py | 23 | 1891 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet.ndarray as nd
from mxnet.gluon.data.vision.datasets import *
from mxnet.gluon.data.dataloader import *
from mxnet.contrib.io import *
from mxnet.test_utils import *
def test_contrib_DataLoaderIter():
def test_mnist_batches(batch_size, expected, last_batch='discard'):
dataset = MNIST(train=False)
dataloader = DataLoader(dataset, batch_size, last_batch=last_batch)
test_iter = DataLoaderIter(dataloader)
batch = next(test_iter)
assert batch.data[0].shape == (batch_size, 28, 28, 1)
assert batch.label[0].shape == (batch_size,)
count = 0
test_iter.reset()
for batch in test_iter:
count += 1
assert count == expected, "expected {} batches, given {}".format(expected, count)
num_examples = 10000
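    # last_batch='discard' drops the final incomplete batch, 'keep' keeps it
    # (hence the extra batch for batch_size=31), and 'rollover' carries the
    # remainder over to the next epoch, so the first epoch has 10000 // 31 batches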
test_mnist_batches(50, num_examples // 50, 'discard')
test_mnist_batches(31, num_examples // 31, 'discard')
test_mnist_batches(31, num_examples // 31, 'rollover')
test_mnist_batches(31, num_examples // 31 + 1, 'keep')
if __name__ == "__main__":
test_contrib_DataLoaderIter()
| apache-2.0 |
arabenjamin/scikit-learn | examples/classification/plot_lda_qda.py | 163 | 4806 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
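    # the full axis lengths of the ellipse are 2 * sqrt(eigenvalue) of the
    # covariance matrix (one standard deviation on each side of the mean along
    # each principal direction); the rotation angle is derived from the
    # eigenvectors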
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
    # filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('LDA vs QDA')
plt.show()
| bsd-3-clause |
DonBeo/scikit-learn | examples/classification/plot_lda_qda.py | 163 | 4806 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
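    # the full axis lengths of the ellipse are 2 * sqrt(eigenvalue) of the
    # covariance matrix (one standard deviation on each side of the mean along
    # each principal direction); the rotation angle is derived from the
    # eigenvectors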
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
    # filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('LDA vs QDA')
plt.show()
| bsd-3-clause |
ifuding/Kaggle | SVPC/Code/philly/BarisKanber.py | 3 | 14070 | """
A non-blending lightGBM model that incorporates portions and ideas from various public kernels
This kernel gives LB: 0.977 when the parameter 'debug' below is set to 0 but this implementation requires a machine with ~32 GB of memory
"""
import pandas as pd
import time
import numpy as np
from sklearn.cross_validation import train_test_split
import lightgbm as lgb
import gc
import matplotlib.pyplot as plt
import os
debug=1
if debug:
print('*** debug parameter set: this is a test run for debugging purposes ***')
def lgb_modelfit_nocv(params, dtrain, dvalid, predictors, target='target', objective='binary', metrics='auc',
feval=None, early_stopping_rounds=20, num_boost_round=3000, verbose_eval=10, categorical_features=None):
lgb_params = {
'boosting_type': 'gbdt',
'objective': objective,
'metric':metrics,
'learning_rate': 0.2,
        #'is_unbalance': 'true', #because training data is unbalanced (replaced with scale_pos_weight)
'num_leaves': 31, # we should let it be smaller than 2^(max_depth)
'max_depth': -1, # -1 means no limit
        'min_child_samples': 20,  # Minimum number of data needed in a child (min_data_in_leaf)
'max_bin': 255, # Number of bucketed bin for feature values
'subsample': 0.6, # Subsample ratio of the training instance.
        'subsample_freq': 0,  # frequency of subsample, <=0 means disabled
'colsample_bytree': 0.3, # Subsample ratio of columns when constructing each tree.
'min_child_weight': 5, # Minimum sum of instance weight(hessian) needed in a child(leaf)
'subsample_for_bin': 200000, # Number of samples for constructing bin
        'min_split_gain': 0,  # minimal gain to perform a split (min_gain_to_split, a regularization parameter like lambda_l1/lambda_l2)
'reg_alpha': 0, # L1 regularization term on weights
'reg_lambda': 0, # L2 regularization term on weights
'nthread': 4,
'verbose': 0,
}
lgb_params.update(params)
print("preparing validation datasets")
xgtrain = lgb.Dataset(dtrain[predictors].values, label=dtrain[target].values,
feature_name=predictors,
categorical_feature=categorical_features
)
xgvalid = lgb.Dataset(dvalid[predictors].values, label=dvalid[target].values,
feature_name=predictors,
categorical_feature=categorical_features
)
evals_results = {}
bst1 = lgb.train(lgb_params,
xgtrain,
valid_sets=[xgtrain, xgvalid],
valid_names=['train','valid'],
evals_result=evals_results,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=10,
feval=feval)
print("\nModel Report")
print("bst1.best_iteration: ", bst1.best_iteration)
print(metrics+":", evals_results['valid'][metrics][bst1.best_iteration-1])
return (bst1,bst1.best_iteration)
def DO(frm,to,fileno):
dtypes = {
'ip' : 'uint32',
'app' : 'uint16',
'device' : 'uint16',
'os' : 'uint16',
'channel' : 'uint16',
'is_attributed' : 'uint8',
'click_id' : 'uint32',
}
print('loading train data...',frm,to)
train_df = pd.read_csv("../input/train.csv", parse_dates=['click_time'], skiprows=range(1,frm), nrows=to-frm, dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'is_attributed'])
print('loading test data...')
if debug:
test_df = pd.read_csv("../input/test.csv", nrows=100000, parse_dates=['click_time'], dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id'])
else:
test_df = pd.read_csv("../input/test.csv", parse_dates=['click_time'], dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id'])
len_train = len(train_df)
train_df=train_df.append(test_df)
del test_df
gc.collect()
print('Extracting new features...')
train_df['hour'] = pd.to_datetime(train_df.click_time).dt.hour.astype('uint8')
train_df['day'] = pd.to_datetime(train_df.click_time).dt.day.astype('uint8')
gc.collect()
naddfeat=9
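    # each X<i> feature groups by selcols[:-1] and aggregates selcols[-1];
    # QQ selects the aggregation: 0=count, 1=mean, 2=var, 3=skew, 4=nunique,
    # 5=cumcount (cumcount keeps one value per row instead of merging back)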
for i in range(0,naddfeat):
if i==0: selcols=['ip', 'channel']; QQ=4;
if i==1: selcols=['ip', 'device', 'os', 'app']; QQ=5;
if i==2: selcols=['ip', 'day', 'hour']; QQ=4;
if i==3: selcols=['ip', 'app']; QQ=4;
if i==4: selcols=['ip', 'app', 'os']; QQ=4;
if i==5: selcols=['ip', 'device']; QQ=4;
if i==6: selcols=['app', 'channel']; QQ=4;
if i==7: selcols=['ip', 'os']; QQ=5;
if i==8: selcols=['ip', 'device', 'os', 'app']; QQ=4;
print('selcols',selcols,'QQ',QQ)
filename='X%d_%d_%d.csv'%(i,frm,to)
if os.path.exists(filename):
if QQ==5:
gp=pd.read_csv(filename,header=None)
train_df['X'+str(i)]=gp
else:
gp=pd.read_csv(filename)
train_df = train_df.merge(gp, on=selcols[0:len(selcols)-1], how='left')
else:
if QQ==0:
gp = train_df[selcols].groupby(by=selcols[0:len(selcols)-1])[selcols[len(selcols)-1]].count().reset_index().\
rename(index=str, columns={selcols[len(selcols)-1]: 'X'+str(i)})
train_df = train_df.merge(gp, on=selcols[0:len(selcols)-1], how='left')
if QQ==1:
gp = train_df[selcols].groupby(by=selcols[0:len(selcols)-1])[selcols[len(selcols)-1]].mean().reset_index().\
rename(index=str, columns={selcols[len(selcols)-1]: 'X'+str(i)})
train_df = train_df.merge(gp, on=selcols[0:len(selcols)-1], how='left')
if QQ==2:
gp = train_df[selcols].groupby(by=selcols[0:len(selcols)-1])[selcols[len(selcols)-1]].var().reset_index().\
rename(index=str, columns={selcols[len(selcols)-1]: 'X'+str(i)})
train_df = train_df.merge(gp, on=selcols[0:len(selcols)-1], how='left')
if QQ==3:
gp = train_df[selcols].groupby(by=selcols[0:len(selcols)-1])[selcols[len(selcols)-1]].skew().reset_index().\
rename(index=str, columns={selcols[len(selcols)-1]: 'X'+str(i)})
train_df = train_df.merge(gp, on=selcols[0:len(selcols)-1], how='left')
if QQ==4:
gp = train_df[selcols].groupby(by=selcols[0:len(selcols)-1])[selcols[len(selcols)-1]].nunique().reset_index().\
rename(index=str, columns={selcols[len(selcols)-1]: 'X'+str(i)})
train_df = train_df.merge(gp, on=selcols[0:len(selcols)-1], how='left')
if QQ==5:
gp = train_df[selcols].groupby(by=selcols[0:len(selcols)-1])[selcols[len(selcols)-1]].cumcount()
train_df['X'+str(i)]=gp.values
if not debug:
gp.to_csv(filename,index=False)
del gp
gc.collect()
print('doing nextClick')
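    # nextClick: for every click, the number of seconds until the next click
    # from the same (ip, app, device, os) combination, computed by hashing the
    # combination into 2**26 buckets and walking the data in reverse
    # chronological order while remembering the last click time per bucket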
predictors=[]
new_feature = 'nextClick'
filename='nextClick_%d_%d.csv'%(frm,to)
if os.path.exists(filename):
print('loading from save file')
QQ=pd.read_csv(filename).values
else:
D=2**26
train_df['category'] = (train_df['ip'].astype(str) + "_" + train_df['app'].astype(str) + "_" + train_df['device'].astype(str) \
+ "_" + train_df['os'].astype(str)).apply(hash) % D
click_buffer= np.full(D, 3000000000, dtype=np.uint32)
train_df['epochtime']= train_df['click_time'].astype(np.int64) // 10 ** 9
next_clicks= []
for category, t in zip(reversed(train_df['category'].values), reversed(train_df['epochtime'].values)):
next_clicks.append(click_buffer[category]-t)
click_buffer[category]= t
del(click_buffer)
QQ= list(reversed(next_clicks))
if not debug:
print('saving')
pd.DataFrame(QQ).to_csv(filename,index=False)
train_df[new_feature] = QQ
predictors.append(new_feature)
train_df[new_feature+'_shift'] = pd.DataFrame(QQ).shift(+1).values
predictors.append(new_feature+'_shift')
del QQ
gc.collect()
print('grouping by ip-day-hour combination...')
gp = train_df[['ip','day','hour','channel']].groupby(by=['ip','day','hour'])[['channel']].count().reset_index().rename(index=str, columns={'channel': 'ip_tcount'})
train_df = train_df.merge(gp, on=['ip','day','hour'], how='left')
del gp
gc.collect()
print('grouping by ip-app combination...')
gp = train_df[['ip', 'app', 'channel']].groupby(by=['ip', 'app'])[['channel']].count().reset_index().rename(index=str, columns={'channel': 'ip_app_count'})
train_df = train_df.merge(gp, on=['ip','app'], how='left')
del gp
gc.collect()
print('grouping by ip-app-os combination...')
gp = train_df[['ip','app', 'os', 'channel']].groupby(by=['ip', 'app', 'os'])[['channel']].count().reset_index().rename(index=str, columns={'channel': 'ip_app_os_count'})
train_df = train_df.merge(gp, on=['ip','app', 'os'], how='left')
del gp
gc.collect()
    # Adding features with var and mean hour (inspired by nuhsikander's script)
print('grouping by : ip_day_chl_var_hour')
gp = train_df[['ip','day','hour','channel']].groupby(by=['ip','day','channel'])[['hour']].var().reset_index().rename(index=str, columns={'hour': 'ip_tchan_count'})
train_df = train_df.merge(gp, on=['ip','day','channel'], how='left')
del gp
gc.collect()
print('grouping by : ip_app_os_var_hour')
gp = train_df[['ip','app', 'os', 'hour']].groupby(by=['ip', 'app', 'os'])[['hour']].var().reset_index().rename(index=str, columns={'hour': 'ip_app_os_var'})
train_df = train_df.merge(gp, on=['ip','app', 'os'], how='left')
del gp
gc.collect()
print('grouping by : ip_app_channel_var_day')
gp = train_df[['ip','app', 'channel', 'day']].groupby(by=['ip', 'app', 'channel'])[['day']].var().reset_index().rename(index=str, columns={'day': 'ip_app_channel_var_day'})
train_df = train_df.merge(gp, on=['ip','app', 'channel'], how='left')
del gp
gc.collect()
print('grouping by : ip_app_chl_mean_hour')
gp = train_df[['ip','app', 'channel','hour']].groupby(by=['ip', 'app', 'channel'])[['hour']].mean().reset_index().rename(index=str, columns={'hour': 'ip_app_channel_mean_hour'})
print("merging...")
train_df = train_df.merge(gp, on=['ip','app', 'channel'], how='left')
del gp
gc.collect()
print("vars and data type: ")
train_df.info()
train_df['ip_tcount'] = train_df['ip_tcount'].astype('uint16')
train_df['ip_app_count'] = train_df['ip_app_count'].astype('uint16')
train_df['ip_app_os_count'] = train_df['ip_app_os_count'].astype('uint16')
target = 'is_attributed'
predictors.extend(['app','device','os', 'channel', 'hour', 'day',
'ip_tcount', 'ip_tchan_count', 'ip_app_count',
'ip_app_os_count', 'ip_app_os_var',
'ip_app_channel_var_day','ip_app_channel_mean_hour'])
categorical = ['app', 'device', 'os', 'channel', 'hour', 'day']
for i in range(0,naddfeat):
predictors.append('X'+str(i))
print('predictors',predictors)
test_df = train_df[len_train:]
val_df = train_df[(len_train-val_size):len_train]
train_df = train_df[:(len_train-val_size)]
print("train size: ", len(train_df))
print("valid size: ", len(val_df))
print("test size : ", len(test_df))
sub = pd.DataFrame()
sub['click_id'] = test_df['click_id'].astype('int')
gc.collect()
print("Training...")
start_time = time.time()
params = {
'learning_rate': 0.20,
#'is_unbalance': 'true', # replaced with scale_pos_weight argument
'num_leaves': 7, # 2^max_depth - 1
'max_depth': 3, # -1 means no limit
        'min_child_samples': 100,  # Minimum number of data needed in a child (min_data_in_leaf)
'max_bin': 100, # Number of bucketed bin for feature values
'subsample': 0.7, # Subsample ratio of the training instance.
        'subsample_freq': 1,  # frequency of subsample, <=0 means disabled
'colsample_bytree': 0.9, # Subsample ratio of columns when constructing each tree.
'min_child_weight': 0, # Minimum sum of instance weight(hessian) needed in a child(leaf)
'scale_pos_weight':200 # because training data is extremely unbalanced
}
(bst,best_iteration) = lgb_modelfit_nocv(params,
train_df,
val_df,
predictors,
target,
objective='binary',
metrics='auc',
early_stopping_rounds=30,
verbose_eval=True,
num_boost_round=1000,
categorical_features=categorical)
print('[{}]: model training time'.format(time.time() - start_time))
del train_df
del val_df
gc.collect()
print('Plot feature importances...')
ax = lgb.plot_importance(bst, max_num_features=100)
plt.show()
print("Predicting...")
sub['is_attributed'] = bst.predict(test_df[predictors],num_iteration=best_iteration)
if not debug:
print("writing...")
sub.to_csv('sub_it%d.csv.gz'%(fileno),index=False,compression='gzip')
print("done...")
return sub
nrows=184903891-1
nchunk=40000000
val_size=2500000
frm=nrows-75000000
if debug:
frm=0
nchunk=100000
val_size=10000
to=frm+nchunk
sub=DO(frm,to,0)
| apache-2.0 |
arabenjamin/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 380 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares loss function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
YihaoLu/statsmodels | statsmodels/sandbox/panel/panelmod.py | 27 | 14526 | """
Sandbox Panel Estimators
References
-----------
Baltagi, Badi H. `Econometric Analysis of Panel Data.` 4th ed. Wiley, 2008.
"""
from __future__ import print_function
from statsmodels.compat.python import range, reduce
from statsmodels.tools.tools import categorical
from statsmodels.regression.linear_model import GLS, WLS
import numpy as np
__all__ = ["PanelModel"]
from pandas import LongPanel, __version__
def group(X):
"""
Returns unique numeric values for groups without sorting.
Examples
--------
>>> X = np.array(['a','a','b','c','b','c'])
>>> group(X)
>>> g
array([ 0., 0., 1., 2., 1., 2.])
"""
uniq_dict = {}
group = np.zeros(len(X))
for i in range(len(X)):
if not X[i] in uniq_dict:
uniq_dict.update({X[i] : len(uniq_dict)})
group[i] = uniq_dict[X[i]]
return group
def repanel_cov(groups, sigmas):
'''calculate error covariance matrix for random effects model
Parameters
----------
groups : array, (nobs, nre) or (nobs,)
array of group/category observations
    sigmas : array, (nre+1,)
array of standard deviations of random effects,
last element is the standard deviation of the
idiosyncratic error
Returns
-------
omega : array, (nobs, nobs)
covariance matrix of error
omegainv : array, (nobs, nobs)
inverse covariance matrix of error
omegainvsqrt : array, (nobs, nobs)
squareroot inverse covariance matrix of error
        such that omegainv = omegainvsqrt * omegainvsqrt.T
Notes
-----
This does not use sparse matrices and constructs nobs by nobs
matrices. Also, omegainvsqrt is not sparse, i.e. elements are non-zero
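    Examples
    --------
    Two-way random effects example (mirrors the module's ``__main__`` block):
    >>> groups = np.column_stack((np.array([0, 0, 0, 1, 1, 2, 2, 2]),
    ...                           np.array([0, 1, 2, 1, 2, 0, 1, 2])))
    >>> sigmas = np.array([np.sqrt(2.), np.sqrt(3.), 1.])
    >>> omega, omegainv, omegainvhalf = repanel_cov(groups, sigmas)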
'''
if groups.ndim == 1:
groups = groups[:,None]
nobs, nre = groups.shape
omega = sigmas[-1]*np.eye(nobs)
for igr in range(nre):
group = groups[:,igr:igr+1]
groupuniq = np.unique(group)
dummygr = sigmas[igr] * (group == groupuniq).astype(float)
omega += np.dot(dummygr, dummygr.T)
ev, evec = np.linalg.eigh(omega) #eig doesn't work
omegainv = np.dot(evec, (1/ev * evec).T)
omegainvhalf = evec/np.sqrt(ev)
return omega, omegainv, omegainvhalf
class PanelData(LongPanel):
pass
class PanelModel(object):
"""
An abstract statistical model class for panel (longitudinal) datasets.
Parameters
    ----------
endog : array-like or str
If a pandas object is used then endog should be the name of the
endogenous variable as a string.
# exog
# panel_arr
# time_arr
panel_data : pandas.LongPanel object
Notes
-----
If a pandas object is supplied it is assumed that the major_axis is time
and that the minor_axis has the panel variable.
"""
def __init__(self, endog=None, exog=None, panel=None, time=None,
xtnames=None, equation=None, panel_data=None):
        if panel_data is None:
# if endog == None and exog == None and panel == None and \
# time == None:
# raise ValueError("If pandel_data is False then endog, exog, \
#panel_arr, and time_arr cannot be None.")
self.initialize(endog, exog, panel, time, xtnames, equation)
# elif aspandas != False:
# if not isinstance(endog, str):
# raise ValueError("If a pandas object is supplied then endog \
#must be a string containing the name of the endogenous variable")
# if not isinstance(aspandas, LongPanel):
# raise ValueError("Only pandas.LongPanel objects are supported")
# self.initialize_pandas(endog, aspandas, panel_name)
def initialize(self, endog, exog, panel, time, xtnames, equation):
"""
Initialize plain array model.
See PanelModel
"""
#TODO: for now, we are going assume a constant, and then make the first
#panel the base, add a flag for this....
# get names
names = equation.split(" ")
self.endog_name = names[0]
exog_names = names[1:] # this makes the order matter in the array
self.panel_name = xtnames[0]
self.time_name = xtnames[1]
novar = exog.var(0) == 0
if True in novar:
cons_index = np.where(novar == 1)[0][0] # constant col. num
exog_names.insert(cons_index, 'cons')
self._cons_index = novar # used again in fit_fixed
self.exog_names = exog_names
self.endog = np.squeeze(np.asarray(endog))
exog = np.asarray(exog)
self.exog = exog
self.panel = np.asarray(panel)
self.time = np.asarray(time)
self.paneluniq = np.unique(panel)
self.timeuniq = np.unique(time)
#TODO: this structure can possibly be extracted somewhat to deal with
#names in general
#TODO: add some dimension checks, etc.
# def initialize_pandas(self, endog, aspandas):
# """
# Initialize pandas objects.
#
# See PanelModel.
# """
# self.aspandas = aspandas
# endog = aspandas[endog].values
# self.endog = np.squeeze(endog)
# exog_name = aspandas.columns.tolist()
# exog_name.remove(endog)
# self.exog = aspandas.filterItems(exog_name).values
#TODO: can the above be simplified to slice notation?
# if panel_name != None:
# self.panel_name = panel_name
# self.exog_name = exog_name
# self.endog_name = endog
# self.time_arr = aspandas.major_axis
#TODO: is time always handled correctly in fromRecords?
# self.panel_arr = aspandas.minor_axis
#TODO: all of this might need to be refactored to explicitly rely (internally)
# on the pandas LongPanel structure for speed and convenience.
# not sure this part is finished...
#TODO: doesn't conform to new initialize
def initialize_pandas(self, panel_data, endog_name, exog_name):
self.panel_data = panel_data
endog = panel_data[endog_name].values # does this create a copy?
self.endog = np.squeeze(endog)
        if exog_name is None:
exog_name = panel_data.columns.tolist()
exog_name.remove(endog_name)
self.exog = panel_data.filterItems(exog_name).values # copy?
self._exog_name = exog_name
self._endog_name = endog_name
self._timeseries = panel_data.major_axis # might not need these
self._panelseries = panel_data.minor_axis
#TODO: this could be pulled out and just have a by kwd that takes
# the panel or time array
#TODO: this also needs to be expanded for 'twoway'
def _group_mean(self, X, index='oneway', counts=False, dummies=False):
"""
Get group means of X by time or by panel.
index default is panel
"""
if index == 'oneway':
Y = self.panel
uniq = self.paneluniq
elif index == 'time':
Y = self.time
uniq = self.timeuniq
else:
raise ValueError("index %s not understood" % index)
#TODO: use sparse matrices
dummy = (Y == uniq[:,None]).astype(float)
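        # dummy is an (n_groups, nobs) indicator matrix, so dot(dummy, X) sums
        # X within each group and dividing by dummy.sum(1) yields the group means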
if X.ndim > 1:
mean = np.dot(dummy,X)/dummy.sum(1)[:,None]
else:
mean = np.dot(dummy,X)/dummy.sum(1)
if counts == False and dummies == False:
return mean
elif counts == True and dummies == False:
return mean, dummy.sum(1)
elif counts == True and dummies == True:
return mean, dummy.sum(1), dummy
elif counts == False and dummies == True:
return mean, dummy
#TODO: Use kwd arguments or have fit_method methods?
def fit(self, model=None, method=None, effects='oneway'):
"""
method : LSDV, demeaned, MLE, GLS, BE, FE, optional
model :
between
fixed
random
pooled
[gmm]
effects :
oneway
time
            twoways
femethod : demeaned (only one implemented)
WLS
remethod :
swar -
amemiya
nerlove
walhus
Notes
------
This is unfinished. None of the method arguments work yet.
Only oneway effects should work.
"""
if method: # get rid of this with default
method = method.lower()
model = model.lower()
if method and method not in ["lsdv", "demeaned", "mle", "gls", "be",
"fe"]: # get rid of if method with default
raise ValueError("%s not a valid method" % method)
# if method == "lsdv":
# self.fit_lsdv(model)
if model == 'pooled':
return GLS(self.endog, self.exog).fit()
if model == 'between':
return self._fit_btwn(method, effects)
if model == 'fixed':
return self._fit_fixed(method, effects)
# def fit_lsdv(self, effects):
# """
# Fit using least squares dummy variables.
#
# Notes
# -----
# Should only be used for small `nobs`.
# """
# pdummies = None
# tdummies = None
def _fit_btwn(self, method, effects):
# group mean regression or WLS
if effects != "twoway":
endog = self._group_mean(self.endog, index=effects)
exog = self._group_mean(self.exog, index=effects)
else:
raise ValueError("%s effects is not valid for the between \
estimator" % s)
befit = GLS(endog, exog).fit()
return befit
def _fit_fixed(self, method, effects):
endog = self.endog
exog = self.exog
demeantwice = False
if effects in ["oneway","twoways"]:
if effects == "twoways":
demeantwice = True
effects = "oneway"
endog_mean, counts = self._group_mean(endog, index=effects,
counts=True)
exog_mean = self._group_mean(exog, index=effects)
counts = counts.astype(int)
endog = endog - np.repeat(endog_mean, counts)
exog = exog - np.repeat(exog_mean, counts, axis=0)
if demeantwice or effects == "time":
endog_mean, dummies = self._group_mean(endog, index="time",
dummies=True)
exog_mean = self._group_mean(exog, index="time")
# This allows unbalanced panels
endog = endog - np.dot(endog_mean, dummies)
exog = exog - np.dot(dummies.T, exog_mean)
        fefit = GLS(endog, exog[:, ~self._cons_index]).fit()
#TODO: might fail with one regressor
return fefit
class SURPanel(PanelModel):
pass
class SEMPanel(PanelModel):
pass
class DynamicPanel(PanelModel):
pass
if __name__ == "__main__":
import pandas
from pandas import LongPanel
import statsmodels.api as sm
import numpy.lib.recfunctions as nprf
data = sm.datasets.grunfeld.load()
# Baltagi doesn't include American Steel
endog = data.endog[:-20]
fullexog = data.exog[:-20]
# fullexog.sort(order=['firm','year'])
panel_arr = nprf.append_fields(fullexog, 'investment', endog, float,
usemask=False)
panel_panda = LongPanel.fromRecords(panel_arr, major_field='year',
minor_field='firm')
# the most cumbersome way of doing it as far as preprocessing by hand
exog = fullexog[['value','capital']].view(float).reshape(-1,2)
exog = sm.add_constant(exog, prepend=False)
panel = group(fullexog['firm'])
year = fullexog['year']
panel_mod = PanelModel(endog, exog, panel, year, xtnames=['firm','year'],
equation='invest value capital')
# note that equation doesn't actually do anything but name the variables
panel_ols = panel_mod.fit(model='pooled')
panel_be = panel_mod.fit(model='between', effects='oneway')
panel_fe = panel_mod.fit(model='fixed', effects='oneway')
panel_bet = panel_mod.fit(model='between', effects='time')
panel_fet = panel_mod.fit(model='fixed', effects='time')
panel_fe2 = panel_mod.fit(model='fixed', effects='twoways')
#see also Baltagi (3rd edt) 3.3 THE RANDOM EFFECTS MODEL p.35
#for explicit formulas for spectral decomposition
#but this works also for unbalanced panel
#
#I also just saw: 9.4.2 The Random Effects Model p.176 which is
#partially almost the same as I did
#
#this needs to use sparse matrices for larger datasets
#
#"""
#
#import numpy as np
#
groups = np.array([0,0,0,1,1,2,2,2])
nobs = groups.shape[0]
groupuniq = np.unique(groups)
periods = np.array([0,1,2,1,2,0,1,2])
perioduniq = np.unique(periods)
dummygr = (groups[:,None] == groupuniq).astype(float)
dummype = (periods[:,None] == perioduniq).astype(float)
sigma = 1.
sigmagr = np.sqrt(2.)
sigmape = np.sqrt(3.)
#dummyall = np.c_[sigma*np.ones((nobs,1)), sigmagr*dummygr,
# sigmape*dummype]
#exclude constant ?
dummyall = np.c_[sigmagr*dummygr, sigmape*dummype]
# omega is the error variance-covariance matrix for the stacked
# observations
omega = np.dot(dummyall, dummyall.T) + sigma* np.eye(nobs)
print(omega)
print(np.linalg.cholesky(omega))
ev, evec = np.linalg.eigh(omega) #eig doesn't work
omegainv = np.dot(evec, (1/ev * evec).T)
omegainv2 = np.linalg.inv(omega)
omegacomp = np.dot(evec, (ev * evec).T)
print(np.max(np.abs(omegacomp - omega)))
#check
#print(np.dot(omegainv,omega)
print(np.max(np.abs(np.dot(omegainv,omega) - np.eye(nobs))))
omegainvhalf = evec/np.sqrt(ev) #not sure whether ev shouldn't be column
print(np.max(np.abs(np.dot(omegainvhalf,omegainvhalf.T) - omegainv)))
# now we can use omegainvhalf in GLS (instead of the cholesky)
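    # premultiplying y and X by omegainvhalf.T whitens the errors, so running
    # OLS on the transformed data gives the GLS estimates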
sigmas2 = np.array([sigmagr, sigmape, sigma])
groups2 = np.column_stack((groups, periods))
omega_, omegainv_, omegainvhalf_ = repanel_cov(groups2, sigmas2)
print(np.max(np.abs(omega_ - omega)))
print(np.max(np.abs(omegainv_ - omegainv)))
print(np.max(np.abs(omegainvhalf_ - omegainvhalf)))
# notation Baltagi (3rd) section 9.4.1 (Fixed Effects Model)
Pgr = reduce(np.dot,[dummygr,
np.linalg.inv(np.dot(dummygr.T, dummygr)),dummygr.T])
Qgr = np.eye(nobs) - Pgr
# within group effect: np.dot(Qgr, groups)
# but this is not memory efficient, compared to groupstats
print(np.max(np.abs(np.dot(Qgr, groups))))
| bsd-3-clause |
tapomayukh/projects_in_python | sandbox_tapo/src/skin_related/BMED_8813_HAP/Scaling/results/cross_validate_objects_BMED_8813_HAP_scaled_method_I.py | 1 | 3959 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/BMED_8813_HAP/Data')
from data_method_I import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
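    # NaN is the only value that is not equal to itself, so the loop below
    # counts NaN entries in the first 123 x 90 block of the feature matrix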
while i < 123:
j=0
while j < 90:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
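    # illustrative: the smallest number of components explaining e.g. 95% of
    # the variance could be picked as
    #   k_95 = np.argmax(perc_total >= 0.95) + 1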
    # Reduced Eigen-Vector Matrix according to highest Eigenvalues (keeping the first 12 components here)
W = eigvec_total[:,0:12]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
#Projected Data:
Y = (W.T)*B
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_2 = ['Can-Edge-1']*5 + ['Book-Edge-1']*5 + ['Brown-Cardboard-Box-Edge-1']*5 + ['Cinder-Block-Edge-1']*5 + ['Tin-Box-Edge-1']*5 + ['White-Cardboard-Box-Edge-1']*5 + ['Can-Surface']*5 + ['Book-Surface']*5 + ['Brown-Cardboard-Box-Surface']*5 + ['Cinder-Block-Surface']*5 + ['Tin-Box-Surface']*5 + ['White-Cardboard-Box-Surface']*5 + ['Can-Edge-2']*5 + ['Book-Edge-2']*5 + ['Brown-Cardboard-Box-Edge-2']*5 + ['Cinder-Block-Edge-2']*5 + ['Tin-Box-Edge-2']*5 + ['White-Cardboard-Box-Edge-2']*5
clf = kNN(k=1)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True',numbers_alpha=2)
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,30.3,0,1.2])
    grid(True)
show()
| mit |
likelyzhao/mxnet | python/mxnet/model.py | 1 | 38989 | # pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
from __future__ import absolute_import, print_function
import os
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
from sklearn.base import BaseEstimator
BASE_ESTIMATOR = BaseEstimator
except ImportError:
SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
    This function selects and creates a proper kvstore given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = True
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
        if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
            if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
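    # e.g. _create_kvstore('local', 2, arg_params) creates a local KVStore and,
    # for 'local', disables update_on_kvstore when the largest parameter has
    # more than 16M elements.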
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names,
update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
kvstore.init(idx, arg_params[param_names[idx]])
if update_on_kvstore:
kvstore.pull(idx, param_on_devs, priority=-idx)
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
# push gradient, priority is negative index
kvstore.push(index, grad_list, priority=-index)
# pull back the weights
kvstore.pull(index, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
if kvstore:
# push gradient, priority is negative index
kvstore.push(index, grad_list, priority=-index)
# pull back the sum gradients, to the same locations.
kvstore.pull(index, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# faked an index here, to make optimizer create diff
# state for the same index but on diff devs, TODO(mli)
# use a better solution latter
w, g = p
updater(index*num_device+k, g, w)
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
This function will also work for single device as well.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
        The beginning training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
        This can be used to measure speed, get results from the evaluation metric, etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
Whether or not perform weight updating on kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
- This function will inplace update the NDArrays in `arg_params` and `aux_states`.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
if update_on_kvstore:
kvstore.set_optimizer(optimizer)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
# end of all epochs
return
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
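# Illustrative usage (hypothetical prefix and in-memory net/params):
#   save_checkpoint('my-model', 10, net, arg_params, aux_params)
# writes 'my-model-symbol.json' and 'my-model-0010.params'.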
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
if os.path.exists('%s-symbol.json' % prefix):
symbol = sym.load('%s-symbol.json' % prefix)
else:
symbol = None
save_dict = nd.load('%s-%04d.params' % (prefix, epoch))
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return (symbol, arg_params, aux_params)
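# Illustrative usage (hypothetical prefix): the checkpoint written above can be
# restored with
#   sym_loaded, arg_params, aux_params = load_checkpoint('my-model', 10)
# which reads 'my-model-symbol.json' (if present) and 'my-model-0010.params'.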
from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position
class FeedForward(BASE_ESTIMATOR):
"""Model class of MXNet for training and predicting feedforward nets.
This class is designed for a single-data single output supervised network.
Parameters
----------
symbol : Symbol
The symbol configuration of computation network.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi GPU training, pass in a list of gpu contexts.
num_epoch : int, optional
        Training parameter, the number of training epochs.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
Training parameter, name or optimizer object for training.
initializer : initializer function, optional
Training parameter, the initialization scheme used.
numpy_batch_size : int, optional
The batch size of training data.
Only needed when input array is numpy.
arg_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's auxiliary states.
allow_extra_params : boolean, optional
Whether allow extra parameters that are not needed by symbol
to be passed by aux_params and ``arg_params``.
If this is True, no error will be thrown when ``aux_params`` and ``arg_params``
contain more parameters than needed.
begin_epoch : int, optional
        The beginning training epoch.
kwargs : dict
The additional keyword arguments passed to optimizer.
"""
def __init__(self, symbol, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd',
initializer=Uniform(0.01),
numpy_batch_size=128,
arg_params=None, aux_params=None,
allow_extra_params=False,
begin_epoch=0,
**kwargs):
warnings.warn(
'\033[91mmxnet.model.FeedForward has been deprecated. ' + \
'Please use mxnet.mod.Module instead.\033[0m',
DeprecationWarning, stacklevel=2)
if isinstance(symbol, sym.Symbol):
self.symbol = symbol
self.sym_gen = None
else:
assert(callable(symbol))
self.symbol = None
self.sym_gen = symbol
# model parameters
self.arg_params = arg_params
self.aux_params = aux_params
self.allow_extra_params = allow_extra_params
self.argument_checked = False
if self.sym_gen is None:
self._check_arguments()
# basic configuration
if ctx is None:
ctx = [cpu()]
elif isinstance(ctx, Context):
ctx = [ctx]
self.ctx = ctx
# training parameters
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.kwargs = kwargs.copy()
self.optimizer = optimizer
self.initializer = initializer
self.numpy_batch_size = numpy_batch_size
# internal helper state
self._pred_exec = None
self.begin_epoch = begin_epoch
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
# check if symbol contain duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
@staticmethod
def _is_data_arg(name):
"""Check if name is a data argument."""
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**dict(input_shapes))
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **dict(input_shapes))
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
else:
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
        eval_metric : metric.EvalMetric or str
The metric for calculating score.
num_batch : int or None
            The number of batches to run. Go through all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
            Defaults to 'local'; often no need to change for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
        work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
        - 'local', multiple devices on a single machine; will automatically choose the best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
        # init optimizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
if kvstore and 'dist' in kvstore.type and not '_async' in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
        elif isinstance(self.optimizer, opt.Optimizer):
            optimizer = self.optimizer
        else:
            raise TypeError('optimizer must be a str or an Optimizer instance')
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
def save(self, prefix, epoch=None):
"""Checkpoint the model checkpoint into file.
You can also use `pickle` to do the job if you only work on Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
        One can also directly `load`/`save` from/to cloud storage (S3, HDFS).
Parameters
----------
prefix : str
Prefix of model name.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
epoch number of model we would like to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
@staticmethod
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
            The number of training epochs.
epoch_size : int, optional
            Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
        initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
            If `eval_data` is a ``numpy.ndarray`` pair, it should
be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
            The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
            Defaults to 'local'; often no need to change for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
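# Illustrative end-to-end sketch (hypothetical network and data iterators):
#   net = sym.Variable('data')
#   net = sym.FullyConnected(data=net, name='fc1', num_hidden=10)
#   net = sym.SoftmaxOutput(data=net, name='softmax')
#   model = FeedForward.create(net, X=train_iter, eval_data=val_iter,
#                              ctx=[cpu()], num_epoch=5, optimizer='sgd')
#   prob = model.predict(val_iter)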
| apache-2.0 |
megaumi/django | django/contrib/gis/geoip2/base.py | 73 | 8630 | import os
import socket
import geoip2.database
from django.conf import settings
from django.core.validators import ipv4_re
from django.utils import six
from django.utils.ipv6 import is_valid_ipv6_address
from .resources import City, Country
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = {
'GEOIP_PATH': getattr(settings, 'GEOIP_PATH', None),
'GEOIP_CITY': getattr(settings, 'GEOIP_CITY', 'GeoLite2-City.mmdb'),
'GEOIP_COUNTRY': getattr(settings, 'GEOIP_COUNTRY', 'GeoLite2-Country.mmdb'),
}
class GeoIP2Exception(Exception):
pass
class GeoIP2(object):
# The flags for GeoIP memory caching.
# Try MODE_MMAP_EXT, MODE_MMAP, MODE_FILE in that order.
MODE_AUTO = 0
# Use the C extension with memory map.
MODE_MMAP_EXT = 1
# Read from memory map. Pure Python.
MODE_MMAP = 2
# Read database as standard file. Pure Python.
MODE_FILE = 4
# Load database into memory. Pure Python.
MODE_MEMORY = 8
cache_options = {opt: None for opt in (0, 1, 2, 4, 8)}
# Paths to the city & country binary databases.
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
        Initialize the GeoIP2 object. No parameters are required to use default
settings. Keyword arguments may be passed in to customize the locations
of the GeoIP datasets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.mmdb) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH setting.
* cache: The cache settings when opening up the GeoIP datasets. May be
an integer in (0, 1, 2, 4, 8) corresponding to the MODE_AUTO,
MODE_MMAP_EXT, MODE_MMAP, MODE_FILE, and MODE_MEMORY,
`GeoIPOptions` C API settings, respectively. Defaults to 0,
meaning MODE_AUTO.
* country: The name of the GeoIP country data file. Defaults to
'GeoLite2-Country.mmdb'; overrides the GEOIP_COUNTRY setting.
* city: The name of the GeoIP city data file. Defaults to
'GeoLite2-City.mmdb'; overrides the GEOIP_CITY setting.
"""
# Checking the given cache option.
if cache in self.cache_options:
self._cache = cache
else:
raise GeoIP2Exception('Invalid GeoIP caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS['GEOIP_PATH']
if not path:
raise GeoIP2Exception('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, six.string_types):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try to open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS['GEOIP_COUNTRY'])
if os.path.isfile(country_db):
self._country = geoip2.database.Reader(country_db, mode=cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS['GEOIP_CITY'])
if os.path.isfile(city_db):
self._city = geoip2.database.Reader(city_db, mode=cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure out
# whether the given database path is for the GeoIP country or city
# databases.
reader = geoip2.database.Reader(path, mode=cache)
db_type = reader.metadata().database_type
if db_type.endswith('City'):
# GeoLite City database detected.
self._city = reader
self._city_file = path
elif db_type.endswith('Country'):
# GeoIP Country database detected.
self._country = reader
self._country_file = path
else:
raise GeoIP2Exception('Unable to recognize database edition: %s' % db_type)
else:
raise GeoIP2Exception('GeoIP path must be a valid file or directory.')
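    # Illustrative usage (hypothetical data directory; requires the MaxMind
    # GeoLite2 .mmdb files):
    #   g = GeoIP2('/opt/geoip/')         # or rely on settings.GEOIP_PATH
    #   g.country('84.2.3.4')             # {'country_code': ..., 'country_name': ...}
    #   g.city('84.2.3.4')['latitude']    # requires the city database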
@property
def _reader(self):
if self._country:
return self._country
else:
return self._city
@property
def _country_or_city(self):
if self._country:
return self._country.country
else:
return self._city.city
def __del__(self):
# Cleanup any GeoIP file handles lying around.
if self._reader:
self._reader.close()
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Helper routine for checking the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, six.string_types):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIP2Exception('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIP2Exception('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIP2Exception('Invalid GeoIP city data file: %s' % self._city_file)
# Return the query string back to the caller. GeoIP2 only takes IP addresses.
if not (ipv4_re.match(query) or is_valid_ipv6_address(query)):
query = socket.gethostbyname(query)
return query
def city(self, query):
"""
Return a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
enc_query = self._check_query(query, city=True)
return City(self._city.city(enc_query))
def country_code(self, query):
"Return the country code for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_code']
def country_name(self, query):
"Return the country name for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_name']
def country(self, query):
"""
Return a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
enc_query = self._check_query(query, city_or_country=True)
return Country(self._country_or_city(enc_query))
# #### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
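        "Return a coordinate tuple for the given query in the requested ordering, or None."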
cdict = self.city(query)
if cdict is None:
return None
else:
return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Return a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Return a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Return a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
# #### GeoIP Database Information Routines ####
@property
def info(self):
"Return information about the GeoIP library and databases in use."
meta = self._reader.metadata()
return 'GeoIP Library:\n\t%s.%s\n' % (meta.binary_format_major_version, meta.binary_format_minor_version)
@classmethod
def open(cls, full_path, cache):
return GeoIP2(full_path, cache)
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 212 | 3359 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
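# Typical standalone usage (illustrative, mirroring the fit above):
#   mcd = MinCovDet(random_state=0).fit(X)
#   robust_location, robust_cov = mcd.location_, mcd.covariance_
#   outlyingness = mcd.mahalanobis(X)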
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |